Columns in this dump (name, type, observed range):
  patch  string  length 17 to 31.2k
  y      int64   always 1
  oldf   string  length 0 to 2.21M
  idx    int64   always 1
  id     int64   4.29k to 68.4k
  msg    string  length 8 to 843
  proj   string  212 distinct values
  lang   string  9 distinct values
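For orientation, here is a minimal sketch of how one row of this dump could be modeled in Go. It is an assumption about representation only (field names mirror the columns above and the sample values are taken from the first row below), not tooling that ships with the dataset.

```go
package main

import "fmt"

// reviewRow mirrors the columns listed above: a unified diff, its label,
// the pre-change file contents, identifiers, the reviewer comment, and
// project/language metadata.
type reviewRow struct {
	Patch string // unified diff under review
	Y     int64  // label column (always 1 in this dump)
	OldF  string // full file contents before the change
	Idx   int64  // per-row index (always 1 in this dump)
	ID    int64  // numeric row identifier
	Msg   string // reviewer comment attached to the diff
	Proj  string // project slug, e.g. "apache-servicecomb-java-chassis"
	Lang  string // language tag, e.g. "java", "go", "js"
}

func main() {
	row := reviewRow{
		ID:   10381,
		Msg:  "Do we have test cases where name is null?",
		Proj: "apache-servicecomb-java-chassis",
		Lang: "java",
	}
	fmt.Printf("%s (%s): %s\n", row.Proj, row.Lang, row.Msg)
}
```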
@@ -98,7 +98,7 @@ public class CodeFirstSpringmvc extends CodeFirstSpringmvcBase {
   @RequestMapping(path = "/uploadWithoutAnnotation", method = RequestMethod.POST,
       produces = MediaType.TEXT_PLAIN_VALUE, consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
   public String fileUploadWithoutAnnotation(MultipartFile file1, MultipartFile file2,
-      @RequestAttribute("name") String name) {
+      @RequestAttribute(name = "name", required = false) String name) {
     return super.fileUpload(file1, file2, name);
   }
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.servicecomb.demo.springmvc.tests.endpoints; import java.util.Date; import java.util.List; import java.util.Map; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.QueryParam; import org.apache.servicecomb.demo.controller.Person; import org.apache.servicecomb.demo.server.User; import org.apache.servicecomb.provider.rest.common.RestSchema; import org.apache.servicecomb.swagger.extend.annotations.ResponseHeaders; import org.apache.servicecomb.swagger.invocation.Response; import org.apache.servicecomb.swagger.invocation.context.InvocationContext; import org.springframework.context.annotation.Profile; import org.springframework.http.MediaType; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.CookieValue; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestAttribute; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestHeader; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RequestPart; import org.springframework.web.multipart.MultipartFile; import io.swagger.annotations.ApiImplicitParam; import io.swagger.annotations.ApiImplicitParams; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiResponse; import io.swagger.annotations.ResponseHeader; @Profile("!SimplifiedMapping") @RestSchema(schemaId = "codeFirst") @RequestMapping(path = "/codeFirstSpringmvc", produces = MediaType.APPLICATION_JSON_VALUE) public class CodeFirstSpringmvc extends CodeFirstSpringmvcBase { @ResponseHeaders({@ResponseHeader(name = "h1", response = String.class), @ResponseHeader(name = "h2", response = String.class)}) @RequestMapping(path = "/responseEntity", method = RequestMethod.POST, consumes = MediaType.APPLICATION_FORM_URLENCODED_VALUE) @Override public ResponseEntity<Date> responseEntity(InvocationContext c1, @RequestAttribute("date") Date date) { return super.responseEntity(c1, date); } @ApiResponse(code = 200, response = User.class, message = "") @ResponseHeaders({@ResponseHeader(name = "h1", response = String.class), @ResponseHeader(name = "h2", response = String.class)}) @RequestMapping(path = "/cseResponse", method = RequestMethod.GET) @Override public Response cseResponse(InvocationContext c1) { return super.cseResponse(c1); } @RequestMapping(path = "/testUserMap", method = RequestMethod.POST) @Override public Map<String, User> testUserMap(@RequestBody Map<String, User> userMap) { return super.testUserMap(userMap); } @RequestMapping(path = 
"/textPlain", method = RequestMethod.POST, consumes = MediaType.TEXT_PLAIN_VALUE) @Override public String textPlain(@RequestBody String body) { return super.textPlain(body); } @RequestMapping(path = "/bytes", method = RequestMethod.POST) @Override public byte[] bytes(@RequestBody byte[] input) { return super.bytes(input); } @RequestMapping(path = "/upload", method = RequestMethod.POST, produces = MediaType.TEXT_PLAIN_VALUE, consumes = MediaType.MULTIPART_FORM_DATA_VALUE) @Override public String fileUpload(@RequestPart(name = "file1") MultipartFile file1, @RequestPart(name = "someFile") MultipartFile file2, @RequestAttribute("name") String name) { return super.fileUpload(file1, file2, name); } @RequestMapping(path = "/uploadWithoutAnnotation", method = RequestMethod.POST, produces = MediaType.TEXT_PLAIN_VALUE, consumes = MediaType.MULTIPART_FORM_DATA_VALUE) public String fileUploadWithoutAnnotation(MultipartFile file1, MultipartFile file2, @RequestAttribute("name") String name) { return super.fileUpload(file1, file2, name); } @RequestMapping(path = "/addDate", method = RequestMethod.POST) @Override public Date addDate(@RequestAttribute("date") Date date, @QueryParam("seconds") long seconds) { return super.addDate(date, seconds); } // this should be ignored as it's hidden @ApiOperation(value = "", hidden = true, httpMethod = "POST") public int add(@RequestParam("a") int a) { return a; } @RequestMapping(path = "/add", method = RequestMethod.POST, consumes = MediaType.APPLICATION_FORM_URLENCODED_VALUE) @Override public int add(@RequestAttribute("a") int a, @RequestAttribute("b") int b) { return super.add(a, b); } @RequestMapping(path = "/reduce", method = RequestMethod.GET) @ApiImplicitParams({@ApiImplicitParam(name = "a", dataType = "integer", format = "int32", paramType = "query")}) @Override public int reduce(HttpServletRequest request, @CookieValue(name = "b") int b) { return super.reduce(request, b); } @RequestMapping(path = "/sayhello", method = RequestMethod.POST) @Override public Person sayHello(@RequestBody Person user) { return super.sayHello(user); } @RequestMapping(path = "/testrawjson", method = RequestMethod.POST) @Override public String testRawJsonString(String jsonInput) { return super.testRawJsonString(jsonInput); } @RequestMapping(path = "/saysomething", method = RequestMethod.POST) @Override public String saySomething(@RequestHeader(name = "prefix") String prefix, @RequestBody Person user) { return super.saySomething(prefix, user); } @RequestMapping(path = "/sayhi/{name}", method = RequestMethod.PUT) @Override public String sayHi(@PathVariable(name = "name") String name) { return super.sayHi(name); } @RequestMapping(path = "/sayhi/{name}/v2", method = RequestMethod.PUT) @Override public String sayHi2(@PathVariable(name = "name") String name) { return super.sayHi2(name); } @RequestMapping(path = "/istrue", method = RequestMethod.GET) @Override public boolean isTrue() { return super.isTrue(); } @RequestMapping(path = "/addstring", method = RequestMethod.DELETE, produces = MediaType.TEXT_PLAIN_VALUE) @Override public String addString(@RequestParam(name = "s") List<String> s) { return super.addString(s); } }
1
10,381
Do we have test cases where name is null?
apache-servicecomb-java-chassis
java
@@ -109,8 +109,16 @@ type SumIntAgg struct {
 func (a *SumIntAgg) DoInt(vs *array.Int64) {
 	// https://issues.apache.org/jira/browse/ARROW-4081
-	if vs.Len() > 0 {
-		a.sum += math.Int64.Sum(vs)
+	if l := vs.Len() - vs.NullN(); l > 0 {
+		if vs.NullN() == 0 {
+			a.sum += math.Int64.Sum(vs)
+		} else {
+			for i := 0; i < vs.Len(); i++ {
+				if vs.IsValid(i) {
+					a.sum += vs.Value(i)
+				}
+			}
+		}
 	}
 }

 func (a *SumIntAgg) Type() flux.ColType {
1
package transformations import ( "fmt" "github.com/apache/arrow/go/arrow/array" "github.com/apache/arrow/go/arrow/math" "github.com/influxdata/flux" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/plan" ) const SumKind = "sum" type SumOpSpec struct { execute.AggregateConfig } func init() { sumSignature := execute.AggregateSignature(nil, nil) flux.RegisterFunction(SumKind, createSumOpSpec, sumSignature) flux.RegisterOpSpec(SumKind, newSumOp) plan.RegisterProcedureSpec(SumKind, newSumProcedure, SumKind) execute.RegisterTransformation(SumKind, createSumTransformation) } func createSumOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) { if err := a.AddParentFromArgs(args); err != nil { return nil, err } s := new(SumOpSpec) if err := s.AggregateConfig.ReadArgs(args); err != nil { return s, err } return s, nil } func newSumOp() flux.OperationSpec { return new(SumOpSpec) } func (s *SumOpSpec) Kind() flux.OperationKind { return SumKind } type SumProcedureSpec struct { execute.AggregateConfig } func newSumProcedure(qs flux.OperationSpec, a plan.Administration) (plan.ProcedureSpec, error) { spec, ok := qs.(*SumOpSpec) if !ok { return nil, fmt.Errorf("invalid spec type %T", qs) } return &SumProcedureSpec{ AggregateConfig: spec.AggregateConfig, }, nil } func (s *SumProcedureSpec) Kind() plan.ProcedureKind { return SumKind } func (s *SumProcedureSpec) Copy() plan.ProcedureSpec { return &SumProcedureSpec{ AggregateConfig: s.AggregateConfig, } } func (s *SumProcedureSpec) AggregateMethod() string { return SumKind } func (s *SumProcedureSpec) ReAggregateSpec() plan.ProcedureSpec { return new(SumProcedureSpec) } type SumAgg struct{} func createSumTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { s, ok := spec.(*SumProcedureSpec) if !ok { return nil, nil, fmt.Errorf("invalid spec type %T", spec) } t, d := execute.NewAggregateTransformationAndDataset(id, mode, new(SumAgg), s.AggregateConfig, a.Allocator()) return t, d, nil } func (a *SumAgg) NewBoolAgg() execute.DoBoolAgg { return nil } func (a *SumAgg) NewIntAgg() execute.DoIntAgg { return new(SumIntAgg) } func (a *SumAgg) NewUIntAgg() execute.DoUIntAgg { return new(SumUIntAgg) } func (a *SumAgg) NewFloatAgg() execute.DoFloatAgg { return new(SumFloatAgg) } func (a *SumAgg) NewStringAgg() execute.DoStringAgg { return nil } type SumIntAgg struct { sum int64 } func (a *SumIntAgg) DoInt(vs *array.Int64) { // https://issues.apache.org/jira/browse/ARROW-4081 if vs.Len() > 0 { a.sum += math.Int64.Sum(vs) } } func (a *SumIntAgg) Type() flux.ColType { return flux.TInt } func (a *SumIntAgg) ValueInt() int64 { return a.sum } type SumUIntAgg struct { sum uint64 } func (a *SumUIntAgg) DoUInt(vs *array.Uint64) { // https://issues.apache.org/jira/browse/ARROW-4081 if vs.Len() > 0 { a.sum += math.Uint64.Sum(vs) } } func (a *SumUIntAgg) Type() flux.ColType { return flux.TUInt } func (a *SumUIntAgg) ValueUInt() uint64 { return a.sum } type SumFloatAgg struct { sum float64 } func (a *SumFloatAgg) DoFloat(vs *array.Float64) { // https://issues.apache.org/jira/browse/ARROW-4081 if vs.Len() > 0 { a.sum += math.Float64.Sum(vs) } } func (a *SumFloatAgg) Type() flux.ColType { return flux.TFloat } func (a *SumFloatAgg) ValueFloat() float64 { return a.sum }
1
9,469
in a separate PR, I'll check for a table that is empty/all-null and skip this function call completely.
influxdata-flux
go
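The patch above gives the Arrow sum a bulk fast path when the column contains no nulls and an element-wise fallback otherwise, and it does no work at all when the column is empty or all-null (the case the review comment proposes to hoist out in a later PR). Below is a hedged, self-contained Go sketch of that pattern; the column type is a stand-in, not the arrow/array API.

```go
package main

import "fmt"

// int64Column is a stand-in for arrow/array.Int64: a value buffer plus a
// validity mask, where valid[i] == false marks a null slot.
type int64Column struct {
	values []int64
	valid  []bool
}

func (c *int64Column) Len() int { return len(c.values) }

func (c *int64Column) NullN() int {
	n := 0
	for _, ok := range c.valid {
		if !ok {
			n++
		}
	}
	return n
}

// sumInt64 mirrors the patched DoInt: do nothing when the column is empty or
// all-null, take a bulk fast path when there are no nulls, and otherwise fall
// back to an element-wise loop that skips invalid slots.
func sumInt64(c *int64Column) int64 {
	var sum int64
	if l := c.Len() - c.NullN(); l > 0 {
		if c.NullN() == 0 {
			for _, v := range c.values { // stands in for math.Int64.Sum(vs)
				sum += v
			}
		} else {
			for i := 0; i < c.Len(); i++ {
				if c.valid[i] {
					sum += c.values[i]
				}
			}
		}
	}
	return sum
}

func main() {
	col := &int64Column{values: []int64{3, 0, 7}, valid: []bool{true, false, true}}
	fmt.Println(sumInt64(col)) // 10: the null slot is skipped
}
```

With the real Arrow types, the bulk branch is what math.Int64.Sum covers; the stand-in only makes the null-handling logic visible.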
@@ -47,7 +47,7 @@ public class WebUtils {
     try {
       azkabanEventReporter = ServiceProvider.SERVICE_PROVIDER
          .getInstance(AzkabanEventReporter.class);
-    } catch (NullPointerException | ConfigurationException e) {
+    } catch (Exception e) {
      Logger.getLogger(WebUtils.class.getName()).warn("AzkabanEventReporter not configured", e);
    }
  }
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.webapp.servlet; import static azkaban.Constants.ConfigurationKeys.AZKABAN_SERVER_HOST_NAME; import azkaban.ServiceProvider; import azkaban.executor.Status; import azkaban.spi.AzkabanEventReporter; import azkaban.spi.EventType; import azkaban.webapp.AzkabanWebServer; import com.google.common.base.Strings; import com.google.inject.ConfigurationException; import java.net.InetAddress; import java.net.UnknownHostException; import java.text.NumberFormat; import java.util.HashMap; import java.util.Map; import javax.servlet.http.HttpServletRequest; import org.apache.log4j.Logger; public class WebUtils { public static final String X_FORWARDED_FOR_HEADER = "X-Forwarded-For"; private static final long ONE_KB = 1024; private static final long ONE_MB = 1024 * ONE_KB; private static final long ONE_GB = 1024 * ONE_MB; private static final long ONE_TB = 1024 * ONE_GB; private static AzkabanEventReporter azkabanEventReporter; static { try { azkabanEventReporter = ServiceProvider.SERVICE_PROVIDER .getInstance(AzkabanEventReporter.class); } catch (NullPointerException | ConfigurationException e) { Logger.getLogger(WebUtils.class.getName()).warn("AzkabanEventReporter not configured", e); } } public static String displayBytes(final long sizeBytes) { final NumberFormat nf = NumberFormat.getInstance(); nf.setMaximumFractionDigits(2); if (sizeBytes >= ONE_TB) { return nf.format(sizeBytes / (double) ONE_TB) + " tb"; } else if (sizeBytes >= ONE_GB) { return nf.format(sizeBytes / (double) ONE_GB) + " gb"; } else if (sizeBytes >= ONE_MB) { return nf.format(sizeBytes / (double) ONE_MB) + " mb"; } else if (sizeBytes >= ONE_KB) { return nf.format(sizeBytes / (double) ONE_KB) + " kb"; } else { return sizeBytes + " B"; } } public static String formatStatus(final Status status) { switch (status) { case SUCCEEDED: return "Success"; case FAILED: return "Failed"; case RUNNING: return "Running"; case DISABLED: return "Disabled"; case KILLED: return "Killed"; case FAILED_FINISHING: return "Running w/Failure"; case PREPARING: return "Preparing"; case READY: return "Ready"; case PAUSED: return "Paused"; case SKIPPED: return "Skipped"; case KILLING: return "Killing"; default: } return "Unknown"; } /** * Gets the actual client IP address on a best effort basis as user could be sitting * behind a VPN. Get the IP by inspecting the X-Forwarded-For HTTP header or using the * provided 'remote IP address' from the low level TCP connection from the client. 
* * If multiple IP addresses are provided in the X-Forwarded-For header then the first one (first * hop) is used * * @param httpHeaders List of HTTP headers for the current request * @param remoteAddr The client IP address and port from the current request's TCP connection * @return The actual client IP address */ // TODO djaiswal83: Refactor this code and merge into single API public static String getRealClientIpAddr(final Map<String, String> httpHeaders, final String remoteAddr) { // If some upstream device added an X-Forwarded-For header // use it for the client ip // This will support scenarios where load balancers or gateways // front the Azkaban web server and a changing Ip address invalidates the session String clientIp = httpHeaders.getOrDefault(X_FORWARDED_FOR_HEADER, null); if (clientIp == null) { clientIp = remoteAddr; } else { // header can contain comma separated list of upstream servers - get the first one final String[] ips = clientIp.split(","); clientIp = ips[0]; } // Strip off port and only get IP address // todo: this is broken for IPv6, where e.g. a "loopback" address looks like "0:0:0:0:0:0:0:1" final String[] parts = clientIp.split(":"); clientIp = parts[0]; return clientIp; } /** * Gets the actual client IP address on a best effort basis as user could be sitting * behind a VPN. Get the IP by inspecting the X-Forwarded-For HTTP header or using the * provided 'remote IP address' from the low level TCP connection from the client. * * If multiple IP addresses are provided in the X-Forwarded-For header then the first one (first * hop) is used * * @param req HttpServletRequest * @return The actual client IP address */ public static String getRealClientIpAddr(final HttpServletRequest req) { // If some upstream device added an X-Forwarded-For header // use it for the client ip // This will support scenarios where load balancers or gateways // front the Azkaban web server and a changing Ip address invalidates // the session final HashMap<String, String> headers = new HashMap<>(); headers.put(WebUtils.X_FORWARDED_FOR_HEADER, req.getHeader(WebUtils.X_FORWARDED_FOR_HEADER.toLowerCase())); return WebUtils.getRealClientIpAddr(headers, req.getRemoteAddr()); } private static String hostName; static { try { hostName = InetAddress.getLocalHost().getCanonicalHostName(); } catch (UnknownHostException e) { hostName = "unknown"; } } /** * Report login/logout events via {@link AzkabanEventReporter}, if configured. * @param eventType login or logout * @param username if known * @param ip address of originating host * @param isSuccess AKA outcome * @param message AKA reason */ public static void reportLoginEvent(final EventType eventType, final String username, final String ip, final boolean isSuccess, final String message) { if (azkabanEventReporter != null) { final Map<String, String> metadata = new HashMap<>(); metadata.put("azkabanHost", AzkabanWebServer.getAzkabanProperties().getString(AZKABAN_SERVER_HOST_NAME, hostName)); metadata.put("sessionUser", Strings.isNullOrEmpty(username) ? "unknown" : username); metadata.put("sessionIP", ip); metadata.put("reason", message); metadata.put("appOutcome", isSuccess ? "SUCCESS" : "FAILURE"); azkabanEventReporter.report(eventType, metadata); } } public static void reportLoginEvent(final EventType eventType, final String username, final String ip) { reportLoginEvent(eventType, username, ip, true, null); } }
1
19,982
We really, really want to catch _everything_ here. It is safe to leave `azkabanEventReporter` set to null.
azkaban-azkaban
java
@@ -112,7 +112,12 @@ export function diffChildren(parentDom, newParentVNode, oldParentVNode, context,
 				// The values only have the same type when `null`.

 				outer: if (oldDom==null || oldDom.parentNode!==parentDom) {
-					parentDom.appendChild(newDom);
+					// Skip appending if the last element is the same as the new dom
+					// node. This is necessary to prevent the last element losing focus
+					// if it had focus before the diff.
+					if (parentDom.lastChild!==newDom) {
+						parentDom.appendChild(newDom);
+					}
 				}
 				else {
 					// `j<oldChildrenLength; j+=2` is an alternative to `j++<oldChildrenLength/2`
1
import { diff, unmount, applyRef } from './index'; import { coerceToVNode } from '../create-element'; import { EMPTY_OBJ, EMPTY_ARR } from '../constants'; import { removeNode } from '../util'; import { getDomSibling } from '../component'; /** * Diff the children of a virtual node * @param {import('../internal').PreactElement} parentDom The DOM element whose * children are being diffed * @param {import('../internal').VNode} newParentVNode The new virtual * node whose children should be diff'ed against oldParentVNode * @param {import('../internal').VNode} oldParentVNode The old virtual * node whose children should be diff'ed against newParentVNode * @param {object} context The current context object * @param {boolean} isSvg Whether or not this DOM node is an SVG node * @param {Array<import('../internal').PreactElement>} excessDomChildren * @param {Array<import('../internal').Component>} mounts The list of components * which have mounted * @param {Node | Text} oldDom The current attached DOM * element any new dom elements should be placed around. Likely `null` on first * render (except when hydrating). Can be a sibling DOM element when diffing * Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`. * @param {boolean} isHydrating Whether or not we are in hydration */ export function diffChildren(parentDom, newParentVNode, oldParentVNode, context, isSvg, excessDomChildren, mounts, oldDom, isHydrating) { let childVNode, i, j, oldVNode, newDom, sibDom, firstChildDom, refs; let newChildren = newParentVNode._children || toChildArray(newParentVNode.props.children, newParentVNode._children=[], coerceToVNode, true); // This is a compression of oldParentVNode!=null && oldParentVNode != EMPTY_OBJ && oldParentVNode._children || EMPTY_ARR // as EMPTY_OBJ._children should be `undefined`. let oldChildren = (oldParentVNode && oldParentVNode._children) || EMPTY_ARR; let oldChildrenLength = oldChildren.length; // Only in very specific places should this logic be invoked (top level `render` and `diffElementNodes`). // I'm using `EMPTY_OBJ` to signal when `diffChildren` is invoked in these situations. I can't use `null` // for this purpose, because `null` is a valid value for `oldDom` which can mean to skip to this logic // (e.g. if mounting a new tree in which the old DOM should be ignored (usually for Fragments). if (oldDom == EMPTY_OBJ) { if (excessDomChildren != null) { oldDom = excessDomChildren[0]; } else if (oldChildrenLength) { oldDom = getDomSibling(oldParentVNode, 0); } else { oldDom = null; } } for (i=0; i<newChildren.length; i++) { childVNode = newChildren[i] = coerceToVNode(newChildren[i]); if (childVNode!=null) { childVNode._parent = newParentVNode; childVNode._depth = newParentVNode._depth + 1; // Check if we find a corresponding element in oldChildren. // If found, delete the array item by setting to `undefined`. // We use `undefined`, as `null` is reserved for empty placeholders // (holes). oldVNode = oldChildren[i]; if (oldVNode===null || (oldVNode && childVNode.key == oldVNode.key && childVNode.type === oldVNode.type)) { oldChildren[i] = undefined; } else { // Either oldVNode === undefined or oldChildrenLength > 0, // so after this loop oldVNode == null or oldVNode is a valid value. for (j=0; j<oldChildrenLength; j++) { oldVNode = oldChildren[j]; // If childVNode is unkeyed, we only match similarly unkeyed nodes, otherwise we match by key. // We always match by type (in either case). 
if (oldVNode && childVNode.key == oldVNode.key && childVNode.type === oldVNode.type) { oldChildren[j] = undefined; break; } oldVNode = null; } } oldVNode = oldVNode || EMPTY_OBJ; // Morph the old element into the new one, but don't append it to the dom yet newDom = diff(parentDom, childVNode, oldVNode, context, isSvg, excessDomChildren, mounts, null, oldDom, isHydrating); if ((j = childVNode.ref) && oldVNode.ref != j) { (refs || (refs=[])).push(j, childVNode._component || newDom, childVNode); } // Only proceed if the vnode has not been unmounted by `diff()` above. if (newDom!=null) { if (firstChildDom == null) { firstChildDom = newDom; } if (childVNode._lastDomChild != null) { // Only Fragments or components that return Fragment like VNodes will // have a non-null _lastDomChild. Continue the diff from the end of // this Fragment's DOM tree. newDom = childVNode._lastDomChild; // Eagerly cleanup _lastDomChild. We don't need to persist the value because // it is only used by `diffChildren` to determine where to resume the diff after // diffing Components and Fragments. childVNode._lastDomChild = null; } else if (excessDomChildren==oldVNode || newDom!=oldDom || newDom.parentNode==null) { // NOTE: excessDomChildren==oldVNode above: // This is a compression of excessDomChildren==null && oldVNode==null! // The values only have the same type when `null`. outer: if (oldDom==null || oldDom.parentNode!==parentDom) { parentDom.appendChild(newDom); } else { // `j<oldChildrenLength; j+=2` is an alternative to `j++<oldChildrenLength/2` for (sibDom=oldDom, j=0; (sibDom=sibDom.nextSibling) && j<oldChildrenLength; j+=2) { if (sibDom==newDom) { break outer; } } parentDom.insertBefore(newDom, oldDom); } } oldDom = newDom.nextSibling; if (typeof newParentVNode.type == 'function') { // At this point, if childVNode._lastDomChild existed, then // newDom = childVNode._lastDomChild per line 101. Else it is // the same as childVNode._dom, meaning this component returned // only a single DOM node newParentVNode._lastDomChild = newDom; } } } } newParentVNode._dom = firstChildDom; // Remove children that are not part of any vnode. if (excessDomChildren!=null && typeof newParentVNode.type !== 'function') for (i=excessDomChildren.length; i--; ) if (excessDomChildren[i]!=null) removeNode(excessDomChildren[i]); // Remove remaining oldChildren if there are any. for (i=oldChildrenLength; i--; ) if (oldChildren[i]!=null) unmount(oldChildren[i], oldChildren[i]); // Set refs only after unmount if (refs) { for (i = 0; i < refs.length; i++) { applyRef(refs[i], refs[++i], refs[++i]); } } } /** * Flatten a virtual nodes children to a single dimensional array * @param {import('../index').ComponentChildren} children The unflattened * children of a virtual node * @param {Array<import('../internal').VNode | null>} [flattened] An flat array of children to modify * @param {typeof import('../create-element').coerceToVNode} [map] Function that * will be applied on each child if the `vnode` is not `null` * @param {boolean} [keepHoles] wether to coerce `undefined` to `null` or not. * This is needed for Components without children like `<Foo />`. */ export function toChildArray(children, flattened, map, keepHoles) { if (flattened == null) flattened = []; if (children==null || typeof children === 'boolean') { if (keepHoles) flattened.push(null); } else if (Array.isArray(children)) { for (let i=0; i < children.length; i++) { toChildArray(children[i], flattened, map, keepHoles); } } else { flattened.push(map ? 
map(children) : children); } return flattened; }
1
14,024
Can't we add the check here?
preactjs-preact
js
@@ -380,8 +380,15 @@ class RemoteConnection(object):
             # Authorization header
             headers["Authorization"] = "Basic %s" % auth

-        self._conn.request(method, parsed_url.path, data, headers)
-        resp = self._conn.getresponse()
+        if body and method != 'POST' and method != 'PUT':
+            body = None
+        try:
+            self._conn.request(method, parsed_url.path, body, headers)
+            resp = self._conn.getresponse()
+        except httplib.HTTPException:
+            self._conn.close()
+            raise
+
         statuscode = resp.status
         statusmessage = resp.msg
         LOGGER.debug('%s %s' % (statuscode, statusmessage))
1
# Copyright 2008-2009 WebDriver committers # Copyright 2008-2009 Google Inc. # Copyright 2013 BrowserStack # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import socket import string import base64 try: import http.client as httplib except ImportError: import httplib as httplib try: from urllib import request as url_request except ImportError: import urllib2 as url_request try: from urllib import parse except ImportError: import urlparse as parse from .command import Command from .errorhandler import ErrorCode from . import utils LOGGER = logging.getLogger(__name__) class Request(url_request.Request): """ Extends the url_request.Request to support all HTTP request types. """ def __init__(self, url, data=None, method=None): """ Initialise a new HTTP request. :Args: - url - String for the URL to send the request to. - data - Data to send with the request. """ if method is None: method = data is not None and 'POST' or 'GET' elif method != 'POST' and method != 'PUT': data = None self._method = method url_request.Request.__init__(self, url, data=data) def get_method(self): """ Returns the HTTP method used by this request. """ return self._method class Response(object): """ Represents an HTTP response. """ def __init__(self, fp, code, headers, url): """ Initialise a new Response. :Args: - fp - The response body file object. - code - The HTTP status code returned by the server. - headers - A dictionary of headers returned by the server. - url - URL of the retrieved resource represented by this Response. """ self.fp = fp self.read = fp.read self.code = code self.headers = headers self.url = url def close(self): """ Close the response body file object. """ self.read = None self.fp = None def info(self): """ Returns the response headers. """ return self.headers def geturl(self): """ Returns the URL for the resource returned in this response. """ return self.url class HttpErrorHandler(url_request.HTTPDefaultErrorHandler): """ A custom HTTP error handler. Used to return Response objects instead of raising an HTTPError exception. """ def http_error_default(self, req, fp, code, msg, headers): """ Default HTTP error handler. :Args: - req - The original Request object. - fp - The response body file object. - code - The HTTP status code returned by the server. - msg - The HTTP status message returned by the server. - headers - The response headers. :Returns: A new Response object. """ return Response(fp, code, headers, req.get_full_url()) class RemoteConnection(object): """ A connection with the Remote WebDriver server. Communicates with the server using the WebDriver wire protocol: http://code.google.com/p/selenium/wiki/JsonWireProtocol """ def __init__(self, remote_server_addr): # Attempt to resolve the hostname and get an IP address. 
parsed_url = parse.urlparse(remote_server_addr) addr = "" if parsed_url.hostname: try: netloc = socket.gethostbyname(parsed_url.hostname) addr = netloc if parsed_url.port: netloc += ':%d' % parsed_url.port if parsed_url.username: auth = parsed_url.username if parsed_url.password: auth += ':%s' % parsed_url.password netloc = '%s@%s' % (auth, netloc) remote_server_addr = parse.urlunparse( (parsed_url.scheme, netloc, parsed_url.path, parsed_url.params, parsed_url.query, parsed_url.fragment)) except socket.gaierror: LOGGER.info('Could not get IP address for host: %s' % parsed_url.hostname) self._url = remote_server_addr self._conn = httplib.HTTPConnection(str(addr), str(parsed_url.port)) self._commands = { Command.STATUS: ('GET', '/status'), Command.NEW_SESSION: ('POST', '/session'), Command.GET_ALL_SESSIONS: ('GET', '/sessions'), Command.QUIT: ('DELETE', '/session/$sessionId'), Command.GET_CURRENT_WINDOW_HANDLE: ('GET', '/session/$sessionId/window_handle'), Command.GET_WINDOW_HANDLES: ('GET', '/session/$sessionId/window_handles'), Command.GET: ('POST', '/session/$sessionId/url'), Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'), Command.GO_BACK: ('POST', '/session/$sessionId/back'), Command.REFRESH: ('POST', '/session/$sessionId/refresh'), Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'), Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'), Command.GET_TITLE: ('GET', '/session/$sessionId/title'), Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'), Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'), Command.SET_BROWSER_VISIBLE: ('POST', '/session/$sessionId/visible'), Command.IS_BROWSER_VISIBLE: ('GET', '/session/$sessionId/visible'), Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'), Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'), Command.GET_ACTIVE_ELEMENT: ('POST', '/session/$sessionId/element/active'), Command.FIND_CHILD_ELEMENT: ('POST', '/session/$sessionId/element/$id/element'), Command.FIND_CHILD_ELEMENTS: ('POST', '/session/$sessionId/element/$id/elements'), Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'), Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'), Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'), Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'), Command.SEND_KEYS_TO_ELEMENT: ('POST', '/session/$sessionId/element/$id/value'), Command.SEND_KEYS_TO_ACTIVE_ELEMENT: ('POST', '/session/$sessionId/keys'), Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"), Command.GET_ELEMENT_VALUE: ('GET', '/session/$sessionId/element/$id/value'), Command.GET_ELEMENT_TAG_NAME: ('GET', '/session/$sessionId/element/$id/name'), Command.IS_ELEMENT_SELECTED: ('GET', '/session/$sessionId/element/$id/selected'), Command.SET_ELEMENT_SELECTED: ('POST', '/session/$sessionId/element/$id/selected'), Command.IS_ELEMENT_ENABLED: ('GET', '/session/$sessionId/element/$id/enabled'), Command.IS_ELEMENT_DISPLAYED: ('GET', '/session/$sessionId/element/$id/displayed'), Command.GET_ELEMENT_LOCATION: ('GET', '/session/$sessionId/element/$id/location'), Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW: ('GET', '/session/$sessionId/element/$id/location_in_view'), Command.GET_ELEMENT_SIZE: ('GET', '/session/$sessionId/element/$id/size'), Command.GET_ELEMENT_ATTRIBUTE: ('GET', '/session/$sessionId/element/$id/attribute/$name'), Command.ELEMENT_EQUALS: ('GET', '/session/$sessionId/element/$id/equals/$other'), Command.GET_ALL_COOKIES: 
('GET', '/session/$sessionId/cookie'), Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'), Command.DELETE_ALL_COOKIES: ('DELETE', '/session/$sessionId/cookie'), Command.DELETE_COOKIE: ('DELETE', '/session/$sessionId/cookie/$name'), Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'), Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'), Command.CLOSE: ('DELETE', '/session/$sessionId/window'), Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY: ('GET', '/session/$sessionId/element/$id/css/$propertyName'), Command.IMPLICIT_WAIT: ('POST', '/session/$sessionId/timeouts/implicit_wait'), Command.EXECUTE_ASYNC_SCRIPT: ('POST', '/session/$sessionId/execute_async'), Command.SET_SCRIPT_TIMEOUT: ('POST', '/session/$sessionId/timeouts/async_script'), Command.SET_TIMEOUTS: ('POST', '/session/$sessionId/timeouts'), Command.DISMISS_ALERT: ('POST', '/session/$sessionId/dismiss_alert'), Command.ACCEPT_ALERT: ('POST', '/session/$sessionId/accept_alert'), Command.SET_ALERT_VALUE: ('POST', '/session/$sessionId/alert_text'), Command.GET_ALERT_TEXT: ('GET', '/session/$sessionId/alert_text'), Command.CLICK: ('POST', '/session/$sessionId/click'), Command.DOUBLE_CLICK: ('POST', '/session/$sessionId/doubleclick'), Command.MOUSE_DOWN: ('POST', '/session/$sessionId/buttondown'), Command.MOUSE_UP: ('POST', '/session/$sessionId/buttonup'), Command.MOVE_TO: ('POST', '/session/$sessionId/moveto'), Command.GET_WINDOW_SIZE: ('GET', '/session/$sessionId/window/$windowHandle/size'), Command.SET_WINDOW_SIZE: ('POST', '/session/$sessionId/window/$windowHandle/size'), Command.GET_WINDOW_POSITION: ('GET', '/session/$sessionId/window/$windowHandle/position'), Command.SET_WINDOW_POSITION: ('POST', '/session/$sessionId/window/$windowHandle/position'), Command.MAXIMIZE_WINDOW: ('POST', '/session/$sessionId/window/$windowHandle/maximize'), Command.SET_SCREEN_ORIENTATION: ('POST', '/session/$sessionId/orientation'), Command.GET_SCREEN_ORIENTATION: ('GET', '/session/$sessionId/orientation'), Command.SINGLE_TAP: ('POST', '/session/$sessionId/touch/click'), Command.TOUCH_DOWN: ('POST', '/session/$sessionId/touch/down'), Command.TOUCH_UP: ('POST', '/session/$sessionId/touch/up'), Command.TOUCH_MOVE: ('POST', '/session/$sessionId/touch/move'), Command.TOUCH_SCROLL: ('POST', '/session/$sessionId/touch/scroll'), Command.DOUBLE_TAP: ('POST', '/session/$sessionId/touch/doubleclick'), Command.LONG_PRESS: ('POST', '/session/$sessionId/touch/longclick'), Command.FLICK: ('POST', '/session/$sessionId/touch/flick'), Command.EXECUTE_SQL: ('POST', '/session/$sessionId/execute_sql'), Command.GET_LOCATION: ('GET', '/session/$sessionId/location'), Command.SET_LOCATION: ('POST', '/session/$sessionId/location'), Command.GET_APP_CACHE: ('GET', '/session/$sessionId/application_cache'), Command.GET_APP_CACHE_STATUS: ('GET', '/session/$sessionId/application_cache/status'), Command.CLEAR_APP_CACHE: ('DELETE', '/session/$sessionId/application_cache/clear'), Command.IS_BROWSER_ONLINE: ('GET', '/session/$sessionId/browser_connection'), Command.SET_BROWSER_ONLINE: ('POST', '/session/$sessionId/browser_connection'), Command.GET_LOCAL_STORAGE_ITEM: ('GET', '/session/$sessionId/local_storage/key/$key'), Command.REMOVE_LOCAL_STORAGE_ITEM: ('DELETE', '/session/$sessionId/local_storage/key/$key'), Command.GET_LOCAL_STORAGE_KEYS: ('GET', '/session/$sessionId/local_storage'), Command.SET_LOCAL_STORAGE_ITEM: ('POST', '/session/$sessionId/local_storage'), Command.CLEAR_LOCAL_STORAGE: ('DELETE', '/session/$sessionId/local_storage'), 
Command.GET_LOCAL_STORAGE_SIZE: ('GET', '/session/$sessionId/local_storage/size'), Command.GET_SESSION_STORAGE_ITEM: ('GET', '/session/$sessionId/session_storage/key/$key'), Command.REMOVE_SESSION_STORAGE_ITEM: ('DELETE', '/session/$sessionId/session_storage/key/$key'), Command.GET_SESSION_STORAGE_KEYS: ('GET', '/session/$sessionId/session_storage'), Command.SET_SESSION_STORAGE_ITEM: ('POST', '/session/$sessionId/session_storage'), Command.CLEAR_SESSION_STORAGE: ('DELETE', '/session/$sessionId/session_storage'), Command.GET_SESSION_STORAGE_SIZE: ('GET', '/session/$sessionId/session_storage/size'), Command.GET_LOG: ('POST', '/session/$sessionId/log'), Command.GET_AVAILABLE_LOG_TYPES: ('GET', '/session/$sessionId/log/types'), } def execute(self, command, params): """ Send a command to the remote server. Any path subtitutions required for the URL mapped to the command should be included in the command parameters. :Args: - command - A string specifying the command to execute. - params - A dictionary of named parameters to send with the command as its JSON payload. """ command_info = self._commands[command] assert command_info is not None, 'Unrecognised command %s' % command data = utils.dump_json(params) path = string.Template(command_info[1]).substitute(params) url = '%s%s' % (self._url, path) return self._request(url, method=command_info[0], data=data) def _request(self, url, data=None, method=None): """ Send an HTTP request to the remote server. :Args: - method - A string for the HTTP method to send the request with. - url - The URL to send the request to. - body - The message body to send. :Returns: A dictionary with the server's parsed JSON response. """ LOGGER.debug('%s %s %s' % (method, url, data)) parsed_url = parse.urlparse(url) headers = {"Connection": "keep-alive", method: parsed_url.path, "User-Agent": "Python http auth", "Content-type": "application/json;charset=\"UTF-8\"", "Accept": "application/json"} # for basic auth if parsed_url.username: auth = base64.standard_b64encode('%s:%s' % (parsed_url.username, parsed_url.password)).replace('\n', '') # Authorization header headers["Authorization"] = "Basic %s" % auth self._conn.request(method, parsed_url.path, data, headers) resp = self._conn.getresponse() statuscode = resp.status statusmessage = resp.msg LOGGER.debug('%s %s' % (statuscode, statusmessage)) data = resp.read() try: if 399 < statuscode < 500: return {'status': statuscode, 'value': data} if 300 <= statuscode < 304: return self._request(resp.getheader('location'), method='GET') body = data.decode('utf-8').replace('\x00', '').strip() content_type = [] if resp.getheader('Content-Type') is not None: content_type = resp.getheader('Content-Type').split(';') if not any([x.startswith('image/png') for x in content_type]): try: data = utils.load_json(body.strip()) except ValueError: if 199 < statuscode < 300: status = ErrorCode.SUCCESS else: status = ErrorCode.UNKNOWN_ERROR return {'status': status, 'value': body.strip()} assert type(data) is dict, ( 'Invalid server response body: %s' % body) assert 'status' in data, ( 'Invalid server response; no status: %s' % body) # Some of the drivers incorrectly return a response # with no 'value' field when they should return null. if 'value' not in data: data['value'] = None return data else: data = {'status': 0, 'value': body.strip()} return data finally: LOGGER.debug("Finished Request") resp.close()
1
10,707
body is being used here for the first time without ever being populated. This will error. To run tests, do `./go clean test_py` and that will run the Firefox tests
SeleniumHQ-selenium
py
@@ -40,6 +40,8 @@ type GenericDeploymentSpec struct {
 	// The maximum length of time to execute deployment before giving up.
 	// Default is 6h.
 	Timeout Duration `json:"timeout,omitempty" default:"6h"`
+	// List of encrypted secrets and targets that should be decoded before using.
+	Encryption *SecretEncryption `json:"encryption"`
 }

 func (s *GenericDeploymentSpec) Validate() error {
1
// Copyright 2020 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "encoding/json" "fmt" "time" "github.com/pipe-cd/pipe/pkg/model" ) const ( defaultWaitApprovalTimeout = Duration(6 * time.Hour) defaultAnalysisQueryTimeout = Duration(30 * time.Second) ) type GenericDeploymentSpec struct { // Forcibly use QuickSync or Pipeline when commit message matched the specified pattern. CommitMatcher DeploymentCommitMatcher `json:"commitMatcher"` // Pipeline for deploying progressively. Pipeline *DeploymentPipeline `json:"pipeline"` // The list of sealed secrets that should be decrypted. SealedSecrets []SealedSecretMapping `json:"sealedSecrets"` // List of directories or files where their changes will trigger the deployment. // Regular expression can be used. TriggerPaths []string `json:"triggerPaths,omitempty"` // The maximum length of time to execute deployment before giving up. // Default is 6h. Timeout Duration `json:"timeout,omitempty" default:"6h"` } func (s *GenericDeploymentSpec) Validate() error { if s.Pipeline != nil { for _, stage := range s.Pipeline.Stages { if stage.AnalysisStageOptions != nil { if err := stage.AnalysisStageOptions.Validate(); err != nil { return err } } } } return nil } func (s GenericDeploymentSpec) GetStage(index int32) (PipelineStage, bool) { if s.Pipeline == nil { return PipelineStage{}, false } if int(index) >= len(s.Pipeline.Stages) { return PipelineStage{}, false } return s.Pipeline.Stages[index], true } // HasStage checks if the given stage is included in the pipeline. func (s GenericDeploymentSpec) HasStage(stage model.Stage) bool { if s.Pipeline == nil { return false } for _, s := range s.Pipeline.Stages { if s.Name == stage { return true } } return false } // DeploymentCommitMatcher provides a way to decide how to deploy. type DeploymentCommitMatcher struct { // It makes sure to perform syncing if the commit message matches this regular expression. QuickSync string `json:"quickSync"` // It makes sure to perform pipeline if the commit message matches this regular expression. Pipeline string `json:"pipeline"` } // DeploymentPipeline represents the way to deploy the application. // The pipeline is triggered by changes in any of the following objects: // - Target PodSpec (Target can be Deployment, DaemonSet, StatefulSet) // - ConfigMaps, Secrets that are mounted as volumes or envs in the deployment. type DeploymentPipeline struct { Stages []PipelineStage `json:"stages"` } // PipelineStage represents a single stage of a pipeline. // This is used as a generic struct for all stage type. 
type PipelineStage struct { Id string Name model.Stage Desc string Timeout Duration WaitStageOptions *WaitStageOptions WaitApprovalStageOptions *WaitApprovalStageOptions AnalysisStageOptions *AnalysisStageOptions K8sPrimaryRolloutStageOptions *K8sPrimaryRolloutStageOptions K8sCanaryRolloutStageOptions *K8sCanaryRolloutStageOptions K8sCanaryCleanStageOptions *K8sCanaryCleanStageOptions K8sBaselineRolloutStageOptions *K8sBaselineRolloutStageOptions K8sBaselineCleanStageOptions *K8sBaselineCleanStageOptions K8sTrafficRoutingStageOptions *K8sTrafficRoutingStageOptions TerraformSyncStageOptions *TerraformSyncStageOptions TerraformPlanStageOptions *TerraformPlanStageOptions TerraformApplyStageOptions *TerraformApplyStageOptions CloudRunSyncStageOptions *CloudRunSyncStageOptions CloudRunPromoteStageOptions *CloudRunPromoteStageOptions LambdaSyncStageOptions *LambdaSyncStageOptions LambdaCanaryRolloutStageOptions *LambdaCanaryRolloutStageOptions LambdaPromoteStageOptions *LambdaPromoteStageOptions ECSSyncStageOptions *ECSSyncStageOptions ECSCanaryRolloutStageOptions *ECSCanaryRolloutStageOptions ECSPrimaryRolloutStageOptions *ECSPrimaryRolloutStageOptions ECSCanaryCleanStageOptions *ECSCanaryCleanStageOptions ECSTrafficRoutingStageOptions *ECSTrafficRoutingStageOptions } type genericPipelineStage struct { Id string `json:"id"` Name model.Stage `json:"name"` Desc string `json:"desc,omitempty"` Timeout Duration `json:"timeout"` With json.RawMessage `json:"with"` } func (s *PipelineStage) UnmarshalJSON(data []byte) error { var err error gs := genericPipelineStage{} if err = json.Unmarshal(data, &gs); err != nil { return err } s.Id = gs.Id s.Name = gs.Name s.Desc = gs.Desc s.Timeout = gs.Timeout switch s.Name { case model.StageWait: s.WaitStageOptions = &WaitStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.WaitStageOptions) } case model.StageWaitApproval: s.WaitApprovalStageOptions = &WaitApprovalStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.WaitApprovalStageOptions) } if s.WaitApprovalStageOptions.Timeout <= 0 { s.WaitApprovalStageOptions.Timeout = defaultWaitApprovalTimeout } case model.StageAnalysis: s.AnalysisStageOptions = &AnalysisStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.AnalysisStageOptions) } for i := 0; i < len(s.AnalysisStageOptions.Metrics); i++ { if s.AnalysisStageOptions.Metrics[i].Timeout <= 0 { s.AnalysisStageOptions.Metrics[i].Timeout = defaultAnalysisQueryTimeout } } case model.StageK8sPrimaryRollout: s.K8sPrimaryRolloutStageOptions = &K8sPrimaryRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sPrimaryRolloutStageOptions) } case model.StageK8sCanaryRollout: s.K8sCanaryRolloutStageOptions = &K8sCanaryRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sCanaryRolloutStageOptions) } case model.StageK8sCanaryClean: s.K8sCanaryCleanStageOptions = &K8sCanaryCleanStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sCanaryCleanStageOptions) } case model.StageK8sBaselineRollout: s.K8sBaselineRolloutStageOptions = &K8sBaselineRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sBaselineRolloutStageOptions) } case model.StageK8sBaselineClean: s.K8sBaselineCleanStageOptions = &K8sBaselineCleanStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sBaselineCleanStageOptions) } case model.StageK8sTrafficRouting: s.K8sTrafficRoutingStageOptions = &K8sTrafficRoutingStageOptions{} if len(gs.With) > 0 { err = 
json.Unmarshal(gs.With, s.K8sTrafficRoutingStageOptions) } case model.StageTerraformSync: s.TerraformSyncStageOptions = &TerraformSyncStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.TerraformSyncStageOptions) } case model.StageTerraformPlan: s.TerraformPlanStageOptions = &TerraformPlanStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.TerraformPlanStageOptions) } case model.StageTerraformApply: s.TerraformApplyStageOptions = &TerraformApplyStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.TerraformApplyStageOptions) } case model.StageCloudRunSync: s.CloudRunSyncStageOptions = &CloudRunSyncStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.CloudRunSyncStageOptions) } case model.StageCloudRunPromote: s.CloudRunPromoteStageOptions = &CloudRunPromoteStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.CloudRunPromoteStageOptions) } case model.StageLambdaSync: s.LambdaSyncStageOptions = &LambdaSyncStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.LambdaSyncStageOptions) } case model.StageLambdaPromote: s.LambdaPromoteStageOptions = &LambdaPromoteStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.LambdaPromoteStageOptions) } case model.StageLambdaCanaryRollout: s.LambdaCanaryRolloutStageOptions = &LambdaCanaryRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.LambdaCanaryRolloutStageOptions) } case model.StageECSSync: s.ECSSyncStageOptions = &ECSSyncStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.ECSSyncStageOptions) } case model.StageECSCanaryRollout: s.ECSCanaryRolloutStageOptions = &ECSCanaryRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.ECSCanaryRolloutStageOptions) } case model.StageECSPrimaryRollout: s.ECSPrimaryRolloutStageOptions = &ECSPrimaryRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.ECSPrimaryRolloutStageOptions) } case model.StageECSCanaryClean: s.ECSCanaryCleanStageOptions = &ECSCanaryCleanStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.ECSCanaryCleanStageOptions) } case model.StageECSTrafficRouting: s.ECSTrafficRoutingStageOptions = &ECSTrafficRoutingStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.ECSTrafficRoutingStageOptions) } default: err = fmt.Errorf("unsupported stage name: %s", s.Name) } return err } // WaitStageOptions contains all configurable values for a WAIT stage. type WaitStageOptions struct { Duration Duration `json:"duration"` } // WaitStageOptions contains all configurable values for a WAIT_APPROVAL stage. type WaitApprovalStageOptions struct { // The maximum length of time to wait before giving up. // Defaults to 6h. Timeout Duration `json:"timeout"` Approvers []string `json:"approvers"` } // AnalysisStageOptions contains all configurable values for a K8S_ANALYSIS stage. type AnalysisStageOptions struct { // How long the analysis process should be executed. 
Duration Duration `json:"duration"` // TODO: Consider about how to handle a pod restart // possible count of pod restarting RestartThreshold int `json:"restartThreshold"` Metrics []TemplatableAnalysisMetrics `json:"metrics"` Logs []TemplatableAnalysisLog `json:"logs"` Https []TemplatableAnalysisHTTP `json:"https"` Dynamic AnalysisDynamic `json:"dynamic"` } func (a *AnalysisStageOptions) Validate() error { if a.Duration == 0 { return fmt.Errorf("the ANALYSIS stage requires duration field") } return nil } type AnalysisTemplateRef struct { Name string `json:"name"` Args map[string]string `json:"args"` } // TemplatableAnalysisMetrics wraps AnalysisMetrics to allow specify template to use. type TemplatableAnalysisMetrics struct { AnalysisMetrics Template AnalysisTemplateRef `json:"template"` } // TemplatableAnalysisLog wraps AnalysisLog to allow specify template to use. type TemplatableAnalysisLog struct { AnalysisLog Template AnalysisTemplateRef `json:"template"` } // TemplatableAnalysisHTTP wraps AnalysisHTTP to allow specify template to use. type TemplatableAnalysisHTTP struct { AnalysisHTTP Template AnalysisTemplateRef `json:"template"` } type SealedSecretMapping struct { // Relative path from the application directory to sealed secret file. Path string `json:"path"` // The filename for the decrypted secret. // Empty means the same name with the sealed secret file. OutFilename string `json:"outFilename"` // The directory name where to put the decrypted secret. // Empty means the same directory with the sealed secret file. OutDir string `json:"outDir"` }
1
17,285
Tell me if you have a better field name for this.
pipe-cd-pipe
go
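To make the review question about the field name concrete, here is a hedged sketch of how the new `encryption` block might be consumed. `SecretEncryption` is defined elsewhere in the PipeCD codebase and is not shown in this file, so the struct below uses hypothetical, illustrative field names rather than the project's actual type.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// secretEncryption is a hypothetical stand-in for PipeCD's SecretEncryption
// type, which is not shown in this file; its field names are illustrative only.
type secretEncryption struct {
	EncryptedSecrets  map[string]string `json:"encryptedSecrets"`
	DecryptionTargets []string          `json:"decryptionTargets"`
}

// genericDeploymentSpec sketches only the part of GenericDeploymentSpec that
// the patch touches: the optional encryption block (Timeout is simplified to
// a plain string here instead of the config package's Duration type).
type genericDeploymentSpec struct {
	Timeout    string            `json:"timeout,omitempty"`
	Encryption *secretEncryption `json:"encryption"`
}

func main() {
	raw := []byte(`{
		"timeout": "6h",
		"encryption": {
			"encryptedSecrets": {"password": "AQCD..."},
			"decryptionTargets": ["deployment.yaml"]
		}
	}`)

	var spec genericDeploymentSpec
	if err := json.Unmarshal(raw, &spec); err != nil {
		panic(err)
	}
	fmt.Println(spec.Encryption.DecryptionTargets) // [deployment.yaml]
}
```

Renaming the field (for example to `secretEncryption`) would only change the struct field and its JSON tag in a sketch like this.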
@@ -1084,8 +1084,8 @@ partial class Build
                 "Samples.AspNetCoreMvc30" => Framework == TargetFramework.NETCOREAPP3_0,
                 "Samples.AspNetCoreMvc31" => Framework == TargetFramework.NETCOREAPP3_1,
                 "Samples.AspNetCore2" => Framework == TargetFramework.NETCOREAPP2_1,
-                "Samples.AspNetCore5" => Framework == TargetFramework.NET5_0 || Framework == TargetFramework.NETCOREAPP3_1 || Framework == TargetFramework.NETCOREAPP3_0,
-                "Samples.GraphQL4" => Framework == TargetFramework.NETCOREAPP3_1 || Framework == TargetFramework.NET5_0,
+                "Samples.AspNetCore5" => Framework == TargetFramework.NET6_0 || Framework == TargetFramework.NET5_0 || Framework == TargetFramework.NETCOREAPP3_1 || Framework == TargetFramework.NETCOREAPP3_0,
+                "Samples.GraphQL4" => Framework == TargetFramework.NETCOREAPP3_1 || Framework == TargetFramework.NET5_0 || Framework == TargetFramework.NET6_0,
                 var name when projectsToSkip.Contains(name) => false,
                 var name when multiPackageProjects.Contains(name) => false,
                 "Samples.AspNetCoreRazorPages" => true,
1
using System; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; using System.Net; using System.Runtime.InteropServices; using System.Text.Json; using System.Text.RegularExpressions; using Nuke.Common; using Nuke.Common.IO; using Nuke.Common.ProjectModel; using Nuke.Common.Tooling; using Nuke.Common.Tools.DotNet; using Nuke.Common.Tools.MSBuild; using Nuke.Common.Tools.NuGet; using Nuke.Common.Utilities.Collections; using static Nuke.Common.EnvironmentInfo; using static Nuke.Common.IO.FileSystemTasks; using static Nuke.Common.IO.PathConstruction; using static Nuke.Common.IO.CompressionTasks; using static Nuke.Common.Tools.DotNet.DotNetTasks; using static Nuke.Common.Tools.MSBuild.MSBuildTasks; using static CustomDotNetTasks; // #pragma warning disable SA1306 // #pragma warning disable SA1134 // #pragma warning disable SA1111 // #pragma warning disable SA1400 // #pragma warning disable SA1401 partial class Build { [Solution("Datadog.Trace.sln")] readonly Solution Solution; AbsolutePath TracerDirectory => RootDirectory / "tracer"; AbsolutePath SharedDirectory => RootDirectory / "shared"; AbsolutePath ProfilerDirectory => ProfilerSrcDirectory ?? RootDirectory / ".." / "dd-continuous-profiler-dotnet"; AbsolutePath MsBuildProject => TracerDirectory / "Datadog.Trace.proj"; AbsolutePath OutputDirectory => TracerDirectory / "bin"; AbsolutePath TracerHomeDirectory => TracerHome ?? (OutputDirectory / "tracer-home"); AbsolutePath SymbolsDirectory => TracerHome ?? (OutputDirectory / "symbols"); AbsolutePath DDTracerHomeDirectory => DDTracerHome ?? (OutputDirectory / "dd-tracer-home"); AbsolutePath ArtifactsDirectory => Artifacts ?? (OutputDirectory / "artifacts"); AbsolutePath WindowsTracerHomeZip => ArtifactsDirectory / "windows-tracer-home.zip"; AbsolutePath WindowsSymbolsZip => ArtifactsDirectory / "windows-native-symbols.zip"; AbsolutePath BuildDataDirectory => TracerDirectory / "build_data"; AbsolutePath MonitoringHomeDirectory => MonitoringHome ?? (SharedDirectory / "bin" / "monitoring-home"); AbsolutePath ProfilerHomeDirectory => ProfilerHome ?? RootDirectory / ".." / "_build" / "DDProf-Deploy"; const string LibDdwafVersion = "1.0.10"; AbsolutePath LibDdwafDirectory => (NugetPackageDirectory ?? (RootDirectory / "packages")) / $"libddwaf.{LibDdwafVersion}"; AbsolutePath SourceDirectory => TracerDirectory / "src"; AbsolutePath BuildDirectory => TracerDirectory / "build"; AbsolutePath TestsDirectory => TracerDirectory / "test"; AbsolutePath DistributionHomeDirectory => Solution.GetProject(Projects.DatadogMonitoringDistribution).Directory / "home"; AbsolutePath TempDirectory => (AbsolutePath)(IsWin ? Path.GetTempPath() : "/tmp/"); string TracerLogDirectory => IsWin ? Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData), "Datadog .NET Tracer", "logs") : "/var/log/datadog/dotnet/"; Project NativeProfilerProject => Solution.GetProject(Projects.ClrProfilerNative); Project NativeLoaderProject => Solution.GetProject(Projects.NativeLoader); [LazyPathExecutable(name: "cmake")] readonly Lazy<Tool> CMake; [LazyPathExecutable(name: "make")] readonly Lazy<Tool> Make; [LazyPathExecutable(name: "fpm")] readonly Lazy<Tool> Fpm; [LazyPathExecutable(name: "gzip")] readonly Lazy<Tool> GZip; [LazyPathExecutable(name: "cmd")] readonly Lazy<Tool> Cmd; IEnumerable<MSBuildTargetPlatform> ArchitecturesForPlatform => Equals(TargetPlatform, MSBuildTargetPlatform.x64) ? 
new[] { MSBuildTargetPlatform.x64, MSBuildTargetPlatform.x86 } : new[] { MSBuildTargetPlatform.x86 }; bool IsArm64 => RuntimeInformation.ProcessArchitecture == Architecture.Arm64; string LinuxArchitectureIdentifier => IsArm64 ? "arm64" : TargetPlatform.ToString(); IEnumerable<string> LinuxPackageTypes => IsAlpine ? new[] { "tar" } : new[] { "deb", "rpm", "tar" }; IEnumerable<Project> ProjectsToPack => new[] { Solution.GetProject(Projects.DatadogTrace), Solution.GetProject(Projects.DatadogTraceOpenTracing), }; Project[] ParallelIntegrationTests => new[] { Solution.GetProject(Projects.TraceIntegrationTests), Solution.GetProject(Projects.OpenTracingIntegrationTests), }; Project[] ClrProfilerIntegrationTests => new[] { Solution.GetProject(Projects.ClrProfilerIntegrationTests), Solution.GetProject(Projects.AppSecIntegrationTests), }; readonly IEnumerable<TargetFramework> TargetFrameworks = new[] { TargetFramework.NET461, TargetFramework.NETSTANDARD2_0, TargetFramework.NETCOREAPP3_1, }; Target CreateRequiredDirectories => _ => _ .Unlisted() .Executes(() => { EnsureExistingDirectory(TracerHomeDirectory); EnsureExistingDirectory(ArtifactsDirectory); EnsureExistingDirectory(DDTracerHomeDirectory); EnsureExistingDirectory(BuildDataDirectory); }); Target Restore => _ => _ .After(Clean) .Unlisted() .Executes(() => { if (IsWin) { NuGetTasks.NuGetRestore(s => s .SetTargetPath(Solution) .SetVerbosity(NuGetVerbosity.Normal) .When(!string.IsNullOrEmpty(NugetPackageDirectory), o => o.SetPackagesDirectory(NugetPackageDirectory))); } else { DotNetRestore(s => s .SetProjectFile(Solution) .SetVerbosity(DotNetVerbosity.Normal) // .SetTargetPlatform(Platform) // necessary to ensure we restore every project .SetProperty("configuration", BuildConfiguration.ToString()) .When(!string.IsNullOrEmpty(NugetPackageDirectory), o => o.SetPackageDirectory(NugetPackageDirectory))); } }); Target CompileNativeSrcWindows => _ => _ .Unlisted() .After(CompileManagedSrc) .OnlyWhenStatic(() => IsWin) .Executes(() => { // If we're building for x64, build for x86 too var platforms = Equals(TargetPlatform, MSBuildTargetPlatform.x64) ? 
new[] { MSBuildTargetPlatform.x64, MSBuildTargetPlatform.x86 } : new[] { MSBuildTargetPlatform.x86 }; // Can't use dotnet msbuild, as needs to use the VS version of MSBuild // Build native tracer assets MSBuild(s => s .SetTargetPath(MsBuildProject) .SetConfiguration(BuildConfiguration) .SetMSBuildPath() .SetTargets("BuildCppSrc") .DisableRestore() .SetMaxCpuCount(null) .CombineWith(platforms, (m, platform) => m .SetTargetPlatform(platform))); }); Target CompileNativeSrcLinux => _ => _ .Unlisted() .After(CompileManagedSrc) .OnlyWhenStatic(() => IsLinux) .Executes(() => { var buildDirectory = NativeProfilerProject.Directory / "build"; EnsureExistingDirectory(buildDirectory); CMake.Value( arguments: "../ -DCMAKE_BUILD_TYPE=Release", workingDirectory: buildDirectory); Make.Value(workingDirectory: buildDirectory); }); Target CompileNativeSrcMacOs => _ => _ .Unlisted() .After(CompileManagedSrc) .OnlyWhenStatic(() => IsOsx) .Executes(() => { var nativeProjectDirectory = NativeProfilerProject.Directory; CMake.Value(arguments: ".", workingDirectory: nativeProjectDirectory); Make.Value(workingDirectory: nativeProjectDirectory); }); Target CompileNativeSrc => _ => _ .Unlisted() .Description("Compiles the native loader") .DependsOn(CompileNativeSrcWindows) .DependsOn(CompileNativeSrcMacOs) .DependsOn(CompileNativeSrcLinux); Target CompileManagedSrc => _ => _ .Unlisted() .Description("Compiles the managed code in the src directory") .After(CreateRequiredDirectories) .After(Restore) .Executes(() => { // Always AnyCPU DotNetMSBuild(x => x .SetTargetPath(MsBuildProject) .SetTargetPlatformAnyCPU() .SetConfiguration(BuildConfiguration) .DisableRestore() .SetTargets("BuildCsharpSrc") ); }); Target CompileNativeTestsWindows => _ => _ .Unlisted() .After(CompileNativeSrc) .OnlyWhenStatic(() => IsWin) .Executes(() => { // If we're building for x64, build for x86 too var platforms = Equals(TargetPlatform, MSBuildTargetPlatform.x64) ? new[] { MSBuildTargetPlatform.x64, MSBuildTargetPlatform.x86 } : new[] { MSBuildTargetPlatform.x86 }; // Can't use dotnet msbuild, as needs to use the VS version of MSBuild MSBuild(s => s .SetTargetPath(MsBuildProject) .SetConfiguration(BuildConfiguration) .SetMSBuildPath() .SetTargets("BuildCppTests") .DisableRestore() .SetMaxCpuCount(null) .CombineWith(platforms, (m, platform) => m .SetTargetPlatform(platform))); }); Target CompileNativeTestsLinux => _ => _ .Unlisted() .After(CompileNativeSrc) .OnlyWhenStatic(() => IsLinux) .Executes(() => { Logger.Error("We don't currently run unit tests on Linux"); }); Target CompileNativeTests => _ => _ .Unlisted() .Description("Compiles the native loader unit tests") .DependsOn(CompileNativeTestsWindows) .DependsOn(CompileNativeTestsLinux); Target DownloadLibDdwaf => _ => _ .Unlisted() .After(CreateRequiredDirectories) .Executes(() => { var wc = new WebClient(); var libDdwafUri = new Uri($"https://www.nuget.org/api/v2/package/libddwaf/{LibDdwafVersion}"); var libDdwafZip = TempDirectory / "libddwaf.zip"; wc.DownloadFile(libDdwafUri, libDdwafZip); Console.WriteLine($"{libDdwafZip} downloaded. 
Extracting to {LibDdwafDirectory}..."); UncompressZip(libDdwafZip, LibDdwafDirectory); }); Target CopyLibDdwaf => _ => _ .Unlisted() .After(Clean) .After(DownloadLibDdwaf) .OnlyWhenStatic(() => !IsArm64) // not supported yet .Executes(() => { if (IsWin) { foreach (var architecture in new[] {"win-x86", "win-x64"}) { var source = LibDdwafDirectory / "runtimes" / architecture / "native" / "ddwaf.dll"; var dest = TracerHomeDirectory / architecture; Logger.Info($"Copying '{source}' to '{dest}'"); CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite); } } else { var (architecture, ext) = GetUnixArchitectureAndExtension(includeMuslSuffixOnAlpine: true); var ddwafFileName = $"libddwaf.{ext}"; var source = LibDdwafDirectory / "runtimes" / architecture / "native" / ddwafFileName; var dest = TracerHomeDirectory; Logger.Info($"Copying '{source}' to '{dest}'"); CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite); } }); Target PublishManagedProfiler => _ => _ .Unlisted() .After(CompileManagedSrc) .Executes(() => { var targetFrameworks = IsWin ? TargetFrameworks : TargetFrameworks.Where(framework => !framework.ToString().StartsWith("net4")); // Publish Datadog.Trace.MSBuild which includes Datadog.Trace and Datadog.Trace.AspNet DotNetPublish(s => s .SetProject(Solution.GetProject(Projects.DatadogTraceMsBuild)) .SetConfiguration(BuildConfiguration) .SetTargetPlatformAnyCPU() .EnableNoBuild() .EnableNoRestore() .CombineWith(targetFrameworks, (p, framework) => p .SetFramework(framework) .SetOutput(TracerHomeDirectory / framework))); }); Target PublishNativeSymbolsWindows => _ => _ .Unlisted() .OnlyWhenStatic(() => IsWin) .After(CompileNativeSrc, PublishManagedProfiler) .Executes(() => { foreach (var architecture in ArchitecturesForPlatform) { var source = NativeProfilerProject.Directory / "bin" / BuildConfiguration / architecture.ToString() / $"{NativeProfilerProject.Name}.pdb"; var dest = SymbolsDirectory / $"win-{architecture}"; Logger.Info($"Copying '{source}' to '{dest}'"); CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite); } }); Target PublishNativeProfilerWindows => _ => _ .Unlisted() .OnlyWhenStatic(() => IsWin) .After(CompileNativeSrc, PublishManagedProfiler) .Executes(() => { foreach (var architecture in ArchitecturesForPlatform) { // Copy native tracer assets var source = NativeProfilerProject.Directory / "bin" / BuildConfiguration / architecture.ToString() / $"{NativeProfilerProject.Name}.dll"; var dest = TracerHomeDirectory / $"win-{architecture}"; Logger.Info($"Copying '{source}' to '{dest}'"); CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite); } }); Target PublishNativeProfilerLinux => _ => _ .Unlisted() .OnlyWhenStatic(() => IsLinux) .After(CompileNativeSrc, PublishManagedProfiler) .Executes(() => { // copy createLogPath.sh CopyFileToDirectory( BuildDirectory / "artifacts" / "createLogPath.sh", TracerHomeDirectory, FileExistsPolicy.Overwrite); // Copy Native file CopyFileToDirectory( NativeProfilerProject.Directory / "build" / "bin" / $"{NativeProfilerProject.Name}.so", TracerHomeDirectory, FileExistsPolicy.Overwrite); }); Target PublishNativeProfilerMacOs => _ => _ .Unlisted() .OnlyWhenStatic(() => IsOsx) .After(CompileNativeSrc, PublishManagedProfiler) .Executes(() => { // copy createLogPath.sh CopyFileToDirectory( BuildDirectory / "artifacts" / "createLogPath.sh", TracerHomeDirectory, FileExistsPolicy.Overwrite); // Create home directory CopyFileToDirectory( NativeProfilerProject.Directory / "bin" / $"{NativeProfilerProject.Name}.dylib", 
TracerHomeDirectory, FileExistsPolicy.Overwrite); }); Target PublishNativeProfiler => _ => _ .Unlisted() .DependsOn(PublishNativeProfilerWindows) .DependsOn(PublishNativeProfilerLinux) .DependsOn(PublishNativeProfilerMacOs); Target CreateDdTracerHome => _ => _ .Unlisted() .After(PublishNativeProfiler, PublishManagedProfiler, DownloadLibDdwaf, CopyLibDdwaf) .Executes(() => { // start by copying everything from the tracer home dir CopyDirectoryRecursively(TracerHomeDirectory, DDTracerHomeDirectory, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite); if (IsWin) { // windows already has the expected layout return; } // Move the native file to the architecture-specific folder var (architecture, ext) = GetUnixArchitectureAndExtension(includeMuslSuffixOnAlpine: false); var profilerFileName = $"{NativeProfilerProject.Name}.{ext}"; var ddwafFileName = $"libddwaf.{ext}"; var outputDir = DDTracerHomeDirectory / architecture; EnsureCleanDirectory(outputDir); MoveFile( DDTracerHomeDirectory / profilerFileName, outputDir / profilerFileName); // won't exist yet for arm64 builds var srcDdwafFile = DDTracerHomeDirectory / ddwafFileName; if (File.Exists(srcDdwafFile)) { MoveFile( srcDdwafFile, DDTracerHomeDirectory / architecture / ddwafFileName); } }); Target BuildMsi => _ => _ .Unlisted() .Description("Builds the .msi files from the compiled tracer home directory") .After(BuildTracerHome) .OnlyWhenStatic(() => IsWin) .Executes(() => { MSBuild(s => s .SetTargetPath(Solution.GetProject(Projects.WindowsInstaller)) .SetConfiguration(BuildConfiguration) .SetMSBuildPath() .AddProperty("RunWixToolsOutOfProc", true) .SetProperty("TracerHomeDirectory", TracerHomeDirectory) .SetProperty("LibDdwafDirectory", LibDdwafDirectory) .SetMaxCpuCount(null) .CombineWith(ArchitecturesForPlatform, (o, arch) => o .SetProperty("MsiOutputPath", ArtifactsDirectory / arch.ToString()) .SetTargetPlatform(arch)), degreeOfParallelism: 2); }); Target BuildMsiBeta => _ => _ .Unlisted() .Description("Builds the .msi files from the repo") .After(BuildTracerHome, BuildProfilerHome, BuildMonitoringHome) .OnlyWhenStatic(() => IsWin) .Executes(() => { MSBuild(s => s .SetTargetPath(SharedDirectory / "src" / "msi-installer" / "WindowsInstaller.wixproj") .SetConfiguration(BuildConfiguration) .SetMSBuildPath() .AddProperty("RunWixToolsOutOfProc", true) .SetProperty("TracerHomeDirectory", TracerHomeDirectory) .SetProperty("LibDdwafDirectory", LibDdwafDirectory) .SetProperty("ProfilerHomeDirectory", ProfilerHomeDirectory) .SetProperty("MonitoringHomeDirectory", MonitoringHomeDirectory) .SetProperty("BetaMsiSuffix", BetaMsiSuffix) .SetMaxCpuCount(null) .CombineWith(ArchitecturesForPlatform, (o, arch) => o .SetProperty("MsiOutputPath", ArtifactsDirectory / arch.ToString()) .SetTargetPlatform(arch)), degreeOfParallelism: 2); }); Target CreateDistributionHome => _ => _ .Unlisted() .After(BuildTracerHome) .Executes(() => { // Copy existing files from tracer home to the Distribution location CopyDirectoryRecursively(TracerHomeDirectory, DistributionHomeDirectory, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite); // Ensure createLogPath.sh is copied to the directory CopyFileToDirectory( BuildDirectory / "artifacts" / "createLogPath.sh", DistributionHomeDirectory, FileExistsPolicy.Overwrite); }); /// <summary> /// This target is a bit of a hack, but means that we actually use the All CPU builds in intgration tests etc /// </summary> Target CreatePlatformlessSymlinks => _ => _ .Description("Copies the build output from 'All CPU' platforms to 
platform-specific folders") .Unlisted() .After(CompileManagedSrc) .After(CompileDependencyLibs) .After(CompileManagedTestHelpers) .Executes(() => { // create junction for each directory var directories = TracerDirectory.GlobDirectories( $"src/**/bin/{BuildConfiguration}", $"tools/**/bin/{BuildConfiguration}", $"test/Datadog.Trace.TestHelpers/**/bin/{BuildConfiguration}", $"test/test-applications/integrations/dependency-libs/**/bin/{BuildConfiguration}" ); directories.ForEach(existingDir => { var newDir = existingDir.Parent / $"{TargetPlatform}" / BuildConfiguration; if (DirectoryExists(newDir)) { Logger.Info($"Skipping '{newDir}' as already exists"); } else { EnsureExistingDirectory(newDir.Parent); Cmd.Value(arguments: $"cmd /c mklink /J \"{newDir}\" \"{existingDir}\""); } }); }); Target ZipSymbols => _ => _ .Unlisted() .After(BuildTracerHome) .DependsOn(PublishNativeSymbolsWindows) .OnlyWhenStatic(() => IsWin) .Executes(() => { CompressZip(SymbolsDirectory, WindowsSymbolsZip, fileMode: FileMode.Create); }); Target ZipTracerHome => _ => _ .Unlisted() .After(BuildTracerHome) .Requires(() => Version) .Executes(() => { if (IsWin) { CompressZip(TracerHomeDirectory, WindowsTracerHomeZip, fileMode: FileMode.Create); } else if (IsLinux) { var fpm = Fpm.Value; var gzip = GZip.Value; var packageName = "datadog-dotnet-apm"; var workingDirectory = ArtifactsDirectory / $"linux-{LinuxArchitectureIdentifier}"; EnsureCleanDirectory(workingDirectory); foreach (var packageType in LinuxPackageTypes) { var args = new List<string>() { "-f", "-s dir", $"-t {packageType}", $"-n {packageName}", $"-v {Version}", packageType == "tar" ? "" : "--prefix /opt/datadog", $"--chdir {TracerHomeDirectory}", "netstandard2.0/", "netcoreapp3.1/", "Datadog.Trace.ClrProfiler.Native.so", "createLogPath.sh", }; if (!IsArm64) { args.Add("libddwaf.so"); } var arguments = string.Join(" ", args); fpm(arguments, workingDirectory: workingDirectory); } gzip($"-f {packageName}.tar", workingDirectory: workingDirectory); var suffix = RuntimeInformation.ProcessArchitecture == Architecture.X64 ? string.Empty : $".{RuntimeInformation.ProcessArchitecture.ToString().ToLower()}"; var versionedName = IsAlpine ? 
$"{packageName}-{Version}-musl{suffix}.tar.gz" : $"{packageName}-{Version}{suffix}.tar.gz"; RenameFile( workingDirectory / $"{packageName}.tar.gz", workingDirectory / versionedName); } }); Target CompileManagedTestHelpers => _ => _ .Unlisted() .After(Restore) .After(CompileManagedSrc) .Executes(() => { // Always AnyCPU DotNetMSBuild(x => x .SetTargetPath(MsBuildProject) .SetConfiguration(BuildConfiguration) .SetTargetPlatformAnyCPU() .DisableRestore() .SetProperty("BuildProjectReferences", false) .SetTargets("BuildCsharpTestHelpers")); }); Target CompileManagedUnitTests => _ => _ .Unlisted() .After(Restore) .After(CompileManagedSrc) .Executes(() => { // Always AnyCPU DotNetMSBuild(x => x .SetTargetPath(MsBuildProject) .SetConfiguration(BuildConfiguration) .SetTargetPlatformAnyCPU() .DisableRestore() .SetProperty("BuildProjectReferences", false) .SetTargets("BuildCsharpUnitTests")); }); Target RunManagedUnitTests => _ => _ .Unlisted() .After(CompileManagedUnitTests) .Executes(() => { var testProjects = TracerDirectory.GlobFiles("test/**/*.Tests.csproj") .Select(x => Solution.GetProject(x)) .ToList(); testProjects.ForEach(EnsureResultsDirectory); try { DotNetTest(x => x .EnableNoRestore() .EnableNoBuild() .SetConfiguration(BuildConfiguration) .SetTargetPlatformAnyCPU() .SetDDEnvironmentVariables("dd-tracer-dotnet") .When(CodeCoverage, ConfigureCodeCoverage) .CombineWith(testProjects, (x, project) => x .EnableTrxLogOutput(GetResultsDirectory(project)) .SetProjectFile(project))); } finally { MoveLogsToBuildData(); } }); Target RunNativeTestsWindows => _ => _ .Unlisted() .After(CompileNativeSrcWindows) .After(CompileNativeTestsWindows) .OnlyWhenStatic(() => IsWin) .Executes(() => { var workingDirectory = TestsDirectory / "Datadog.Trace.ClrProfiler.Native.Tests" / "bin" / BuildConfiguration.ToString() / TargetPlatform.ToString(); var exePath = workingDirectory / "Datadog.Trace.ClrProfiler.Native.Tests.exe"; var testExe = ToolResolver.GetLocalTool(exePath); testExe("--gtest_output=xml", workingDirectory: workingDirectory); }); Target RunNativeTestsLinux => _ => _ .Unlisted() .After(CompileNativeSrcLinux) .After(CompileNativeTestsLinux) .OnlyWhenStatic(() => IsLinux) .Executes(() => { Logger.Error("We don't currently run unit tests on Linux"); }); Target RunNativeTests => _ => _ .Unlisted() .DependsOn(RunNativeTestsWindows) .DependsOn(RunNativeTestsLinux); Target CompileDependencyLibs => _ => _ .Unlisted() .After(Restore) .After(CompileManagedSrc) .Executes(() => { // Always AnyCPU DotNetMSBuild(x => x .SetTargetPath(MsBuildProject) .SetConfiguration(BuildConfiguration) .SetTargetPlatformAnyCPU() .DisableRestore() .EnableNoDependencies() .SetTargets("BuildDependencyLibs") ); }); Target CompileRegressionDependencyLibs => _ => _ .Unlisted() .After(Restore) .After(CompileManagedSrc) .Executes(() => { // We run linux integration tests in AnyCPU, but Windows on the specific architecture var platform = IsLinux ? 
MSBuildTargetPlatform.MSIL : TargetPlatform; DotNetMSBuild(x => x .SetTargetPath(MsBuildProject) .SetTargetPlatformAnyCPU() .DisableRestore() .EnableNoDependencies() .SetConfiguration(BuildConfiguration) .SetTargetPlatform(platform) .SetTargets("BuildRegressionDependencyLibs") ); }); Target CompileRegressionSamples => _ => _ .Unlisted() .After(Restore) .After(CreatePlatformlessSymlinks) .After(CompileRegressionDependencyLibs) .Requires(() => Framework) .Executes(() => { var regressionsDirectory = Solution.GetProject(Projects.EntityFramework6xMdTokenLookupFailure) .Directory.Parent; var regressionLibs = GlobFiles(regressionsDirectory / "**" / "*.csproj") .Where(path => (path, Solution.GetProject(path).TryGetTargetFrameworks()) switch { _ when path.Contains("EntityFramework6x.MdTokenLookupFailure") => false, _ when path.Contains("ExpenseItDemo") => false, _ when path.Contains("StackExchange.Redis.AssemblyConflict.LegacyProject") => false, _ when path.Contains("MismatchedTracerVersions") => false, _ when path.Contains("dependency-libs") => false, _ when !string.IsNullOrWhiteSpace(SampleName) => path.Contains(SampleName), (_ , var targets) when targets is not null => targets.Contains(Framework), _ => true, } ); // Allow restore here, otherwise things go wonky with runtime identifiers // in some target frameworks. No, I don't know why DotNetBuild(x => x // .EnableNoRestore() .EnableNoDependencies() .SetConfiguration(BuildConfiguration) .SetTargetPlatform(TargetPlatform) .SetFramework(Framework) .SetNoWarnDotNetCore3() .When(!string.IsNullOrEmpty(NugetPackageDirectory), o => o.SetPackageDirectory(NugetPackageDirectory)) .CombineWith(regressionLibs, (x, project) => x .SetProjectFile(project))); }); Target CompileFrameworkReproductions => _ => _ .Unlisted() .Description("Builds .NET Framework projects (non SDK-based projects)") .After(CompileRegressionDependencyLibs) .After(CompileDependencyLibs) .After(CreatePlatformlessSymlinks) .Requires(() => IsWin) .Executes(() => { // We have to use the full MSBuild here, as dotnet msbuild doesn't copy the EDMX assets for embedding correctly // seems similar to https://github.com/dotnet/sdk/issues/8360 MSBuild(s => s .SetTargetPath(MsBuildProject) .SetMSBuildPath() .DisableRestore() .EnableNoDependencies() .SetConfiguration(BuildConfiguration) .SetTargetPlatform(TargetPlatform) .SetTargets("BuildFrameworkReproductions") .SetMaxCpuCount(null)); }); Target CompileIntegrationTests => _ => _ .Unlisted() .After(CompileManagedSrc) .After(CompileRegressionSamples) .After(CompileFrameworkReproductions) .After(PublishIisSamples) .Requires(() => Framework) .Requires(() => TracerHomeDirectory != null) .Executes(() => { DotNetMSBuild(s => s .SetTargetPath(MsBuildProject) .SetProperty("TargetFramework", Framework.ToString()) .DisableRestore() .EnableNoDependencies() .SetConfiguration(BuildConfiguration) .SetTargetPlatform(TargetPlatform) .SetTargets("BuildCsharpIntegrationTests") .SetMaxCpuCount(null)); }); Target CompileSamples => _ => _ .Unlisted() .After(CompileDependencyLibs) .After(CreatePlatformlessSymlinks) .After(CompileFrameworkReproductions) .Requires(() => TracerHomeDirectory != null) .Requires(() => Framework) .Executes(() => { // This does some "unnecessary" rebuilding and restoring var includeIntegration = TracerDirectory.GlobFiles("test/test-applications/integrations/**/*.csproj"); // Don't build aspnet full framework sample in this step var includeSecurity = TracerDirectory.GlobFiles("test/test-applications/security/*/*.csproj"); var exclude = 
TracerDirectory.GlobFiles("test/test-applications/integrations/dependency-libs/**/*.csproj"); var projects = includeIntegration .Concat(includeSecurity) .Select(x => Solution.GetProject(x)) .Where(project => (project, project.TryGetTargetFrameworks()) switch { _ when exclude.Contains(project.Path) => false, _ when project.Path.ToString().Contains("Samples.OracleMDA") => false, _ when !string.IsNullOrWhiteSpace(SampleName) => project.Path.ToString().Contains(SampleName), (_ , var targets) when targets is not null => targets.Contains(Framework), _ => true, } ); // /nowarn:NU1701 - Package 'x' was restored using '.NETFramework,Version=v4.6.1' instead of the project target framework '.NETCoreApp,Version=v2.1'. DotNetBuild(config => config .SetConfiguration(BuildConfiguration) .SetTargetPlatform(TargetPlatform) .EnableNoDependencies() .SetProperty("BuildInParallel", "false") .SetProcessArgumentConfigurator(arg => arg.Add("/nowarn:NU1701")) .CombineWith(projects, (s, project) => s // we have to build this one for all frameworks (because of reasons) .When(!project.Name.Contains("MultiDomainHost"), x => x.SetFramework(Framework)) .SetProjectFile(project))); }); Target PublishIisSamples => _ => _ .Unlisted() .After(CompileManagedTestHelpers) .After(CompileRegressionSamples) .After(CompileFrameworkReproductions) .Executes(() => { var aspnetFolder = TestsDirectory / "test-applications" / "aspnet"; var securityAspnetFolder = TestsDirectory / "test-applications" / "security" / "aspnet"; var aspnetProjects = aspnetFolder.GlobFiles("**/*.csproj"); var securityAspnetProjects = securityAspnetFolder.GlobFiles("**/*.csproj"); var publishProfile = aspnetFolder / "PublishProfiles" / "FolderProfile.pubxml"; MSBuild(x => x .SetMSBuildPath() // .DisableRestore() .EnableNoDependencies() .SetConfiguration(BuildConfiguration) .SetTargetPlatform(TargetPlatform) .SetProperty("DeployOnBuild", true) .SetProperty("PublishProfile", publishProfile) .SetMaxCpuCount(null) .CombineWith(aspnetProjects.Concat(securityAspnetProjects), (c, project) => c .SetTargetPath(project)) ); }); Target RunWindowsIntegrationTests => _ => _ .Unlisted() .After(BuildTracerHome) .After(CompileIntegrationTests) .After(CompileSamples) .After(CompileFrameworkReproductions) .After(BuildWindowsIntegrationTests) .Requires(() => IsWin) .Requires(() => Framework) .Executes(() => { ParallelIntegrationTests.ForEach(EnsureResultsDirectory); ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory); try { DotNetTest(config => config .SetDotnetPath(TargetPlatform) .SetConfiguration(BuildConfiguration) .SetTargetPlatform(TargetPlatform) .SetFramework(Framework) //.WithMemoryDumpAfter(timeoutInMinutes: 30) .EnableNoRestore() .EnableNoBuild() .SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory) .When(!string.IsNullOrEmpty(Filter), c => c.SetFilter(Filter)) .When(CodeCoverage, ConfigureCodeCoverage) .CombineWith(ParallelIntegrationTests, (s, project) => s .EnableTrxLogOutput(GetResultsDirectory(project)) .SetProjectFile(project)), degreeOfParallelism: 4); // TODO: I think we should change this filter to run on Windows by default // (RunOnWindows!=False|Category=Smoke)&LoadFromGAC!=True&IIS!=True DotNetTest(config => config .SetDotnetPath(TargetPlatform) .SetConfiguration(BuildConfiguration) .SetTargetPlatform(TargetPlatform) .SetFramework(Framework) //.WithMemoryDumpAfter(timeoutInMinutes: 30) .EnableNoRestore() .EnableNoBuild() .SetFilter(Filter ?? 
"RunOnWindows=True&LoadFromGAC!=True&IIS!=True") .SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory) .When(CodeCoverage, ConfigureCodeCoverage) .CombineWith(ClrProfilerIntegrationTests, (s, project) => s .EnableTrxLogOutput(GetResultsDirectory(project)) .SetProjectFile(project))); } finally { MoveLogsToBuildData(); CopyMemoryDumps(); } }); Target RunWindowsRegressionTests => _ => _ .Unlisted() .After(BuildTracerHome) .After(CompileIntegrationTests) .After(CompileRegressionSamples) .After(CompileFrameworkReproductions) .Requires(() => IsWin) .Requires(() => Framework) .Executes(() => { ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory); try { DotNetTest(config => config .SetDotnetPath(TargetPlatform) .SetConfiguration(BuildConfiguration) .SetTargetPlatform(TargetPlatform) .SetFramework(Framework) .EnableNoRestore() .EnableNoBuild() .SetFilter(Filter ?? "Category=Smoke&LoadFromGAC!=True") .SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory) .When(CodeCoverage, ConfigureCodeCoverage) .CombineWith(ClrProfilerIntegrationTests, (s, project) => s .EnableTrxLogOutput(GetResultsDirectory(project)) .SetProjectFile(project))); } finally { MoveLogsToBuildData(); } }); Target RunWindowsIisIntegrationTests => _ => _ .After(BuildTracerHome) .After(CompileIntegrationTests) .After(CompileSamples) .After(CompileFrameworkReproductions) .After(PublishIisSamples) .Requires(() => Framework) .Executes(() => { ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory); try { // Different filter from RunWindowsIntegrationTests DotNetTest(config => config .SetDotnetPath(TargetPlatform) .SetConfiguration(BuildConfiguration) .SetTargetPlatform(TargetPlatform) .SetFramework(Framework) .EnableNoRestore() .EnableNoBuild() .SetFilter(Filter ?? "(RunOnWindows=True)&LoadFromGAC=True") .SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory) .When(CodeCoverage, ConfigureCodeCoverage) .CombineWith(ClrProfilerIntegrationTests, (s, project) => s .EnableTrxLogOutput(GetResultsDirectory(project)) .SetProjectFile(project))); } finally { MoveLogsToBuildData(); } }); Target CompileSamplesLinux => _ => _ .Unlisted() .After(CompileManagedSrc) .After(CompileRegressionDependencyLibs) .After(CompileDependencyLibs) .After(CompileManagedTestHelpers) .Requires(() => TracerHomeDirectory != null) .Requires(() => Framework) .Executes(() => { // There's nothing specifically linux-y here, it's just that we only build a subset of projects // for testing on linux. var sampleProjects = TracerDirectory.GlobFiles("test/test-applications/integrations/*/*.csproj"); var securitySampleProjects = TracerDirectory.GlobFiles("test/test-applications/security/*/*.csproj"); var regressionProjects = TracerDirectory.GlobFiles("test/test-applications/regression/*/*.csproj"); var instrumentationProjects = TracerDirectory.GlobFiles("test/test-applications/instrumentation/*/*.csproj"); // These samples are currently skipped. 
var projectsToSkip = new[] { "Samples.Msmq", // Doesn't run on Linux "Samples.Owin.WebApi2", // Doesn't run on Linux "Samples.MultiDomainHost.Runner", "Samples.RateLimiter", // I think we _should_ run this one (assuming it has tests) "Samples.SqlServer.NetFramework20", "Samples.TracingWithoutLimits", // I think we _should_ run this one (assuming it has tests) "Samples.Wcf", "Samples.WebRequest.NetFramework20", "AutomapperTest", // I think we _should_ run this one (assuming it has tests) "DogStatsD.RaceCondition", "EntityFramework6x.MdTokenLookupFailure", "LargePayload", // I think we _should_ run this one (assuming it has tests) "Sandbox.ManualTracing", "StackExchange.Redis.AssemblyConflict.LegacyProject", "Samples.OracleMDA", // We don't test these yet "Samples.OracleMDA.Core", // We don't test these yet "MismatchedTracerVersions", }; // These sample projects are built using RestoreAndBuildSamplesForPackageVersions // so no point building them now var multiPackageProjects = new List<string>(); if (TestAllPackageVersions) { var samplesFile = BuildDirectory / "PackageVersionsGeneratorDefinitions.json"; using var fs = File.OpenRead(samplesFile); var json = JsonDocument.Parse(fs); multiPackageProjects = json.RootElement .EnumerateArray() .Select(e => e.GetProperty("SampleProjectName").GetString()) .Distinct() .Where(name => name switch { "Samples.MySql" => false, // the "non package version" is _ALSO_ tested separately _ => true }) .ToList(); } var projectsToBuild = sampleProjects .Concat(securitySampleProjects) .Concat(regressionProjects) .Concat(instrumentationProjects) .Where(path => { var project = Solution.GetProject(path); return project?.Name switch { "Samples.AspNetCoreMvc21" => Framework == TargetFramework.NETCOREAPP2_1, "Samples.AspNetCoreMvc30" => Framework == TargetFramework.NETCOREAPP3_0, "Samples.AspNetCoreMvc31" => Framework == TargetFramework.NETCOREAPP3_1, "Samples.AspNetCore2" => Framework == TargetFramework.NETCOREAPP2_1, "Samples.AspNetCore5" => Framework == TargetFramework.NET5_0 || Framework == TargetFramework.NETCOREAPP3_1 || Framework == TargetFramework.NETCOREAPP3_0, "Samples.GraphQL4" => Framework == TargetFramework.NETCOREAPP3_1 || Framework == TargetFramework.NET5_0, var name when projectsToSkip.Contains(name) => false, var name when multiPackageProjects.Contains(name) => false, "Samples.AspNetCoreRazorPages" => true, _ when !string.IsNullOrWhiteSpace(SampleName) => project?.Name?.Contains(SampleName) ?? 
false, _ => true, }; }); // do the build and publish separately to avoid dependency issues // Always AnyCPU DotNetBuild(x => x // .EnableNoRestore() .EnableNoDependencies() .SetConfiguration(BuildConfiguration) .SetFramework(Framework) // .SetTargetPlatform(Platform) .SetNoWarnDotNetCore3() .When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true")) .When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true")) .When(!string.IsNullOrEmpty(NugetPackageDirectory), o => o.SetPackageDirectory(NugetPackageDirectory)) .CombineWith(projectsToBuild, (c, project) => c .SetProjectFile(project))); // Always AnyCPU DotNetPublish(x => x .EnableNoRestore() .EnableNoBuild() .EnableNoDependencies() .SetConfiguration(BuildConfiguration) .SetFramework(Framework) // .SetTargetPlatform(Platform) .SetNoWarnDotNetCore3() .When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true")) .When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true")) .When(!string.IsNullOrEmpty(NugetPackageDirectory), o => o.SetPackageDirectory(NugetPackageDirectory)) .CombineWith(projectsToBuild, (c, project) => c .SetProject(project))); }); Target CompileMultiApiPackageVersionSamples => _ => _ .Unlisted() .After(CompileManagedSrc) .After(CompileRegressionDependencyLibs) .After(CompileDependencyLibs) .After(CompileManagedTestHelpers) .After(CompileSamplesLinux) .Requires(() => TracerHomeDirectory != null) .Requires(() => Framework) .Executes(() => { // Build and restore for all versions // Annoyingly this rebuilds everything again and again. var targets = new[] { "RestoreSamplesForPackageVersionsOnly", "RestoreAndBuildSamplesForPackageVersionsOnly" }; // /nowarn:NU1701 - Package 'x' was restored using '.NETFramework,Version=v4.6.1' instead of the project target framework '.NETCoreApp,Version=v2.1'. 
DotNetMSBuild(x => x .SetTargetPath(MsBuildProject) .SetConfiguration(BuildConfiguration) .EnableNoDependencies() .SetProperty("TargetFramework", Framework.ToString()) .SetProperty("BuildInParallel", "true") .SetProcessArgumentConfigurator(arg => arg.Add("/nowarn:NU1701")) .When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true")) .When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true")) .CombineWith(targets, (c, target) => c.SetTargets(target)) ); }); Target CompileLinuxIntegrationTests => _ => _ .Unlisted() .After(CompileManagedSrc) .After(CompileRegressionDependencyLibs) .After(CompileDependencyLibs) .After(CompileManagedTestHelpers) .After(CompileSamplesLinux) .After(CompileMultiApiPackageVersionSamples) .Requires(() => TracerHomeDirectory != null) .Requires(() => Framework) .Executes(() => { // Build the actual integration test projects for Any CPU var integrationTestProjects = TracerDirectory.GlobFiles("test/*.IntegrationTests/*.csproj"); DotNetBuild(x => x // .EnableNoRestore() .EnableNoDependencies() .SetConfiguration(BuildConfiguration) .SetFramework(Framework) // .SetTargetPlatform(Platform) .SetNoWarnDotNetCore3() .When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true")) .When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true")) .When(!string.IsNullOrEmpty(NugetPackageDirectory), o => o.SetPackageDirectory(NugetPackageDirectory)) .CombineWith(integrationTestProjects, (c, project) => c .SetProjectFile(project))); IntegrationTestLinuxProfilerDirFudge(Projects.ClrProfilerIntegrationTests); IntegrationTestLinuxProfilerDirFudge(Projects.AppSecIntegrationTests); }); Target RunLinuxIntegrationTests => _ => _ .After(CompileLinuxIntegrationTests) .Description("Runs the linux integration tests") .Requires(() => Framework) .Requires(() => IsLinux) .Executes(() => { ParallelIntegrationTests.ForEach(EnsureResultsDirectory); ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory); var filter = (string.IsNullOrEmpty(Filter), IsArm64) switch { (true, false) => "Category!=LinuxUnsupported", (true, true) => "(Category!=ArmUnsupported)&(Category!=LinuxUnsupported)", _ => Filter }; try { // Run these ones in parallel // Always AnyCPU DotNetTest(config => config .SetConfiguration(BuildConfiguration) // .SetTargetPlatform(Platform) .EnableNoRestore() .EnableNoBuild() .SetFramework(Framework) //.WithMemoryDumpAfter(timeoutInMinutes: 30) .SetFilter(filter) .SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory) .When(TestAllPackageVersions, o => o.SetProcessEnvironmentVariable("TestAllPackageVersions", "true")) .When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true")) .When(CodeCoverage, ConfigureCodeCoverage) .CombineWith(ParallelIntegrationTests, (s, project) => s .EnableTrxLogOutput(GetResultsDirectory(project)) .SetProjectFile(project)), degreeOfParallelism: 2); // Run this one separately so we can tail output DotNetTest(config => config .SetConfiguration(BuildConfiguration) // .SetTargetPlatform(Platform) .EnableNoRestore() .EnableNoBuild() .SetFramework(Framework) //.WithMemoryDumpAfter(timeoutInMinutes: 30) .SetFilter(filter) .SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory) .When(TestAllPackageVersions, o => o.SetProcessEnvironmentVariable("TestAllPackageVersions", "true")) .When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true")) .When(CodeCoverage, 
ConfigureCodeCoverage) .CombineWith(ClrProfilerIntegrationTests, (s, project) => s .EnableTrxLogOutput(GetResultsDirectory(project)) .SetProjectFile(project)) ); } finally { MoveLogsToBuildData(); CopyMemoryDumps(); } }); Target CheckBuildLogsForErrors => _ => _ .Unlisted() .Description("Reads the logs from build_data and checks for error lines") .Executes(() => { // we expect to see _some_ errors, so explcitly ignore them var knownPatterns = new List<Regex> { new(@".*Unable to resolve method MongoDB\..*", RegexOptions.Compiled), new(@".*at CallTargetNativeTest\.NoOp\.Noop\dArgumentsIntegration\.OnAsyncMethodEnd.*", RegexOptions.Compiled), new(@".*at CallTargetNativeTest\.NoOp\.Noop\dArgumentsIntegration\.OnMethodBegin.*", RegexOptions.Compiled), new(@".*at CallTargetNativeTest\.NoOp\.Noop\dArgumentsIntegration\.OnMethodEnd.*", RegexOptions.Compiled), new(@".*at CallTargetNativeTest\.NoOp\.Noop\dArgumentsVoidIntegration\.OnMethodBegin.*", RegexOptions.Compiled), new(@".*at CallTargetNativeTest\.NoOp\.Noop\dArgumentsVoidIntegration\.OnMethodEnd.*", RegexOptions.Compiled), }; var logDirectory = BuildDataDirectory / "logs"; if (DirectoryExists(logDirectory)) { // Should we care about warnings too? var managedErrors = logDirectory.GlobFiles("**/dotnet-tracer-managed-*") .SelectMany(ParseManagedLogFiles) .Where(x => x.Level >= LogLevel.Error) .Where(IsNewError) .ToList(); var nativeErrors = logDirectory.GlobFiles("**/dotnet-tracer-native-*") .SelectMany(ParseNativeLogFiles) .Where(x => x.Level >= LogLevel.Error) .Where(IsNewError) .ToList(); if (managedErrors.Count == 0 && nativeErrors.Count == 0) { Logger.Info("No errors found in managed or native logs"); return; } Logger.Warn("Found the following errors in log files:"); var allErrors = managedErrors .Concat(nativeErrors) .GroupBy(x => x.FileName); foreach (var erroredFile in allErrors) { Logger.Error($"Found errors in log file '{erroredFile.Key}':"); foreach (var error in erroredFile) { Logger.Error($"{error.Timestamp:hh:mm:ss} [{error.Level}] {error.Message}"); } } ExitCode = 1; } bool IsNewError(ParsedLogLine logLine) { foreach (var pattern in knownPatterns) { if (pattern.IsMatch(logLine.Message)) { return false; } } return true; } static List<ParsedLogLine> ParseManagedLogFiles(AbsolutePath logFile) { var regex = new Regex(@"^(\d\d\d\d\-\d\d\-\d\d\W\d\d\:\d\d\:\d\d\.\d\d\d\W\+\d\d\:\d\d)\W\[(.*?)\]\W(.*)", RegexOptions.Compiled); var allLines = File.ReadAllLines(logFile); var allLogs = new List<ParsedLogLine>(allLines.Length); ParsedLogLine currentLine = null; foreach (var line in allLines) { if (string.IsNullOrWhiteSpace(line)) { continue; } var match = regex.Match(line); if (match.Success) { if (currentLine is not null) { allLogs.Add(currentLine); } try { // start of a new log line var timestamp = DateTimeOffset.Parse(match.Groups[1].Value); var level = ParseManagedLogLevel(match.Groups[2].Value); var message = match.Groups[3].Value; currentLine = new ParsedLogLine(timestamp, level, message, logFile); } catch (Exception ex) { Logger.Info($"Error parsing line: '{line}. 
{ex}"); } } else { if (currentLine is null) { Logger.Warn("Incomplete log line: " + line); } else { currentLine = currentLine with { Message = $"{currentLine.Message}{Environment.NewLine}{line}" }; } } } return allLogs; } static List<ParsedLogLine> ParseNativeLogFiles(AbsolutePath logFile) { var regex = new Regex(@"^(\d\d\/\d\d\/\d\d\W\d\d\:\d\d\:\d\d\.\d\d\d\W\w\w)\W\[.*?\]\W\[(.*?)\](.*)", RegexOptions.Compiled); var allLines = File.ReadAllLines(logFile); var allLogs = new List<ParsedLogLine>(allLines.Length); foreach (var line in allLines) { if (string.IsNullOrWhiteSpace(line)) { continue; } var match = regex.Match(line); if (match.Success) { try { // native logs are on one line var timestamp = DateTimeOffset.ParseExact(match.Groups[1].Value, "MM/dd/yy hh:mm:ss.fff tt", null); var level = ParseNativeLogLevel(match.Groups[2].Value); var message = match.Groups[3].Value; var currentLine = new ParsedLogLine(timestamp, level, message, logFile); allLogs.Add(currentLine); } catch (Exception ex) { Logger.Info($"Error parsing line: '{line}. {ex}"); } } else { Logger.Warn("Incomplete log line: " + line); } } return allLogs; } static LogLevel ParseManagedLogLevel(string value) => value switch { "VRB" => LogLevel.Trace, "DBG" => LogLevel.Trace, "INF" => LogLevel.Normal, "WRN" => LogLevel.Warning, "ERR" => LogLevel.Error, _ => LogLevel.Normal, // Concurrency issues can sometimes garble this so ignore it }; static LogLevel ParseNativeLogLevel(string value) => value switch { "trace" => LogLevel.Trace, "debug" => LogLevel.Trace, "info" => LogLevel.Normal, "warning" => LogLevel.Warning, "error" => LogLevel.Error, _ => LogLevel.Normal, // Concurrency issues can sometimes garble this so ignore it }; Logger.Info($"Skipping log parsing, directory '{logDirectory}' not found"); }); private AbsolutePath GetResultsDirectory(Project proj) => BuildDataDirectory / "results" / proj.Name; private void EnsureResultsDirectory(Project proj) => EnsureCleanDirectory(GetResultsDirectory(proj)); private (string, string) GetUnixArchitectureAndExtension(bool includeMuslSuffixOnAlpine) { return (IsOsx, IsAlpine, includeMuslSuffixOnAlpine) switch { (true, _, _) => ("osx-x64", "dylib"), (_, true, true) => ($"linux-musl-{LinuxArchitectureIdentifier}", "so"), _ => ($"linux-{LinuxArchitectureIdentifier}", "so"), }; } // the integration tests need their own copy of the profiler, this achived through build.props on Windows, but doesn't seem to work under Linux private void IntegrationTestLinuxProfilerDirFudge(string project) { // Not sure if/why this is necessary, and we can't just point to the correct output location var src = TracerHomeDirectory; var testProject = Solution.GetProject(project).Directory; var dest = testProject / "bin" / BuildConfiguration / Framework / "profiler-lib"; CopyDirectoryRecursively(src, dest, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite); // not sure exactly where this is supposed to go, may need to change the original build foreach (var linuxDir in TracerHomeDirectory.GlobDirectories("linux-*")) { CopyDirectoryRecursively(linuxDir, dest, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite); } } private void MoveLogsToBuildData() { if (Directory.Exists(TracerLogDirectory)) { CopyDirectoryRecursively(TracerLogDirectory, BuildDataDirectory / "logs", DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite); } if (Directory.Exists(TempDirectory)) { foreach (var dump in GlobFiles(TempDirectory, "coredump*")) { MoveFileToDirectory(dump, BuildDataDirectory / "dumps", FileExistsPolicy.Overwrite); 
} } } private void CopyMemoryDumps() { foreach (var file in Directory.EnumerateFiles(TracerDirectory, "*.dmp", SearchOption.AllDirectories)) { CopyFileToDirectory(file, BuildDataDirectory, FileExistsPolicy.OverwriteIfNewer); } } private DotNetTestSettings ConfigureCodeCoverage(DotNetTestSettings settings) { var strongNameKeyPath = Solution.Directory / "Datadog.Trace.snk"; return settings.SetDataCollector("XPlat Code Coverage") .SetProcessArgumentConfigurator( args => args.Add("--") .Add("RunConfiguration.DisableAppDomain=true") // https://github.com/coverlet-coverage/coverlet/issues/347 .Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.SkipAutoProps=true") .Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.Format=cobertura") .Add($"DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.StrongNameKey=\"{strongNameKeyPath}\"") .Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.ExcludeByFile=\"**/NuGet/**/LibLog/**/*.cs\",") .Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.Exclude=\"[*]Datadog.Trace.Vendors.*,[Datadog.Trace]System.*,[Datadog.Trace]Mono.*\",") .Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.Include=\"[Datadog.Trace.ClrProfiler.*]*,[Datadog.Trace]*,[Datadog.Trace.AspNet]*\"")); } protected override void OnTargetStart(string target) { if (PrintDriveSpace) { foreach (var drive in DriveInfo.GetDrives().Where(d => d.IsReady)) { Logger.Info($"Drive space available on '{drive.Name}': {PrettyPrint(drive.AvailableFreeSpace)} / {PrettyPrint(drive.TotalSize)}"); } } base.OnTargetStart(target); static string PrettyPrint(long bytes) { var power = Math.Min((int)Math.Log(bytes, 1000), 4); var normalised = bytes / Math.Pow(1000, power); return power switch { 4 => $"{normalised:F}TB", 3 => $"{normalised:F}GB", 2 => $"{normalised:F}MB", 1 => $"{normalised:F}KB", _ => $"{bytes}B", }; } } private record ParsedLogLine(DateTimeOffset Timestamp, LogLevel Level, string Message, AbsolutePath FileName); }
1
22,663
FYI, I have a branch I started to improve this. Nuke can read the target frameworks from the project files, so we don't have to do this mess. I ran into some other issues, so it didn't take priority.
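A minimal sketch of the idea the comment describes, not the actual branch: read the frameworks to publish from the project file instead of the hard-coded TargetFrameworks array. It assumes it lives inside the same Build class (so Solution and IsWin are in scope) and that the TryGetTargetFrameworks() helper already used in CompileRegressionSamples returns the TFM strings declared in the .csproj; the method name below is illustrative.

// Sketch only, assuming TryGetTargetFrameworks() yields the TFM strings from the project file.
IEnumerable<string> FrameworksDeclaredBy(string projectName)
{
    var project = Solution.GetProject(projectName);
    var declared = project?.TryGetTargetFrameworks() ?? new string[0];

    // Mirror the existing behaviour: skip .NET Framework targets when not building on Windows.
    return IsWin
        ? declared
        : declared.Where(tfm => !tfm.StartsWith("net4"));
}

Targets such as PublishManagedProfiler could then iterate the result of this helper rather than the static TargetFrameworks list.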
DataDog-dd-trace-dotnet
.cs
@@ -364,16 +364,11 @@ void getNbrAtomAndBondIds(unsigned int aid, const RDKit::ROMol *mol, unsigned int na = mol->getNumAtoms(); URANGE_CHECK(aid, na); - RDKit::ROMol::ADJ_ITER nbrIdx, endNbrs; - boost::tie(nbrIdx, endNbrs) = mol->getAtomNeighbors(mol->getAtomWithIdx(aid)); - - unsigned int ai, bi; - while (nbrIdx != endNbrs) { - ai = (*nbrIdx); - bi = mol->getBondBetweenAtoms(aid, ai)->getIdx(); + for (auto ai : boost::make_iterator_range( + mol->getAtomNeighbors(mol->getAtomWithIdx(aid)))) { + auto bi = mol->getBondBetweenAtoms(aid, ai)->getIdx(); aids.push_back(ai); bids.push_back(bi); - nbrIdx++; } }
1
// $Id$ // // Copyright (C) 2003-2010 greg Landrum and Rational Discovery LLC // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #include <RDGeneral/types.h> #include <cmath> #include <Geometry/point.h> #include <Geometry/Transform2D.h> #include "DepictUtils.h" #include <iostream> #include <RDGeneral/Invariant.h> #include <algorithm> namespace RDDepict { double BOND_LEN = 1.5; double COLLISION_THRES = 0.70; double BOND_THRES = 0.50; double ANGLE_OPEN = 0.1222; // that is about 7 deg unsigned int MAX_COLL_ITERS = 15; double HETEROATOM_COLL_SCALE = 1.3; unsigned int NUM_BONDS_FLIPS = 3; RDGeom::INT_POINT2D_MAP embedRing(const RDKit::INT_VECT &ring) { // The process here is very straight forward // we take the center of the ring to lies at the origin put the first // point at the origin and then sweep // anticlockwise so by an angle A = 360/n for the next point // the length of the arm (l) we want to sweep is easy to compute given the // bond length (b) we want to use for each bond in the ring (for now // we will assume that this bond length is the same for all bonds in the ring // l = b/sqrt(2*(1 - cos(A)) // the above formula derives from the triangle formula, where side 'c' is // given // in terms of sides 'a' and 'b' as // c = a^2 + b^2 - 2.a.b.cos(A) // where A is the angle between a and b // compute the sweep angle unsigned int na = ring.size(); double ang = 2 * M_PI / na; // compute the arm length double al = BOND_LEN / (sqrt(2 * (1 - cos(ang)))); RDGeom::INT_POINT2D_MAP res; unsigned int i, aid; double x, y; for (i = 0; i < na; i++) { x = al * cos(i * ang); y = al * sin(i * ang); RDGeom::Point2D loc(x, y); aid = ring[i]; res[aid] = loc; } return res; } void transformPoints(RDGeom::INT_POINT2D_MAP &nringCor, const RDGeom::Transform2D &trans) { RDGeom::INT_POINT2D_MAP_I nrci; for (nrci = nringCor.begin(); nrci != nringCor.end(); nrci++) { RDGeom::Point2D loc = nrci->second; trans.TransformPoint(loc); nrci->second = loc; } } RDGeom::Point2D computeBisectPoint(const RDGeom::Point2D &rcr, double ang, const RDGeom::Point2D &nb1, const RDGeom::Point2D &nb2) { RDGeom::Point2D cloc = nb1; cloc += nb2; cloc *= 0.5; if (ang > M_PI) { // invert the cloc cloc -= rcr; cloc *= -1.0; cloc += rcr; } return cloc; } RDGeom::Point2D reflectPoint(const RDGeom::Point2D &point, const RDGeom::Point2D &loc1, const RDGeom::Point2D &loc2) { RDGeom::Point2D org(0.0, 0.0); RDGeom::Point2D xaxis(1.0, 0.0); RDGeom::Point2D cent = (loc1 + loc2); cent *= 0.5; RDGeom::Transform2D trans; trans.SetTransform(org, xaxis, cent, loc1); /// reverse transform RDGeom::Transform2D itrans; itrans.SetTransform(cent, loc1, org, xaxis); RDGeom::INT_POINT2D_MAP_I nci; RDGeom::Point2D res; res = point; trans.TransformPoint(res); res.y = -res.y; itrans.TransformPoint(res); return res; } void reflectPoints(RDGeom::INT_POINT2D_MAP &coordMap, const RDGeom::Point2D &loc1, const RDGeom::Point2D &loc2) { RDGeom::INT_POINT2D_MAP_I nci; for (nci = coordMap.begin(); nci != coordMap.end(); nci++) { nci->second = reflectPoint(nci->second, loc1, loc2); } } RDKit::INT_VECT setNbrOrder(unsigned int aid, const RDKit::INT_VECT &nbrs, const RDKit::ROMol &mol) { PRECONDITION(aid < mol.getNumAtoms(), ""); PR_QUEUE subsAid; int ref = -1; RDKit::ROMol::ADJ_ITER nbrIdx, endNbrs; boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(mol.getAtomWithIdx(aid)); // find the neighbor of aid that 
is not in nbrs i.e. atom A from the comments // in the header file // and the store the pair <degree, aid> in the order of increasing degree while (nbrIdx != endNbrs) { // We used to use degree here instead we will start using the CIP rank here if (std::find(nbrs.begin(), nbrs.end(), static_cast<int>(*nbrIdx)) == nbrs.end()) { ref = (*nbrIdx); } nbrIdx++; } RDKit::INT_VECT thold = nbrs; if (ref >= 0) { thold.push_back(ref); } // we should be here unless we have more than 3 atoms to worry about CHECK_INVARIANT(thold.size() > 3, ""); thold = rankAtomsByRank(mol, thold); // swap the position of the 3rd to last and second to last items in sorted // list unsigned int ln = thold.size(); int tint = thold[ln - 3]; thold[ln - 3] = thold[ln - 2]; thold[ln - 2] = tint; // go clock wise along the list from this position for the arranged neighbor // list RDKit::INT_VECT res; res.reserve(thold.size()); auto pos = std::find(thold.begin(), thold.end(), ref); if (pos != thold.end()) { res.insert(res.end(), pos + 1, thold.end()); } if (pos != thold.begin()) { res.insert(res.end(), thold.begin(), pos); } POSTCONDITION(res.size() == nbrs.size(), ""); return res; } int pickFirstRingToEmbed(const RDKit::ROMol &mol, const RDKit::VECT_INT_VECT &fusedRings) { // ok this is what we will do here // we will pick the ring with the smallest number of substituents int res = -1; unsigned int maxSize = 0; int subs, minsubs = static_cast<int>(1e8); int cnt = 0; for (const auto &fusedRing : fusedRings) { subs = 0; for (auto rii : fusedRing) { int deg = mol.getAtomWithIdx(rii)->getDegree(); if (deg > 2) { subs++; } } if (subs < minsubs) { res = cnt; minsubs = subs; maxSize = fusedRing.size(); } else if (subs == minsubs) { if (fusedRing.size() > maxSize) { res = cnt; maxSize = fusedRing.size(); } } cnt++; } return res; } RDKit::INT_VECT findNextRingToEmbed(const RDKit::INT_VECT &doneRings, const RDKit::VECT_INT_VECT &fusedRings, int &nextId) { // REVIEW: We are changing this after Issue166 // Originally the ring that have maximum number of atoms in common with the // atoms // that have already been embedded will be the ring that will get embedded. // But // if we can find a ring with two atoms in common with the embedded atoms, we // will // choose that first before systems with more than 2 atoms in common. Cases // with two atoms // in common are in general flat systems to start with and can be embedded // cleanly. 
// when there are more than 2 atoms in common, these are most likely bridged // systems, which are // screwed up anyway, might as well screw them up later // if we do not have a system with two rings in common then we will return the // ring with max, // common atoms PRECONDITION(doneRings.size() > 0, ""); PRECONDITION(fusedRings.size() > 1, ""); RDKit::INT_VECT commonAtoms, res, doneAtoms, notDone; for (int i = 0; i < rdcast<int>(fusedRings.size()); i++) { if (std::find(doneRings.begin(), doneRings.end(), i) == doneRings.end()) { notDone.push_back(i); } } RDKit::Union(fusedRings, doneAtoms, &notDone); int maxCommonAtoms = 0; int currRingId = 0; for (const auto &fusedRing : fusedRings) { if (std::find(doneRings.begin(), doneRings.end(), currRingId) != doneRings.end()) { currRingId++; continue; } commonAtoms.clear(); int numCommonAtoms = 0; for (auto rii : fusedRing) { if (std::find(doneAtoms.begin(), doneAtoms.end(), (rii)) != doneAtoms.end()) { commonAtoms.push_back(rii); numCommonAtoms++; } } if (numCommonAtoms == 2) { // if we found a ring with two atoms in common get out nextId = currRingId; return commonAtoms; // FIX: this causes the rendering to be non-canonical } if (numCommonAtoms > maxCommonAtoms) { maxCommonAtoms = numCommonAtoms; nextId = currRingId; res = commonAtoms; } currRingId++; } // here is an additional constrain we will put on the common atoms // it is quite likely that the common atoms form a chain (it is possible we // can // construct some weird cases where this does not hold true - but for now we // will // assume this is true. However the IDs in the res may not be in the order of // going // from one end of the chain to the other - here is an example // C1CCC(CC12)CCC2 - two rings here with three atoms in common // let ring1:(0,1,2,3,4,5) be a ring that is already embedded, then let // ring2:(4,3,6,7,8,5) be the ring // that we found to be the next ring we should embed. The commonAtoms are // (4,3,5) - note that // they will be in this order since the rings are always traversed in order. 
// Now we would like these // common atoms to be returned in the order (5,4,3) - then we have a // continuous chain, we can // do this by simply looking at the original ring order (4,3,6,7,8,5) and // observing that 5 need to come to // the front // find out how many atoms from the end we need to move to the front unsigned int cmnLst = 0; unsigned int nCmn = res.size(); for (unsigned int i = 0; i < nCmn; i++) { if (res[i] == fusedRings[nextId][i]) { cmnLst++; } else { break; } } // now do the moving if we have to if ((cmnLst > 0) && (cmnLst < res.size())) { RDKit::INT_VECT tempV = res; for (unsigned int i = cmnLst; i < nCmn; i++) { res[i - cmnLst] = tempV[i]; } unsigned int nMov = nCmn - cmnLst; for (unsigned int i = 0; i < cmnLst; i++) { res[nMov + i] = tempV[i]; } } POSTCONDITION(res.size() > 0, ""); return res; } RDKit::INT_VECT getAllRotatableBonds(const RDKit::ROMol &mol) { RDKit::INT_VECT res; RDKit::ROMol::ConstBondIterator bondIt; for (bondIt = mol.beginBonds(); bondIt != mol.endBonds(); bondIt++) { int bid = (*bondIt)->getIdx(); if (((*bondIt)->getStereo() <= RDKit::Bond::STEREOANY) && (!(mol.getRingInfo()->numBondRings(bid)))) { res.push_back(bid); } } return res; } RDKit::INT_VECT getRotatableBonds(const RDKit::ROMol &mol, unsigned int aid1, unsigned int aid2) { PRECONDITION(aid1 < mol.getNumAtoms(), ""); PRECONDITION(aid2 < mol.getNumAtoms(), ""); RDKit::INT_LIST path = RDKit::MolOps::getShortestPath(mol, aid1, aid2); RDKit::INT_VECT res; if (path.size() >= 4) { // remove the first atom (aid1) and last atom (aid2) CHECK_INVARIANT(static_cast<unsigned int>(path.front()) == aid1, "bad first element"); path.pop_front(); CHECK_INVARIANT(static_cast<unsigned int>(path.back()) == aid2, "bad last element"); path.pop_back(); RDKit::INT_LIST_CI pi = path.begin(); int pid = (*pi); ++pi; while (pi != path.end()) { int aid = (*pi); const RDKit::Bond *bond = mol.getBondBetweenAtoms(pid, aid); int bid = bond->getIdx(); if ((bond->getStereo() <= RDKit::Bond::STEREOANY) && (!(mol.getRingInfo()->numBondRings(bid)))) { res.push_back(bid); } pid = aid; ++pi; } } return res; } void getNbrAtomAndBondIds(unsigned int aid, const RDKit::ROMol *mol, RDKit::INT_VECT &aids, RDKit::INT_VECT &bids) { CHECK_INVARIANT(mol, ""); unsigned int na = mol->getNumAtoms(); URANGE_CHECK(aid, na); RDKit::ROMol::ADJ_ITER nbrIdx, endNbrs; boost::tie(nbrIdx, endNbrs) = mol->getAtomNeighbors(mol->getAtomWithIdx(aid)); unsigned int ai, bi; while (nbrIdx != endNbrs) { ai = (*nbrIdx); bi = mol->getBondBetweenAtoms(aid, ai)->getIdx(); aids.push_back(ai); bids.push_back(bi); nbrIdx++; } } // find pairs of bonds that can be permuted at a non-ring degree 4 // node. This function will return only those pairs that cannot be // permuted by flipping a rotatable bond // // D // | // b3 // | // A-b1-B-b2-C // | // b4 // | // E // For example in the above situation on the pairs (b1, b3) and (b1, b4) will be // returned // All other permutations can be achieved via a rotatable bond flip. 
INT_PAIR_VECT findBondsPairsToPermuteDeg4(const RDGeom::Point2D &center, const RDKit::INT_VECT &nbrBids, const VECT_C_POINT &nbrLocs) { INT_PAIR_VECT res; // make sure there are four of them CHECK_INVARIANT(nbrBids.size() == 4, ""); CHECK_INVARIANT(nbrLocs.size() == 4, ""); std::vector<RDGeom::Point2D> nbrPts; nbrPts.reserve(nbrLocs.size()); for (const auto &nloc : nbrLocs) { RDGeom::Point2D v = (*nloc) - center; nbrPts.push_back(v); } // now find the lay out of the bonds and return the bonds that are 90deg to // the // the bond to the first neighbor; i.e. we want to find b3 and b4 in the above // picture double dp1 = nbrPts[0].dotProduct(nbrPts[1]); if (fabs(dp1) < 1.e-3) { // the first two vectors are perpendicular to each other. We now have b1 and // b3 we need to // find b4 INT_PAIR p1(nbrBids[0], nbrBids[1]); res.push_back(p1); double dp2 = nbrPts[0].dotProduct(nbrPts[2]); if (fabs(dp2) < 1.e-3) { // now we found b4 as well return the results INT_PAIR p2(nbrBids[0], nbrBids[2]); res.push_back(p2); } else { // bids[0] and bids[2] are opposite to each other and we know bids[1] is // perpendicular to bids[0]. So bids[3] is also perpendicular to bids[0] INT_PAIR p2(nbrBids[0], nbrBids[3]); res.push_back(p2); } return res; } else { // bids[0] and bids[1] are opposite to each other, so bids[2] and bids[3] // must // be perpendicular to bids[0] INT_PAIR p1(nbrBids[0], nbrBids[2]); res.push_back(p1); INT_PAIR p2(nbrBids[0], nbrBids[3]); res.push_back(p2); return res; } } // compare the first elements of two pairs of integers/ int _pairCompDescending(const INT_PAIR &arg1, const INT_PAIR &arg2) { return (arg1.first != arg2.first ? arg1.first > arg2.first : arg1.second > arg2.second); } int _pairCompAscending(const INT_PAIR &arg1, const INT_PAIR &arg2) { return (arg1.first != arg2.first ? arg1.first < arg2.first : arg1.second < arg2.second); } template <class T> T rankAtomsByRank(const RDKit::ROMol &mol, const T &commAtms, bool ascending) { size_t natms = commAtms.size(); INT_PAIR_VECT rankAid; rankAid.reserve(natms); T res; typename T::const_iterator ci; for (ci = commAtms.begin(); ci != commAtms.end(); ci++) { unsigned int rank; const RDKit::Atom *at = mol.getAtomWithIdx(*ci); if (at->hasProp(RDKit::common_properties::_CIPRank)) { at->getProp(RDKit::common_properties::_CIPRank, rank); } else { rank = mol.getNumAtoms() * getAtomDepictRank(at) + (*ci); } rankAid.push_back(std::make_pair(rank, (*ci))); } if (ascending) { std::stable_sort(rankAid.begin(), rankAid.end(), _pairCompAscending); } else { std::stable_sort(rankAid.begin(), rankAid.end(), _pairCompDescending); } INT_PAIR_VECT_CI rai; for (rai = rankAid.begin(); rai != rankAid.end(); rai++) { res.push_back(rai->second); } return res; } template RDKit::INT_VECT rankAtomsByRank(const RDKit::ROMol &mol, const RDKit::INT_VECT &commAtms, bool ascending); template RDKit::INT_DEQUE rankAtomsByRank(const RDKit::ROMol &mol, const RDKit::INT_DEQUE &commAtms, bool ascending); template RDKit::INT_LIST rankAtomsByRank(const RDKit::ROMol &mol, const RDKit::INT_LIST &commAtms, bool ascending); } // namespace RDDepict
1
23,618
This is just `mol->atomNeighbors()`, isn't it?
rdkit-rdkit
cpp
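For the rdkit-rdkit record above, the reviewer's point is that the hand-rolled boost::tie(nbrIdx, endNbrs) loop in getNbrAtomAndBondIds() amounts to iterating the molecule's neighbor range directly. A minimal sketch of that rewrite follows, assuming a reasonably recent RDKit release that provides the range-based ROMol::atomNeighbors() helper; the function name and the small main() below are illustrative, not the project's code.

#include <GraphMol/ROMol.h>
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <RDGeneral/types.h>
#include <iostream>
#include <memory>

// Same result as the original getNbrAtomAndBondIds(): collect the indices of
// the atoms and bonds around atom `aid`, but via the range-based helper.
void nbrAtomAndBondIdsSketch(unsigned int aid, const RDKit::ROMol &mol,
                             RDKit::INT_VECT &aids, RDKit::INT_VECT &bids) {
  const RDKit::Atom *atom = mol.getAtomWithIdx(aid);
  for (const auto nbr : mol.atomNeighbors(atom)) {  // yields neighboring Atom pointers
    aids.push_back(nbr->getIdx());
    bids.push_back(mol.getBondBetweenAtoms(aid, nbr->getIdx())->getIdx());
  }
}

int main() {
  // isopropanol: atom 1 is the central carbon, which has three neighbors
  std::unique_ptr<RDKit::ROMol> mol(RDKit::SmilesToMol("CC(C)O"));
  RDKit::INT_VECT aids, bids;
  nbrAtomAndBondIdsSketch(1, *mol, aids, bids);
  std::cout << aids.size() << " neighbors\n";  // expected: 3
}

The behavior matches the original loop; only the explicit iterator pair and the boost::tie bookkeeping disappear.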
@@ -40,10 +40,11 @@ using daal_kmeans_lloyd_dense_ucapi_kernel_t = daal_kmeans::internal::KMeansDenseLloydBatchKernelUCAPI<Float>; template <typename Float> -struct infer_kernel_gpu<Float, method::by_default> { - infer_result operator()(const dal::backend::context_gpu& ctx, - const descriptor_base& params, - const infer_input& input) const { +struct infer_kernel_gpu<Float, method::by_default, task::clustering> { + infer_result<task::clustering> operator()(const dal::backend::context_gpu& ctx, + const descriptor_base<task::clustering>& params, + const infer_input<task::clustering>& input) const { + using Task = task::clustering; auto& queue = ctx.get_queue(); interop::execution_context_guard guard(queue);
1
/******************************************************************************* * Copyright 2020 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #define DAAL_SYCL_INTERFACE #define DAAL_SYCL_INTERFACE_USM #define DAAL_SYCL_INTERFACE_REVERSED_RANGE #include <src/algorithms/kmeans/oneapi/kmeans_dense_lloyd_batch_kernel_ucapi.h> #include "oneapi/dal/algo/kmeans/backend/gpu/infer_kernel.hpp" #include "oneapi/dal/backend/interop/common_dpc.hpp" #include "oneapi/dal/backend/interop/error_converter.hpp" #include "oneapi/dal/backend/interop/table_conversion.hpp" #include "oneapi/dal/table/row_accessor.hpp" namespace oneapi::dal::kmeans::backend { using std::int64_t; using dal::backend::context_gpu; namespace daal_kmeans = daal::algorithms::kmeans; namespace interop = dal::backend::interop; template <typename Float> using daal_kmeans_lloyd_dense_ucapi_kernel_t = daal_kmeans::internal::KMeansDenseLloydBatchKernelUCAPI<Float>; template <typename Float> struct infer_kernel_gpu<Float, method::by_default> { infer_result operator()(const dal::backend::context_gpu& ctx, const descriptor_base& params, const infer_input& input) const { auto& queue = ctx.get_queue(); interop::execution_context_guard guard(queue); const auto data = input.get_data(); const int64_t row_count = data.get_row_count(); const int64_t column_count = data.get_column_count(); const int64_t cluster_count = params.get_cluster_count(); const int64_t max_iteration_count = 0; daal_kmeans::Parameter par(cluster_count, max_iteration_count); par.resultsToEvaluate = daal_kmeans::computeAssignments; auto arr_data = row_accessor<const Float>{ data }.pull(queue); const auto daal_data = interop::convert_to_daal_sycl_homogen_table(queue, arr_data, data.get_row_count(), data.get_column_count()); auto arr_initial_centroids = row_accessor<const Float>{ input.get_model().get_centroids() }.pull(queue); array<int> arr_centroids = array<int>::empty(queue, cluster_count * column_count); array<int> arr_labels = array<int>::empty(queue, row_count); array<Float> arr_objective_function_value = array<Float>::empty(queue, 1); array<int> arr_iteration_count = array<int>::empty(queue, 1); const auto daal_initial_centroids = interop::convert_to_daal_sycl_homogen_table(queue, arr_initial_centroids, cluster_count, column_count); const auto daal_centroids = interop::convert_to_daal_sycl_homogen_table(queue, arr_centroids, cluster_count, column_count); const auto daal_labels = interop::convert_to_daal_sycl_homogen_table(queue, arr_labels, row_count, 1); const auto daal_objective_function_value = interop::convert_to_daal_sycl_homogen_table(queue, arr_objective_function_value, 1, 1); const auto daal_iteration_count = interop::convert_to_daal_sycl_homogen_table(queue, arr_iteration_count, 1, 1); daal::data_management::NumericTable* daal_input[2] = { daal_data.get(), daal_initial_centroids.get() }; daal::data_management::NumericTable* daal_output[4] = { daal_centroids.get(), 
daal_labels.get(), daal_objective_function_value.get(), daal_iteration_count.get() }; interop::status_to_exception( daal_kmeans_lloyd_dense_ucapi_kernel_t<Float>().compute(daal_input, daal_output, &par)); return infer_result() .set_labels( dal::detail::homogen_table_builder{}.reset(arr_labels, row_count, 1).build()) .set_objective_function_value(static_cast<double>(arr_objective_function_value[0])); } }; template struct infer_kernel_gpu<float, method::by_default>; template struct infer_kernel_gpu<double, method::by_default>; } // namespace oneapi::dal::kmeans::backend
1
24,491
This alias is just for a single occurrence. Maybe it isn't necessary?
oneapi-src-oneDAL
cpp
@@ -1198,7 +1198,7 @@ class CppGenerator : public BaseGenerator { code_ += " }"; } } - code_ += " default: return false;"; + code_ += " default: return true;"; code_ += " }"; code_ += "}"; code_ += "";
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // independent from idl_parser, since this code is not needed for most clients #include "flatbuffers/code_generators.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" #include "flatbuffers/util.h" #include <unordered_set> namespace flatbuffers { // Pedantic warning free version of toupper(). inline char ToUpper(char c) { return static_cast<char>(::toupper(c)); } static std::string GeneratedFileName(const std::string &path, const std::string &file_name) { return path + file_name + "_generated.h"; } namespace cpp { class CppGenerator : public BaseGenerator { public: CppGenerator(const Parser &parser, const std::string &path, const std::string &file_name) : BaseGenerator(parser, path, file_name, "", "::"), cur_name_space_(nullptr) { static const char * const keywords[] = { "alignas", "alignof", "and", "and_eq", "asm", "atomic_cancel", "atomic_commit", "atomic_noexcept", "auto", "bitand", "bitor", "bool", "break", "case", "catch", "char", "char16_t", "char32_t", "class", "compl", "concept", "const", "constexpr", "const_cast", "continue", "co_await", "co_return", "co_yield", "decltype", "default", "delete", "do", "double", "dynamic_cast", "else", "enum", "explicit", "export", "extern", "false", "float", "for", "friend", "goto", "if", "import", "inline", "int", "long", "module", "mutable", "namespace", "new", "noexcept", "not", "not_eq", "nullptr", "operator", "or", "or_eq", "private", "protected", "public", "register", "reinterpret_cast", "requires", "return", "short", "signed", "sizeof", "static", "static_assert", "static_cast", "struct", "switch", "synchronized", "template", "this", "thread_local", "throw", "true", "try", "typedef", "typeid", "typename", "union", "unsigned", "using", "virtual", "void", "volatile", "wchar_t", "while", "xor", "xor_eq", nullptr }; for (auto kw = keywords; *kw; kw++) keywords_.insert(*kw); } std::string GenIncludeGuard() const { // Generate include guard. std::string guard = file_name_; // Remove any non-alpha-numeric characters that may appear in a filename. struct IsAlnum { bool operator()(char c) const { return !is_alnum(c); } }; guard.erase(std::remove_if(guard.begin(), guard.end(), IsAlnum()), guard.end()); guard = "FLATBUFFERS_GENERATED_" + guard; guard += "_"; // For further uniqueness, also add the namespace. 
auto name_space = parser_.current_namespace_; for (auto it = name_space->components.begin(); it != name_space->components.end(); ++it) { guard += *it + "_"; } guard += "H_"; std::transform(guard.begin(), guard.end(), guard.begin(), ToUpper); return guard; } void GenIncludeDependencies() { int num_includes = 0; for (auto it = parser_.native_included_files_.begin(); it != parser_.native_included_files_.end(); ++it) { code_ += "#include \"" + *it + "\""; num_includes++; } for (auto it = parser_.included_files_.begin(); it != parser_.included_files_.end(); ++it) { if (it->second.empty()) continue; auto noext = flatbuffers::StripExtension(it->second); auto basename = flatbuffers::StripPath(noext); code_ += "#include \"" + parser_.opts.include_prefix + (parser_.opts.keep_include_path ? noext : basename) + "_generated.h\""; num_includes++; } if (num_includes) code_ += ""; } std::string EscapeKeyword(const std::string &name) const { return keywords_.find(name) == keywords_.end() ? name : name + "_"; } std::string Name(const Definition &def) const { return EscapeKeyword(def.name); } std::string Name(const EnumVal &ev) const { return EscapeKeyword(ev.name); } // Iterate through all definitions we haven't generate code for (enums, // structs, and tables) and output them to a single file. bool generate() { code_.Clear(); code_ += "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n"; const auto include_guard = GenIncludeGuard(); code_ += "#ifndef " + include_guard; code_ += "#define " + include_guard; code_ += ""; if (parser_.opts.gen_nullable) { code_ += "#pragma clang system_header\n\n"; } code_ += "#include \"flatbuffers/flatbuffers.h\""; if (parser_.uses_flexbuffers_) { code_ += "#include \"flatbuffers/flexbuffers.h\""; } code_ += ""; if (parser_.opts.include_dependence_headers) { GenIncludeDependencies(); } FLATBUFFERS_ASSERT(!cur_name_space_); // Generate forward declarations for all structs/tables, since they may // have circular references. for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.generated) { SetNameSpace(struct_def.defined_namespace); code_ += "struct " + Name(struct_def) + ";"; if (parser_.opts.generate_object_based_api) { auto nativeName = NativeName(Name(struct_def), &struct_def, parser_.opts); if (!struct_def.fixed) { code_ += "struct " + nativeName + ";"; } } code_ += ""; } } // Generate forward declarations for all equal operators if (parser_.opts.generate_object_based_api && parser_.opts.gen_compare) { for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.generated) { SetNameSpace(struct_def.defined_namespace); auto nativeName = NativeName(Name(struct_def), &struct_def, parser_.opts); code_ += "bool operator==(const " + nativeName + " &lhs, const " + nativeName + " &rhs);"; } } code_ += ""; } // Generate preablmle code for mini reflection. if (parser_.opts.mini_reflect != IDLOptions::kNone) { // To break cyclic dependencies, first pre-declare all tables/structs. for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenMiniReflectPre(&struct_def); } } } // Generate code for all the enum declarations. 
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end(); ++it) { const auto &enum_def = **it; if (!enum_def.generated) { SetNameSpace(enum_def.defined_namespace); GenEnum(enum_def); } } // Generate code for all structs, then all tables. for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (struct_def.fixed && !struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenStruct(struct_def); } } for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.fixed && !struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenTable(struct_def); } } for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.fixed && !struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenTablePost(struct_def); } } // Generate code for union verifiers. for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end(); ++it) { const auto &enum_def = **it; if (enum_def.is_union && !enum_def.generated) { SetNameSpace(enum_def.defined_namespace); GenUnionPost(enum_def); } } // Generate code for mini reflection. if (parser_.opts.mini_reflect != IDLOptions::kNone) { // Then the unions/enums that may refer to them. for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end(); ++it) { const auto &enum_def = **it; if (!enum_def.generated) { SetNameSpace(enum_def.defined_namespace); GenMiniReflect(nullptr, &enum_def); } } // Then the full tables/structs. for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (!struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenMiniReflect(&struct_def, nullptr); } } } // Generate convenient global helper functions: if (parser_.root_struct_def_) { auto &struct_def = *parser_.root_struct_def_; SetNameSpace(struct_def.defined_namespace); auto name = Name(struct_def); auto qualified_name = cur_name_space_->GetFullyQualifiedName(name); auto cpp_name = TranslateNameSpace(qualified_name); code_.SetValue("STRUCT_NAME", name); code_.SetValue("CPP_NAME", cpp_name); code_.SetValue("NULLABLE_EXT", NullableExtension()); // The root datatype accessor: code_ += "inline \\"; code_ += "const {{CPP_NAME}} *{{NULLABLE_EXT}}Get{{STRUCT_NAME}}(const void " "*buf) {"; code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(buf);"; code_ += "}"; code_ += ""; code_ += "inline \\"; code_ += "const {{CPP_NAME}} *{{NULLABLE_EXT}}GetSizePrefixed{{STRUCT_NAME}}(const void " "*buf) {"; code_ += " return flatbuffers::GetSizePrefixedRoot<{{CPP_NAME}}>(buf);"; code_ += "}"; code_ += ""; if (parser_.opts.mutable_buffer) { code_ += "inline \\"; code_ += "{{STRUCT_NAME}} *GetMutable{{STRUCT_NAME}}(void *buf) {"; code_ += " return flatbuffers::GetMutableRoot<{{STRUCT_NAME}}>(buf);"; code_ += "}"; code_ += ""; } if (parser_.file_identifier_.length()) { // Return the identifier code_ += "inline const char *{{STRUCT_NAME}}Identifier() {"; code_ += " return \"" + parser_.file_identifier_ + "\";"; code_ += "}"; code_ += ""; // Check if a buffer has the identifier. code_ += "inline \\"; code_ += "bool {{STRUCT_NAME}}BufferHasIdentifier(const void *buf) {"; code_ += " return flatbuffers::BufferHasIdentifier("; code_ += " buf, {{STRUCT_NAME}}Identifier());"; code_ += "}"; code_ += ""; } // The root verifier. 
if (parser_.file_identifier_.length()) { code_.SetValue("ID", name + "Identifier()"); } else { code_.SetValue("ID", "nullptr"); } code_ += "inline bool Verify{{STRUCT_NAME}}Buffer("; code_ += " flatbuffers::Verifier &verifier) {"; code_ += " return verifier.VerifyBuffer<{{CPP_NAME}}>({{ID}});"; code_ += "}"; code_ += ""; code_ += "inline bool VerifySizePrefixed{{STRUCT_NAME}}Buffer("; code_ += " flatbuffers::Verifier &verifier) {"; code_ += " return verifier.VerifySizePrefixedBuffer<{{CPP_NAME}}>({{ID}});"; code_ += "}"; code_ += ""; if (parser_.file_extension_.length()) { // Return the extension code_ += "inline const char *{{STRUCT_NAME}}Extension() {"; code_ += " return \"" + parser_.file_extension_ + "\";"; code_ += "}"; code_ += ""; } // Finish a buffer with a given root object: code_ += "inline void Finish{{STRUCT_NAME}}Buffer("; code_ += " flatbuffers::FlatBufferBuilder &fbb,"; code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {"; if (parser_.file_identifier_.length()) code_ += " fbb.Finish(root, {{STRUCT_NAME}}Identifier());"; else code_ += " fbb.Finish(root);"; code_ += "}"; code_ += ""; code_ += "inline void FinishSizePrefixed{{STRUCT_NAME}}Buffer("; code_ += " flatbuffers::FlatBufferBuilder &fbb,"; code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {"; if (parser_.file_identifier_.length()) code_ += " fbb.FinishSizePrefixed(root, {{STRUCT_NAME}}Identifier());"; else code_ += " fbb.FinishSizePrefixed(root);"; code_ += "}"; code_ += ""; if (parser_.opts.generate_object_based_api) { // A convenient root unpack function. auto native_name = NativeName(WrapInNameSpace(struct_def), &struct_def, parser_.opts); code_.SetValue("UNPACK_RETURN", GenTypeNativePtr(native_name, nullptr, false)); code_.SetValue("UNPACK_TYPE", GenTypeNativePtr(native_name, nullptr, true)); code_ += "inline {{UNPACK_RETURN}} UnPack{{STRUCT_NAME}}("; code_ += " const void *buf,"; code_ += " const flatbuffers::resolver_function_t *res = nullptr) {"; code_ += " return {{UNPACK_TYPE}}\\"; code_ += "(Get{{STRUCT_NAME}}(buf)->UnPack(res));"; code_ += "}"; code_ += ""; } } if (cur_name_space_) SetNameSpace(nullptr); // Close the include guard. code_ += "#endif // " + include_guard; const auto file_path = GeneratedFileName(path_, file_name_); const auto final_code = code_.ToString(); return SaveFile(file_path.c_str(), final_code, false); } private: CodeWriter code_; std::unordered_set<std::string> keywords_; // This tracks the current namespace so we can insert namespace declarations. const Namespace *cur_name_space_; const Namespace *CurrentNameSpace() const { return cur_name_space_; } // Translates a qualified name in flatbuffer text format to the same name in // the equivalent C++ namespace. 
static std::string TranslateNameSpace(const std::string &qualified_name) { std::string cpp_qualified_name = qualified_name; size_t start_pos = 0; while ((start_pos = cpp_qualified_name.find(".", start_pos)) != std::string::npos) { cpp_qualified_name.replace(start_pos, 1, "::"); } return cpp_qualified_name; } void GenComment(const std::vector<std::string> &dc, const char *prefix = "") { std::string text; ::flatbuffers::GenComment(dc, &text, nullptr, prefix); code_ += text + "\\"; } // Return a C++ type from the table in idl.h std::string GenTypeBasic(const Type &type, bool user_facing_type) const { static const char * const ctypename[] = { // clang-format off #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, \ RTYPE) \ #CTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD // clang-format on }; if (user_facing_type) { if (type.enum_def) return WrapInNameSpace(*type.enum_def); if (type.base_type == BASE_TYPE_BOOL) return "bool"; } return ctypename[type.base_type]; } // Return a C++ pointer type, specialized to the actual struct/table types, // and vector element types. std::string GenTypePointer(const Type &type) const { switch (type.base_type) { case BASE_TYPE_STRING: { return "flatbuffers::String"; } case BASE_TYPE_VECTOR: { const auto type_name = GenTypeWire(type.VectorType(), "", false); return "flatbuffers::Vector<" + type_name + ">"; } case BASE_TYPE_STRUCT: { return WrapInNameSpace(*type.struct_def); } case BASE_TYPE_UNION: // fall through default: { return "void"; } } } // Return a C++ type for any type (scalar/pointer) specifically for // building a flatbuffer. std::string GenTypeWire(const Type &type, const char *postfix, bool user_facing_type) const { if (IsScalar(type.base_type)) { return GenTypeBasic(type, user_facing_type) + postfix; } else if (IsStruct(type)) { return "const " + GenTypePointer(type) + " *"; } else { return "flatbuffers::Offset<" + GenTypePointer(type) + ">" + postfix; } } // Return a C++ type for any type (scalar/pointer) that reflects its // serialized size. std::string GenTypeSize(const Type &type) const { if (IsScalar(type.base_type)) { return GenTypeBasic(type, false); } else if (IsStruct(type)) { return GenTypePointer(type); } else { return "flatbuffers::uoffset_t"; } } std::string NullableExtension() { return parser_.opts.gen_nullable ? " _Nullable " : ""; } static std::string NativeName(const std::string &name, const StructDef *sd, const IDLOptions &opts) { return sd && !sd->fixed ? opts.object_prefix + name + opts.object_suffix : name; } const std::string &PtrType(const FieldDef *field) { auto attr = field ? field->attributes.Lookup("cpp_ptr_type") : nullptr; return attr ? attr->constant : parser_.opts.cpp_object_api_pointer_type; } const std::string NativeString(const FieldDef *field) { auto attr = field ? field->attributes.Lookup("cpp_str_type") : nullptr; auto &ret = attr ? attr->constant : parser_.opts.cpp_object_api_string_type; if (ret.empty()) { return "std::string"; } return ret; } std::string GenTypeNativePtr(const std::string &type, const FieldDef *field, bool is_constructor) { auto &ptr_type = PtrType(field); if (ptr_type != "naked") { return (ptr_type != "default_ptr_type" ? 
ptr_type : parser_.opts.cpp_object_api_pointer_type) + "<" + type + ">"; } else if (is_constructor) { return ""; } else { return type + " *"; } } std::string GenPtrGet(const FieldDef &field) { auto cpp_ptr_type_get = field.attributes.Lookup("cpp_ptr_type_get"); if (cpp_ptr_type_get) return cpp_ptr_type_get->constant; auto &ptr_type = PtrType(&field); return ptr_type == "naked" ? "" : ".get()"; } std::string GenTypeNative(const Type &type, bool invector, const FieldDef &field) { switch (type.base_type) { case BASE_TYPE_STRING: { return NativeString(&field); } case BASE_TYPE_VECTOR: { const auto type_name = GenTypeNative(type.VectorType(), true, field); if (type.struct_def && type.struct_def->attributes.Lookup("native_custom_alloc")) { auto native_custom_alloc = type.struct_def->attributes.Lookup("native_custom_alloc"); return "std::vector<" + type_name + "," + native_custom_alloc->constant + "<" + type_name + ">>"; } else return "std::vector<" + type_name + ">"; } case BASE_TYPE_STRUCT: { auto type_name = WrapInNameSpace(*type.struct_def); if (IsStruct(type)) { auto native_type = type.struct_def->attributes.Lookup("native_type"); if (native_type) { type_name = native_type->constant; } if (invector || field.native_inline) { return type_name; } else { return GenTypeNativePtr(type_name, &field, false); } } else { return GenTypeNativePtr( NativeName(type_name, type.struct_def, parser_.opts), &field, false); } } case BASE_TYPE_UNION: { return type.enum_def->name + "Union"; } default: { return GenTypeBasic(type, true); } } } // Return a C++ type for any type (scalar/pointer) specifically for // using a flatbuffer. std::string GenTypeGet(const Type &type, const char *afterbasic, const char *beforeptr, const char *afterptr, bool user_facing_type) { if (IsScalar(type.base_type)) { return GenTypeBasic(type, user_facing_type) + afterbasic; } else { return beforeptr + GenTypePointer(type) + afterptr; } } std::string GenEnumDecl(const EnumDef &enum_def) const { const IDLOptions &opts = parser_.opts; return (opts.scoped_enums ? "enum class " : "enum ") + Name(enum_def); } std::string GenEnumValDecl(const EnumDef &enum_def, const std::string &enum_val) const { const IDLOptions &opts = parser_.opts; return opts.prefixed_enums ? Name(enum_def) + "_" + enum_val : enum_val; } std::string GetEnumValUse(const EnumDef &enum_def, const EnumVal &enum_val) const { const IDLOptions &opts = parser_.opts; if (opts.scoped_enums) { return Name(enum_def) + "::" + Name(enum_val); } else if (opts.prefixed_enums) { return Name(enum_def) + "_" + Name(enum_val); } else { return Name(enum_val); } } std::string StripUnionType(const std::string &name) { return name.substr(0, name.size() - strlen(UnionTypeFieldSuffix())); } std::string GetUnionElement(const EnumVal &ev, bool wrap, bool actual_type, bool native_type = false) { if (ev.union_type.base_type == BASE_TYPE_STRUCT) { auto name = actual_type ? ev.union_type.struct_def->name : Name(ev); return wrap ? WrapInNameSpace(ev.union_type.struct_def->defined_namespace, name) : name; } else if (ev.union_type.base_type == BASE_TYPE_STRING) { return actual_type ? (native_type ? 
"std::string" : "flatbuffers::String") : Name(ev); } else { FLATBUFFERS_ASSERT(false); return Name(ev); } } std::string UnionVerifySignature(const EnumDef &enum_def) { return "bool Verify" + Name(enum_def) + "(flatbuffers::Verifier &verifier, const void *obj, " + Name(enum_def) + " type)"; } std::string UnionVectorVerifySignature(const EnumDef &enum_def) { return "bool Verify" + Name(enum_def) + "Vector" + "(flatbuffers::Verifier &verifier, " + "const flatbuffers::Vector<flatbuffers::Offset<void>> *values, " + "const flatbuffers::Vector<uint8_t> *types)"; } std::string UnionUnPackSignature(const EnumDef &enum_def, bool inclass) { return (inclass ? "static " : "") + std::string("void *") + (inclass ? "" : Name(enum_def) + "Union::") + "UnPack(const void *obj, " + Name(enum_def) + " type, const flatbuffers::resolver_function_t *resolver)"; } std::string UnionPackSignature(const EnumDef &enum_def, bool inclass) { return "flatbuffers::Offset<void> " + (inclass ? "" : Name(enum_def) + "Union::") + "Pack(flatbuffers::FlatBufferBuilder &_fbb, " + "const flatbuffers::rehasher_function_t *_rehasher" + (inclass ? " = nullptr" : "") + ") const"; } std::string TableCreateSignature(const StructDef &struct_def, bool predecl, const IDLOptions &opts) { return "flatbuffers::Offset<" + Name(struct_def) + "> Create" + Name(struct_def) + "(flatbuffers::FlatBufferBuilder &_fbb, const " + NativeName(Name(struct_def), &struct_def, opts) + " *_o, const flatbuffers::rehasher_function_t *_rehasher" + (predecl ? " = nullptr" : "") + ")"; } std::string TablePackSignature(const StructDef &struct_def, bool inclass, const IDLOptions &opts) { return std::string(inclass ? "static " : "") + "flatbuffers::Offset<" + Name(struct_def) + "> " + (inclass ? "" : Name(struct_def) + "::") + "Pack(flatbuffers::FlatBufferBuilder &_fbb, " + "const " + NativeName(Name(struct_def), &struct_def, opts) + "* _o, " + "const flatbuffers::rehasher_function_t *_rehasher" + (inclass ? " = nullptr" : "") + ")"; } std::string TableUnPackSignature(const StructDef &struct_def, bool inclass, const IDLOptions &opts) { return NativeName(Name(struct_def), &struct_def, opts) + " *" + (inclass ? "" : Name(struct_def) + "::") + "UnPack(const flatbuffers::resolver_function_t *_resolver" + (inclass ? " = nullptr" : "") + ") const"; } std::string TableUnPackToSignature(const StructDef &struct_def, bool inclass, const IDLOptions &opts) { return "void " + (inclass ? "" : Name(struct_def) + "::") + "UnPackTo(" + NativeName(Name(struct_def), &struct_def, opts) + " *" + "_o, const flatbuffers::resolver_function_t *_resolver" + (inclass ? " = nullptr" : "") + ") const"; } void GenMiniReflectPre(const StructDef *struct_def) { code_.SetValue("NAME", struct_def->name); code_ += "inline const flatbuffers::TypeTable *{{NAME}}TypeTable();"; code_ += ""; } void GenMiniReflect(const StructDef *struct_def, const EnumDef *enum_def) { code_.SetValue("NAME", struct_def ? struct_def->name : enum_def->name); code_.SetValue("SEQ_TYPE", struct_def ? (struct_def->fixed ? "ST_STRUCT" : "ST_TABLE") : (enum_def->is_union ? "ST_UNION" : "ST_ENUM")); auto num_fields = struct_def ? 
struct_def->fields.vec.size() : enum_def->vals.vec.size(); code_.SetValue("NUM_FIELDS", NumToString(num_fields)); std::vector<std::string> names; std::vector<Type> types; bool consecutive_enum_from_zero = true; if (struct_def) { for (auto it = struct_def->fields.vec.begin(); it != struct_def->fields.vec.end(); ++it) { const auto &field = **it; names.push_back(Name(field)); types.push_back(field.value.type); } } else { for (auto it = enum_def->vals.vec.begin(); it != enum_def->vals.vec.end(); ++it) { const auto &ev = **it; names.push_back(Name(ev)); types.push_back(enum_def->is_union ? ev.union_type : Type(enum_def->underlying_type)); if (static_cast<int64_t>(it - enum_def->vals.vec.begin()) != ev.value) { consecutive_enum_from_zero = false; } } } std::string ts; std::vector<std::string> type_refs; for (auto it = types.begin(); it != types.end(); ++it) { auto &type = *it; if (!ts.empty()) ts += ",\n "; auto is_vector = type.base_type == BASE_TYPE_VECTOR; auto bt = is_vector ? type.element : type.base_type; auto et = IsScalar(bt) || bt == BASE_TYPE_STRING ? bt - BASE_TYPE_UTYPE + ET_UTYPE : ET_SEQUENCE; int ref_idx = -1; std::string ref_name = type.struct_def ? WrapInNameSpace(*type.struct_def) : type.enum_def ? WrapInNameSpace(*type.enum_def) : ""; if (!ref_name.empty()) { auto rit = type_refs.begin(); for (; rit != type_refs.end(); ++rit) { if (*rit == ref_name) { ref_idx = static_cast<int>(rit - type_refs.begin()); break; } } if (rit == type_refs.end()) { ref_idx = static_cast<int>(type_refs.size()); type_refs.push_back(ref_name); } } ts += "{ flatbuffers::" + std::string(ElementaryTypeNames()[et]) + ", " + NumToString(is_vector) + ", " + NumToString(ref_idx) + " }"; } std::string rs; for (auto it = type_refs.begin(); it != type_refs.end(); ++it) { if (!rs.empty()) rs += ",\n "; rs += *it + "TypeTable"; } std::string ns; for (auto it = names.begin(); it != names.end(); ++it) { if (!ns.empty()) ns += ",\n "; ns += "\"" + *it + "\""; } std::string vs; if (enum_def && !consecutive_enum_from_zero) { for (auto it = enum_def->vals.vec.begin(); it != enum_def->vals.vec.end(); ++it) { const auto &ev = **it; if (!vs.empty()) vs += ", "; vs += NumToString(ev.value); } } else if (struct_def && struct_def->fixed) { for (auto it = struct_def->fields.vec.begin(); it != struct_def->fields.vec.end(); ++it) { const auto &field = **it; vs += NumToString(field.value.offset); vs += ", "; } vs += NumToString(struct_def->bytesize); } code_.SetValue("TYPES", ts); code_.SetValue("REFS", rs); code_.SetValue("NAMES", ns); code_.SetValue("VALUES", vs); code_ += "inline const flatbuffers::TypeTable *{{NAME}}TypeTable() {"; if (num_fields) { code_ += " static const flatbuffers::TypeCode type_codes[] = {"; code_ += " {{TYPES}}"; code_ += " };"; } if (!type_refs.empty()) { code_ += " static const flatbuffers::TypeFunction type_refs[] = {"; code_ += " {{REFS}}"; code_ += " };"; } if (!vs.empty()) { code_ += " static const int64_t values[] = { {{VALUES}} };"; } auto has_names = num_fields && parser_.opts.mini_reflect == IDLOptions::kTypesAndNames; if (has_names) { code_ += " static const char * const names[] = {"; code_ += " {{NAMES}}"; code_ += " };"; } code_ += " static const flatbuffers::TypeTable tt = {"; code_ += std::string(" flatbuffers::{{SEQ_TYPE}}, {{NUM_FIELDS}}, ") + (num_fields ? "type_codes, " : "nullptr, ") + (!type_refs.empty() ? "type_refs, " : "nullptr, ") + (!vs.empty() ? "values, " : "nullptr, ") + (has_names ? 
"names" : "nullptr"); code_ += " };"; code_ += " return &tt;"; code_ += "}"; code_ += ""; } // Generate an enum declaration, // an enum string lookup table, // and an enum array of values void GenEnum(const EnumDef &enum_def) { code_.SetValue("ENUM_NAME", Name(enum_def)); code_.SetValue("BASE_TYPE", GenTypeBasic(enum_def.underlying_type, false)); code_.SetValue("SEP", ""); GenComment(enum_def.doc_comment); code_ += GenEnumDecl(enum_def) + "\\"; if (parser_.opts.scoped_enums) code_ += " : {{BASE_TYPE}}\\"; code_ += " {"; int64_t anyv = 0; const EnumVal *minv = nullptr, *maxv = nullptr; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; GenComment(ev.doc_comment, " "); code_.SetValue("KEY", GenEnumValDecl(enum_def, Name(ev))); code_.SetValue("VALUE", NumToString(ev.value)); code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\"; code_.SetValue("SEP", ",\n"); minv = !minv || minv->value > ev.value ? &ev : minv; maxv = !maxv || maxv->value < ev.value ? &ev : maxv; anyv |= ev.value; } if (parser_.opts.scoped_enums || parser_.opts.prefixed_enums) { FLATBUFFERS_ASSERT(minv && maxv); code_.SetValue("SEP", ",\n"); if (enum_def.attributes.Lookup("bit_flags")) { code_.SetValue("KEY", GenEnumValDecl(enum_def, "NONE")); code_.SetValue("VALUE", "0"); code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\"; code_.SetValue("KEY", GenEnumValDecl(enum_def, "ANY")); code_.SetValue("VALUE", NumToString(anyv)); code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\"; } else { // MIN & MAX are useless for bit_flags code_.SetValue("KEY", GenEnumValDecl(enum_def, "MIN")); code_.SetValue("VALUE", GenEnumValDecl(enum_def, minv->name)); code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\"; code_.SetValue("KEY", GenEnumValDecl(enum_def, "MAX")); code_.SetValue("VALUE", GenEnumValDecl(enum_def, maxv->name)); code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\"; } } code_ += ""; code_ += "};"; if (parser_.opts.scoped_enums && enum_def.attributes.Lookup("bit_flags")) { code_ += "FLATBUFFERS_DEFINE_BITMASK_OPERATORS({{ENUM_NAME}}, {{BASE_TYPE}})"; } code_ += ""; // Generate an array of all enumeration values auto num_fields = NumToString(enum_def.vals.vec.size()); code_ += "inline const {{ENUM_NAME}} (&EnumValues{{ENUM_NAME}}())[" + num_fields + "] {"; code_ += " static const {{ENUM_NAME}} values[] = {"; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; auto value = GetEnumValUse(enum_def, ev); auto suffix = *it != enum_def.vals.vec.back() ? "," : ""; code_ += " " + value + suffix; } code_ += " };"; code_ += " return values;"; code_ += "}"; code_ += ""; // Generate a generate string table for enum values. // Problem is, if values are very sparse that could generate really big // tables. Ideally in that case we generate a map lookup instead, but for // the moment we simply don't output a table at all. auto range = enum_def.vals.vec.back()->value - enum_def.vals.vec.front()->value + 1; // Average distance between values above which we consider a table // "too sparse". Change at will. 
static const int kMaxSparseness = 5; if (range / static_cast<int64_t>(enum_def.vals.vec.size()) < kMaxSparseness) { code_ += "inline const char * const *EnumNames{{ENUM_NAME}}() {"; code_ += " static const char * const names[] = {"; auto val = enum_def.vals.vec.front()->value; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; while (val++ != ev.value) { code_ += " \"\","; } code_ += " \"" + Name(ev) + "\","; } code_ += " nullptr"; code_ += " };"; code_ += " return names;"; code_ += "}"; code_ += ""; code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {"; code_ += " if (e < " + GetEnumValUse(enum_def, *enum_def.vals.vec.front()) + " || e > " + GetEnumValUse(enum_def, *enum_def.vals.vec.back()) + ") return \"\";"; code_ += " const size_t index = static_cast<int>(e)\\"; if (enum_def.vals.vec.front()->value) { auto vals = GetEnumValUse(enum_def, *enum_def.vals.vec.front()); code_ += " - static_cast<int>(" + vals + ")\\"; } code_ += ";"; code_ += " return EnumNames{{ENUM_NAME}}()[index];"; code_ += "}"; code_ += ""; } else { code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {"; code_ += " switch (e) {"; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; code_ += " case " + GetEnumValUse(enum_def, ev) + ": return \"" + Name(ev) + "\";"; } code_ += " default: return \"\";"; code_ += " }"; code_ += "}"; code_ += ""; } // Generate type traits for unions to map from a type to union enum value. if (enum_def.is_union && !enum_def.uses_multiple_type_instances) { for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; if (it == enum_def.vals.vec.begin()) { code_ += "template<typename T> struct {{ENUM_NAME}}Traits {"; } else { auto name = GetUnionElement(ev, true, true); code_ += "template<> struct {{ENUM_NAME}}Traits<" + name + "> {"; } auto value = GetEnumValUse(enum_def, ev); code_ += " static const {{ENUM_NAME}} enum_value = " + value + ";"; code_ += "};"; code_ += ""; } } if (parser_.opts.generate_object_based_api && enum_def.is_union) { // Generate a union type code_.SetValue("NAME", Name(enum_def)); code_.SetValue("NONE", GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE"))); code_ += "struct {{NAME}}Union {"; code_ += " {{NAME}} type;"; code_ += " void *value;"; code_ += ""; code_ += " {{NAME}}Union() : type({{NONE}}), value(nullptr) {}"; code_ += " {{NAME}}Union({{NAME}}Union&& u) FLATBUFFERS_NOEXCEPT :"; code_ += " type({{NONE}}), value(nullptr)"; code_ += " { std::swap(type, u.type); std::swap(value, u.value); }"; code_ += " {{NAME}}Union(const {{NAME}}Union &) FLATBUFFERS_NOEXCEPT;"; code_ += " {{NAME}}Union &operator=(const {{NAME}}Union &u) " "FLATBUFFERS_NOEXCEPT"; code_ += " { {{NAME}}Union t(u); std::swap(type, t.type); std::swap(value, " "t.value); return *this; }"; code_ += " {{NAME}}Union &operator=({{NAME}}Union &&u) FLATBUFFERS_NOEXCEPT"; code_ += " { std::swap(type, u.type); std::swap(value, u.value); return " "*this; }"; code_ += " ~{{NAME}}Union() { Reset(); }"; code_ += ""; code_ += " void Reset();"; code_ += ""; if (!enum_def.uses_multiple_type_instances) { code_ += "#ifndef FLATBUFFERS_CPP98_STL"; code_ += " template <typename T>"; code_ += " void Set(T&& val) {"; code_ += " Reset();"; code_ += " type = {{NAME}}Traits<typename T::TableType>::enum_value;"; code_ += " if (type != {{NONE}}) {"; code_ += " value = new T(std::forward<T>(val));"; code_ += " }"; code_ += " }"; code_ += "#endif // 
FLATBUFFERS_CPP98_STL"; code_ += ""; } code_ += " " + UnionUnPackSignature(enum_def, true) + ";"; code_ += " " + UnionPackSignature(enum_def, true) + ";"; code_ += ""; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; if (!ev.value) { continue; } const auto native_type = NativeName(GetUnionElement(ev, true, true, true), ev.union_type.struct_def, parser_.opts); code_.SetValue("NATIVE_TYPE", native_type); code_.SetValue("NATIVE_NAME", Name(ev)); code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev)); code_ += " {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() {"; code_ += " return type == {{NATIVE_ID}} ?"; code_ += " reinterpret_cast<{{NATIVE_TYPE}} *>(value) : nullptr;"; code_ += " }"; code_ += " const {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() const {"; code_ += " return type == {{NATIVE_ID}} ?"; code_ += " reinterpret_cast<const {{NATIVE_TYPE}} *>(value) : nullptr;"; code_ += " }"; } code_ += "};"; code_ += ""; if (parser_.opts.gen_compare) { code_ += ""; code_ += "inline bool operator==(const {{NAME}}Union &lhs, const {{NAME}}Union &rhs) {"; code_ += " if (lhs.type != rhs.type) return false;"; code_ += " switch (lhs.type) {"; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev)); if (ev.value) { const auto native_type = NativeName(GetUnionElement(ev, true, true, true), ev.union_type.struct_def, parser_.opts); code_.SetValue("NATIVE_TYPE", native_type); code_ += " case {{NATIVE_ID}}: {"; code_ += " return *(reinterpret_cast<const {{NATIVE_TYPE}} *>(lhs.value)) =="; code_ += " *(reinterpret_cast<const {{NATIVE_TYPE}} *>(rhs.value));"; code_ += " }"; } else { code_ += " case {{NATIVE_ID}}: {"; code_ += " return true;"; // "NONE" enum value. code_ += " }"; } } code_ += " default: {"; code_ += " return false;"; code_ += " }"; code_ += " }"; code_ += "}"; } } if (enum_def.is_union) { code_ += UnionVerifySignature(enum_def) + ";"; code_ += UnionVectorVerifySignature(enum_def) + ";"; code_ += ""; } } void GenUnionPost(const EnumDef &enum_def) { // Generate a verifier function for this union that can be called by the // table verifier functions. It uses a switch case to select a specific // verifier function to call, this should be safe even if the union type // has been corrupted, since the verifiers will simply fail when called // on the wrong type. code_.SetValue("ENUM_NAME", Name(enum_def)); code_ += "inline " + UnionVerifySignature(enum_def) + " {"; code_ += " switch (type) {"; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; code_.SetValue("LABEL", GetEnumValUse(enum_def, ev)); if (ev.value) { code_.SetValue("TYPE", GetUnionElement(ev, true, true)); code_ += " case {{LABEL}}: {"; auto getptr = " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);"; if (ev.union_type.base_type == BASE_TYPE_STRUCT) { if (ev.union_type.struct_def->fixed) { code_ += " return true;"; } else { code_ += getptr; code_ += " return verifier.VerifyTable(ptr);"; } } else if (ev.union_type.base_type == BASE_TYPE_STRING) { code_ += getptr; code_ += " return verifier.VerifyString(ptr);"; } else { FLATBUFFERS_ASSERT(false); } code_ += " }"; } else { code_ += " case {{LABEL}}: {"; code_ += " return true;"; // "NONE" enum value. 
code_ += " }"; } } code_ += " default: return false;"; code_ += " }"; code_ += "}"; code_ += ""; code_ += "inline " + UnionVectorVerifySignature(enum_def) + " {"; code_ += " if (!values || !types) return !values && !types;"; code_ += " if (values->size() != types->size()) return false;"; code_ += " for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {"; code_ += " if (!Verify" + Name(enum_def) + "("; code_ += " verifier, values->Get(i), types->GetEnum<" + Name(enum_def) + ">(i))) {"; code_ += " return false;"; code_ += " }"; code_ += " }"; code_ += " return true;"; code_ += "}"; code_ += ""; if (parser_.opts.generate_object_based_api) { // Generate union Unpack() and Pack() functions. code_ += "inline " + UnionUnPackSignature(enum_def, false) + " {"; code_ += " switch (type) {"; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; if (!ev.value) { continue; } code_.SetValue("LABEL", GetEnumValUse(enum_def, ev)); code_.SetValue("TYPE", GetUnionElement(ev, true, true)); code_ += " case {{LABEL}}: {"; code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);"; if (ev.union_type.base_type == BASE_TYPE_STRUCT) { if (ev.union_type.struct_def->fixed) { code_ += " return new " + WrapInNameSpace(*ev.union_type.struct_def) + "(*ptr);"; } else { code_ += " return ptr->UnPack(resolver);"; } } else if (ev.union_type.base_type == BASE_TYPE_STRING) { code_ += " return new std::string(ptr->c_str(), ptr->size());"; } else { FLATBUFFERS_ASSERT(false); } code_ += " }"; } code_ += " default: return nullptr;"; code_ += " }"; code_ += "}"; code_ += ""; code_ += "inline " + UnionPackSignature(enum_def, false) + " {"; code_ += " switch (type) {"; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { auto &ev = **it; if (!ev.value) { continue; } code_.SetValue("LABEL", GetEnumValUse(enum_def, ev)); code_.SetValue("TYPE", NativeName(GetUnionElement(ev, true, true, true), ev.union_type.struct_def, parser_.opts)); code_.SetValue("NAME", GetUnionElement(ev, false, true)); code_ += " case {{LABEL}}: {"; code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(value);"; if (ev.union_type.base_type == BASE_TYPE_STRUCT) { if (ev.union_type.struct_def->fixed) { code_ += " return _fbb.CreateStruct(*ptr).Union();"; } else { code_ += " return Create{{NAME}}(_fbb, ptr, _rehasher).Union();"; } } else if (ev.union_type.base_type == BASE_TYPE_STRING) { code_ += " return _fbb.CreateString(*ptr).Union();"; } else { FLATBUFFERS_ASSERT(false); } code_ += " }"; } code_ += " default: return 0;"; code_ += " }"; code_ += "}"; code_ += ""; // Union copy constructor code_ += "inline {{ENUM_NAME}}Union::{{ENUM_NAME}}Union(const " "{{ENUM_NAME}}Union &u) FLATBUFFERS_NOEXCEPT : type(u.type), " "value(nullptr) {"; code_ += " switch (type) {"; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; if (!ev.value) { continue; } code_.SetValue("LABEL", GetEnumValUse(enum_def, ev)); code_.SetValue("TYPE", NativeName(GetUnionElement(ev, true, true, true), ev.union_type.struct_def, parser_.opts)); code_ += " case {{LABEL}}: {"; bool copyable = true; if (ev.union_type.base_type == BASE_TYPE_STRUCT) { // Don't generate code to copy if table is not copyable. // TODO(wvo): make tables copyable instead. 
for (auto fit = ev.union_type.struct_def->fields.vec.begin(); fit != ev.union_type.struct_def->fields.vec.end(); ++fit) { const auto &field = **fit; if (!field.deprecated && field.value.type.struct_def && !field.native_inline) { copyable = false; break; } } } if (copyable) { code_ += " value = new {{TYPE}}(*reinterpret_cast<{{TYPE}} *>" "(u.value));"; } else { code_ += " FLATBUFFERS_ASSERT(false); // {{TYPE}} not copyable."; } code_ += " break;"; code_ += " }"; } code_ += " default:"; code_ += " break;"; code_ += " }"; code_ += "}"; code_ += ""; // Union Reset() function. code_.SetValue("NONE", GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE"))); code_ += "inline void {{ENUM_NAME}}Union::Reset() {"; code_ += " switch (type) {"; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; if (!ev.value) { continue; } code_.SetValue("LABEL", GetEnumValUse(enum_def, ev)); code_.SetValue("TYPE", NativeName(GetUnionElement(ev, true, true, true), ev.union_type.struct_def, parser_.opts)); code_ += " case {{LABEL}}: {"; code_ += " auto ptr = reinterpret_cast<{{TYPE}} *>(value);"; code_ += " delete ptr;"; code_ += " break;"; code_ += " }"; } code_ += " default: break;"; code_ += " }"; code_ += " value = nullptr;"; code_ += " type = {{NONE}};"; code_ += "}"; code_ += ""; } } // Generates a value with optionally a cast applied if the field has a // different underlying type from its interface type (currently only the // case for enums. "from" specify the direction, true meaning from the // underlying type to the interface type. std::string GenUnderlyingCast(const FieldDef &field, bool from, const std::string &val) { if (from && field.value.type.base_type == BASE_TYPE_BOOL) { return val + " != 0"; } else if ((field.value.type.enum_def && IsScalar(field.value.type.base_type)) || field.value.type.base_type == BASE_TYPE_BOOL) { return "static_cast<" + GenTypeBasic(field.value.type, from) + ">(" + val + ")"; } else { return val; } } std::string GenFieldOffsetName(const FieldDef &field) { std::string uname = Name(field); std::transform(uname.begin(), uname.end(), uname.begin(), ToUpper); return "VT_" + uname; } void GenFullyQualifiedNameGetter(const StructDef &struct_def, const std::string &name) { if (!parser_.opts.generate_name_strings) { return; } auto fullname = struct_def.defined_namespace->GetFullyQualifiedName(name); code_.SetValue("NAME", fullname); code_.SetValue("CONSTEXPR", "FLATBUFFERS_CONSTEXPR"); code_ += " static {{CONSTEXPR}} const char *GetFullyQualifiedName() {"; code_ += " return \"{{NAME}}\";"; code_ += " }"; } std::string GenDefaultConstant(const FieldDef &field) { return field.value.type.base_type == BASE_TYPE_FLOAT ? field.value.constant + "f" : field.value.constant; } std::string GetDefaultScalarValue(const FieldDef &field, bool is_ctor) { if (field.value.type.enum_def && IsScalar(field.value.type.base_type)) { auto ev = field.value.type.enum_def->ReverseLookup( StringToInt(field.value.constant.c_str()), false); if (ev) { return WrapInNameSpace(field.value.type.enum_def->defined_namespace, GetEnumValUse(*field.value.type.enum_def, *ev)); } else { return GenUnderlyingCast(field, true, field.value.constant); } } else if (field.value.type.base_type == BASE_TYPE_BOOL) { return field.value.constant == "0" ? 
"false" : "true"; } else if (field.attributes.Lookup("cpp_type")) { if (is_ctor) { if (PtrType(&field) == "naked") { return "nullptr"; } else { return ""; } } else { return "0"; } } else { return GenDefaultConstant(field); } } void GenParam(const FieldDef &field, bool direct, const char *prefix) { code_.SetValue("PRE", prefix); code_.SetValue("PARAM_NAME", Name(field)); if (direct && field.value.type.base_type == BASE_TYPE_STRING) { code_.SetValue("PARAM_TYPE", "const char *"); code_.SetValue("PARAM_VALUE", "nullptr"); } else if (direct && field.value.type.base_type == BASE_TYPE_VECTOR) { const auto vtype = field.value.type.VectorType(); std::string type; if (IsStruct(vtype)) { type = WrapInNameSpace(*vtype.struct_def); } else { type = GenTypeWire(vtype, "", false); } code_.SetValue("PARAM_TYPE", "const std::vector<" + type + "> *"); code_.SetValue("PARAM_VALUE", "nullptr"); } else { code_.SetValue("PARAM_TYPE", GenTypeWire(field.value.type, " ", true)); code_.SetValue("PARAM_VALUE", GetDefaultScalarValue(field, false)); } code_ += "{{PRE}}{{PARAM_TYPE}}{{PARAM_NAME}} = {{PARAM_VALUE}}\\"; } // Generate a member, including a default value for scalars and raw pointers. void GenMember(const FieldDef &field) { if (!field.deprecated && // Deprecated fields won't be accessible. field.value.type.base_type != BASE_TYPE_UTYPE && (field.value.type.base_type != BASE_TYPE_VECTOR || field.value.type.element != BASE_TYPE_UTYPE)) { auto type = GenTypeNative(field.value.type, false, field); auto cpp_type = field.attributes.Lookup("cpp_type"); auto full_type = (cpp_type ? (field.value.type.base_type == BASE_TYPE_VECTOR ? "std::vector<" + GenTypeNativePtr(cpp_type->constant, &field, false) + "> " : GenTypeNativePtr(cpp_type->constant, &field, false)) : type + " "); code_.SetValue("FIELD_TYPE", full_type); code_.SetValue("FIELD_NAME", Name(field)); code_ += " {{FIELD_TYPE}}{{FIELD_NAME}};"; } } // Generate the default constructor for this struct. Properly initialize all // scalar members with default values. void GenDefaultConstructor(const StructDef &struct_def) { std::string initializer_list; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated && // Deprecated fields won't be accessible. field.value.type.base_type != BASE_TYPE_UTYPE) { auto cpp_type = field.attributes.Lookup("cpp_type"); auto native_default = field.attributes.Lookup("native_default"); // Scalar types get parsed defaults, raw pointers get nullptrs. if (IsScalar(field.value.type.base_type)) { if (!initializer_list.empty()) { initializer_list += ",\n "; } initializer_list += Name(field); initializer_list += "(" + (native_default ? 
std::string(native_default->constant) : GetDefaultScalarValue(field, true)) + ")"; } else if (field.value.type.base_type == BASE_TYPE_STRUCT) { if (IsStruct(field.value.type)) { if (native_default) { if (!initializer_list.empty()) { initializer_list += ",\n "; } initializer_list += Name(field) + "(" + native_default->constant + ")"; } } } else if (cpp_type && field.value.type.base_type != BASE_TYPE_VECTOR) { if (!initializer_list.empty()) { initializer_list += ",\n "; } initializer_list += Name(field) + "(0)"; } } } if (!initializer_list.empty()) { initializer_list = "\n : " + initializer_list; } code_.SetValue("NATIVE_NAME", NativeName(Name(struct_def), &struct_def, parser_.opts)); code_.SetValue("INIT_LIST", initializer_list); code_ += " {{NATIVE_NAME}}(){{INIT_LIST}} {"; code_ += " }"; } void GenCompareOperator(const StructDef &struct_def, std::string accessSuffix = "") { std::string compare_op; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated && // Deprecated fields won't be accessible. field.value.type.base_type != BASE_TYPE_UTYPE && (field.value.type.base_type != BASE_TYPE_VECTOR || field.value.type.element != BASE_TYPE_UTYPE)) { if (!compare_op.empty()) { compare_op += " &&\n "; } auto accessor = Name(field) + accessSuffix; compare_op += "(lhs." + accessor + " == rhs." + accessor + ")"; } } std::string cmp_lhs; std::string cmp_rhs; if (compare_op.empty()) { cmp_lhs = ""; cmp_rhs = ""; compare_op = " return true;"; } else { cmp_lhs = "lhs"; cmp_rhs = "rhs"; compare_op = " return\n " + compare_op + ";"; } code_.SetValue("CMP_OP", compare_op); code_.SetValue("CMP_LHS", cmp_lhs); code_.SetValue("CMP_RHS", cmp_rhs); code_ += ""; code_ += "inline bool operator==(const {{NATIVE_NAME}} &{{CMP_LHS}}, const {{NATIVE_NAME}} &{{CMP_RHS}}) {"; code_ += "{{CMP_OP}}"; code_ += "}"; } void GenOperatorNewDelete(const StructDef &struct_def) { if (auto native_custom_alloc = struct_def.attributes.Lookup("native_custom_alloc")) { code_ += " inline void *operator new (std::size_t count) {"; code_ += " return " + native_custom_alloc->constant + "<{{NATIVE_NAME}}>().allocate(count / sizeof({{NATIVE_NAME}}));"; code_ += " }"; code_ += " inline void operator delete (void *ptr) {"; code_ += " return " + native_custom_alloc->constant + "<{{NATIVE_NAME}}>().deallocate(static_cast<{{NATIVE_NAME}}*>(" "ptr),1);"; code_ += " }"; } } void GenNativeTable(const StructDef &struct_def) { const auto native_name = NativeName(Name(struct_def), &struct_def, parser_.opts); code_.SetValue("STRUCT_NAME", Name(struct_def)); code_.SetValue("NATIVE_NAME", native_name); // Generate a C++ object that can hold an unpacked version of this table. code_ += "struct {{NATIVE_NAME}} : public flatbuffers::NativeTable {"; code_ += " typedef {{STRUCT_NAME}} TableType;"; GenFullyQualifiedNameGetter(struct_def, native_name); for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { GenMember(**it); } GenOperatorNewDelete(struct_def); GenDefaultConstructor(struct_def); code_ += "};"; if (parser_.opts.gen_compare) GenCompareOperator(struct_def); code_ += ""; } // Generate the code to call the appropriate Verify function(s) for a field. void GenVerifyCall(const FieldDef &field, const char *prefix) { code_.SetValue("PRE", prefix); code_.SetValue("NAME", Name(field)); code_.SetValue("REQUIRED", field.required ? 
"Required" : ""); code_.SetValue("SIZE", GenTypeSize(field.value.type)); code_.SetValue("OFFSET", GenFieldOffsetName(field)); if (IsScalar(field.value.type.base_type) || IsStruct(field.value.type)) { code_ += "{{PRE}}VerifyField{{REQUIRED}}<{{SIZE}}>(verifier, {{OFFSET}})\\"; } else { code_ += "{{PRE}}VerifyOffset{{REQUIRED}}(verifier, {{OFFSET}})\\"; } switch (field.value.type.base_type) { case BASE_TYPE_UNION: { code_.SetValue("ENUM_NAME", field.value.type.enum_def->name); code_.SetValue("SUFFIX", UnionTypeFieldSuffix()); code_ += "{{PRE}}Verify{{ENUM_NAME}}(verifier, {{NAME}}(), " "{{NAME}}{{SUFFIX}}())\\"; break; } case BASE_TYPE_STRUCT: { if (!field.value.type.struct_def->fixed) { code_ += "{{PRE}}verifier.VerifyTable({{NAME}}())\\"; } break; } case BASE_TYPE_STRING: { code_ += "{{PRE}}verifier.VerifyString({{NAME}}())\\"; break; } case BASE_TYPE_VECTOR: { code_ += "{{PRE}}verifier.VerifyVector({{NAME}}())\\"; switch (field.value.type.element) { case BASE_TYPE_STRING: { code_ += "{{PRE}}verifier.VerifyVectorOfStrings({{NAME}}())\\"; break; } case BASE_TYPE_STRUCT: { if (!field.value.type.struct_def->fixed) { code_ += "{{PRE}}verifier.VerifyVectorOfTables({{NAME}}())\\"; } break; } case BASE_TYPE_UNION: { code_.SetValue("ENUM_NAME", field.value.type.enum_def->name); code_ += "{{PRE}}Verify{{ENUM_NAME}}Vector(verifier, {{NAME}}(), " "{{NAME}}_type())\\"; break; } default: break; } break; } default: { break; } } } // Generate CompareWithValue method for a key field. void GenKeyFieldMethods(const FieldDef &field) { FLATBUFFERS_ASSERT(field.key); const bool is_string = (field.value.type.base_type == BASE_TYPE_STRING); code_ += " bool KeyCompareLessThan(const {{STRUCT_NAME}} *o) const {"; if (is_string) { // use operator< of flatbuffers::String code_ += " return *{{FIELD_NAME}}() < *o->{{FIELD_NAME}}();"; } else { code_ += " return {{FIELD_NAME}}() < o->{{FIELD_NAME}}();"; } code_ += " }"; if (is_string) { code_ += " int KeyCompareWithValue(const char *val) const {"; code_ += " return strcmp({{FIELD_NAME}}()->c_str(), val);"; code_ += " }"; } else { FLATBUFFERS_ASSERT(IsScalar(field.value.type.base_type)); auto type = GenTypeBasic(field.value.type, false); if (parser_.opts.scoped_enums && field.value.type.enum_def && IsScalar(field.value.type.base_type)) { type = GenTypeGet(field.value.type, " ", "const ", " *", true); } // Returns {field<val: -1, field==val: 0, field>val: +1}. code_.SetValue("KEY_TYPE", type); code_ += " int KeyCompareWithValue({{KEY_TYPE}} val) const {"; code_ += " return static_cast<int>({{FIELD_NAME}}() > val) - " "static_cast<int>({{FIELD_NAME}}() < val);"; code_ += " }"; } } // Generate an accessor struct, builder structs & function for a table. void GenTable(const StructDef &struct_def) { if (parser_.opts.generate_object_based_api) { GenNativeTable(struct_def); } // Generate an accessor struct, with methods of the form: // type name() const { return GetField<type>(offset, defaultval); } GenComment(struct_def.doc_comment); code_.SetValue("STRUCT_NAME", Name(struct_def)); code_ += "struct {{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS" " : private flatbuffers::Table {"; if (parser_.opts.generate_object_based_api) { code_ += " typedef {{NATIVE_NAME}} NativeTableType;"; } if (parser_.opts.mini_reflect != IDLOptions::kNone) { code_ += " static const flatbuffers::TypeTable *MiniReflectTypeTable() {"; code_ += " return {{STRUCT_NAME}}TypeTable();"; code_ += " }"; } GenFullyQualifiedNameGetter(struct_def, Name(struct_def)); // Generate field id constants. 
if (struct_def.fields.vec.size() > 0) { // We need to add a trailing comma to all elements except the last one as // older versions of gcc complain about this. code_.SetValue("SEP", ""); code_ += " enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { // Deprecated fields won't be accessible. continue; } code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field)); code_.SetValue("OFFSET_VALUE", NumToString(field.value.offset)); code_ += "{{SEP}} {{OFFSET_NAME}} = {{OFFSET_VALUE}}\\"; code_.SetValue("SEP", ",\n"); } code_ += ""; code_ += " };"; } // Generate the accessors. for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { // Deprecated fields won't be accessible. continue; } const bool is_struct = IsStruct(field.value.type); const bool is_scalar = IsScalar(field.value.type.base_type); code_.SetValue("FIELD_NAME", Name(field)); // Call a different accessor for pointers, that indirects. std::string accessor = ""; if (is_scalar) { accessor = "GetField<"; } else if (is_struct) { accessor = "GetStruct<"; } else { accessor = "GetPointer<"; } auto offset_str = GenFieldOffsetName(field); auto offset_type = GenTypeGet(field.value.type, "", "const ", " *", false); auto call = accessor + offset_type + ">(" + offset_str; // Default value as second arg for non-pointer types. if (is_scalar) { call += ", " + GenDefaultConstant(field); } call += ")"; std::string afterptr = " *" + NullableExtension(); GenComment(field.doc_comment, " "); code_.SetValue("FIELD_TYPE", GenTypeGet(field.value.type, " ", "const ", afterptr.c_str(), true)); code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, call)); code_.SetValue("NULLABLE_EXT", NullableExtension()); code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {"; code_ += " return {{FIELD_VALUE}};"; code_ += " }"; if (field.value.type.base_type == BASE_TYPE_UNION) { auto u = field.value.type.enum_def; if (!field.value.type.enum_def->uses_multiple_type_instances) code_ += " template<typename T> " "const T *{{NULLABLE_EXT}}{{FIELD_NAME}}_as() const;"; for (auto u_it = u->vals.vec.begin(); u_it != u->vals.vec.end(); ++u_it) { auto &ev = **u_it; if (ev.union_type.base_type == BASE_TYPE_NONE) { continue; } auto full_struct_name = GetUnionElement(ev, true, true); // @TODO: Mby make this decisions more universal? How? code_.SetValue( "U_GET_TYPE", EscapeKeyword(field.name + UnionTypeFieldSuffix())); code_.SetValue( "U_ELEMENT_TYPE", WrapInNameSpace(u->defined_namespace, GetEnumValUse(*u, ev))); code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *"); code_.SetValue("U_FIELD_NAME", Name(field) + "_as_" + Name(ev)); code_.SetValue("U_NULLABLE", NullableExtension()); // `const Type *union_name_asType() const` accessor. code_ += " {{U_FIELD_TYPE}}{{U_NULLABLE}}{{U_FIELD_NAME}}() const {"; code_ += " return {{U_GET_TYPE}}() == {{U_ELEMENT_TYPE}} ? 
" "static_cast<{{U_FIELD_TYPE}}>({{FIELD_NAME}}()) " ": nullptr;"; code_ += " }"; } } if (parser_.opts.mutable_buffer) { if (is_scalar) { const auto type = GenTypeWire(field.value.type, "", false); code_.SetValue("SET_FN", "SetField<" + type + ">"); code_.SetValue("OFFSET_NAME", offset_str); code_.SetValue("FIELD_TYPE", GenTypeBasic(field.value.type, true)); code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, false, "_" + Name(field))); code_.SetValue("DEFAULT_VALUE", GenDefaultConstant(field)); code_ += " bool mutate_{{FIELD_NAME}}({{FIELD_TYPE}} " "_{{FIELD_NAME}}) {"; code_ += " return {{SET_FN}}({{OFFSET_NAME}}, {{FIELD_VALUE}}, " "{{DEFAULT_VALUE}});"; code_ += " }"; } else { auto postptr = " *" + NullableExtension(); auto type = GenTypeGet(field.value.type, " ", "", postptr.c_str(), true); auto underlying = accessor + type + ">(" + offset_str + ")"; code_.SetValue("FIELD_TYPE", type); code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, underlying)); code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {"; code_ += " return {{FIELD_VALUE}};"; code_ += " }"; } } auto nested = field.attributes.Lookup("nested_flatbuffer"); if (nested) { std::string qualified_name = nested->constant; auto nested_root = parser_.LookupStruct(nested->constant); if (nested_root == nullptr) { qualified_name = parser_.current_namespace_->GetFullyQualifiedName( nested->constant); nested_root = parser_.LookupStruct(qualified_name); } FLATBUFFERS_ASSERT(nested_root); // Guaranteed to exist by parser. (void)nested_root; code_.SetValue("CPP_NAME", TranslateNameSpace(qualified_name)); code_ += " const {{CPP_NAME}} *{{FIELD_NAME}}_nested_root() const {"; code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>({{FIELD_NAME}}()->Data());"; code_ += " }"; } if (field.flexbuffer) { code_ += " flexbuffers::Reference {{FIELD_NAME}}_flexbuffer_root()" " const {"; // Both Data() and size() are const-methods, therefore call order doesn't matter. code_ += " return flexbuffers::GetRoot({{FIELD_NAME}}()->Data(), " "{{FIELD_NAME}}()->size());"; code_ += " }"; } // Generate a comparison function for this field if it is a key. if (field.key) { GenKeyFieldMethods(field); } } // Generate a verifier function that can check a buffer from an untrusted // source will never cause reads outside the buffer. code_ += " bool Verify(flatbuffers::Verifier &verifier) const {"; code_ += " return VerifyTableStart(verifier)\\"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { continue; } GenVerifyCall(field, " &&\n "); } code_ += " &&\n verifier.EndTable();"; code_ += " }"; if (parser_.opts.generate_object_based_api) { // Generate the UnPack() pre declaration. code_ += " " + TableUnPackSignature(struct_def, true, parser_.opts) + ";"; code_ += " " + TableUnPackToSignature(struct_def, true, parser_.opts) + ";"; code_ += " " + TablePackSignature(struct_def, true, parser_.opts) + ";"; } code_ += "};"; // End of table. 
code_ += ""; // Explicit specializations for union accessors for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated || field.value.type.base_type != BASE_TYPE_UNION) { continue; } auto u = field.value.type.enum_def; if (u->uses_multiple_type_instances) continue; code_.SetValue("FIELD_NAME", Name(field)); for (auto u_it = u->vals.vec.begin(); u_it != u->vals.vec.end(); ++u_it) { auto &ev = **u_it; if (ev.union_type.base_type == BASE_TYPE_NONE) { continue; } auto full_struct_name = GetUnionElement(ev, true, true); code_.SetValue( "U_ELEMENT_TYPE", WrapInNameSpace(u->defined_namespace, GetEnumValUse(*u, ev))); code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *"); code_.SetValue("U_ELEMENT_NAME", full_struct_name); code_.SetValue("U_FIELD_NAME", Name(field) + "_as_" + Name(ev)); // `template<> const T *union_name_as<T>() const` accessor. code_ += "template<> " "inline {{U_FIELD_TYPE}}{{STRUCT_NAME}}::{{FIELD_NAME}}_as" "<{{U_ELEMENT_NAME}}>() const {"; code_ += " return {{U_FIELD_NAME}}();"; code_ += "}"; code_ += ""; } } GenBuilders(struct_def); if (parser_.opts.generate_object_based_api) { // Generate a pre-declaration for a CreateX method that works with an // unpacked C++ object. code_ += TableCreateSignature(struct_def, true, parser_.opts) + ";"; code_ += ""; } } void GenBuilders(const StructDef &struct_def) { code_.SetValue("STRUCT_NAME", Name(struct_def)); // Generate a builder struct: code_ += "struct {{STRUCT_NAME}}Builder {"; code_ += " flatbuffers::FlatBufferBuilder &fbb_;"; code_ += " flatbuffers::uoffset_t start_;"; bool has_string_or_vector_fields = false; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { const bool is_scalar = IsScalar(field.value.type.base_type); const bool is_string = field.value.type.base_type == BASE_TYPE_STRING; const bool is_vector = field.value.type.base_type == BASE_TYPE_VECTOR; if (is_string || is_vector) { has_string_or_vector_fields = true; } std::string offset = GenFieldOffsetName(field); std::string name = GenUnderlyingCast(field, false, Name(field)); std::string value = is_scalar ? GenDefaultConstant(field) : ""; // Generate accessor functions of the form: // void add_name(type name) { // fbb_.AddElement<type>(offset, name, default); // } code_.SetValue("FIELD_NAME", Name(field)); code_.SetValue("FIELD_TYPE", GenTypeWire(field.value.type, " ", true)); code_.SetValue("ADD_OFFSET", Name(struct_def) + "::" + offset); code_.SetValue("ADD_NAME", name); code_.SetValue("ADD_VALUE", value); if (is_scalar) { const auto type = GenTypeWire(field.value.type, "", false); code_.SetValue("ADD_FN", "AddElement<" + type + ">"); } else if (IsStruct(field.value.type)) { code_.SetValue("ADD_FN", "AddStruct"); } else { code_.SetValue("ADD_FN", "AddOffset"); } code_ += " void add_{{FIELD_NAME}}({{FIELD_TYPE}}{{FIELD_NAME}}) {"; code_ += " fbb_.{{ADD_FN}}(\\"; if (is_scalar) { code_ += "{{ADD_OFFSET}}, {{ADD_NAME}}, {{ADD_VALUE}});"; } else { code_ += "{{ADD_OFFSET}}, {{ADD_NAME}});"; } code_ += " }"; } } // Builder constructor code_ += " explicit {{STRUCT_NAME}}Builder(flatbuffers::FlatBufferBuilder " "&_fbb)"; code_ += " : fbb_(_fbb) {"; code_ += " start_ = fbb_.StartTable();"; code_ += " }"; // Assignment operator; code_ += " {{STRUCT_NAME}}Builder &operator=" "(const {{STRUCT_NAME}}Builder &);"; // Finish() function. 
code_ += " flatbuffers::Offset<{{STRUCT_NAME}}> Finish() {"; code_ += " const auto end = fbb_.EndTable(start_);"; code_ += " auto o = flatbuffers::Offset<{{STRUCT_NAME}}>(end);"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated && field.required) { code_.SetValue("FIELD_NAME", Name(field)); code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field)); code_ += " fbb_.Required(o, {{STRUCT_NAME}}::{{OFFSET_NAME}});"; } } code_ += " return o;"; code_ += " }"; code_ += "};"; code_ += ""; // Generate a convenient CreateX function that uses the above builder // to create a table in one go. code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> " "Create{{STRUCT_NAME}}("; code_ += " flatbuffers::FlatBufferBuilder &_fbb\\"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { GenParam(field, false, ",\n "); } } code_ += ") {"; code_ += " {{STRUCT_NAME}}Builder builder_(_fbb);"; for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1; size; size /= 2) { for (auto it = struct_def.fields.vec.rbegin(); it != struct_def.fields.vec.rend(); ++it) { const auto &field = **it; if (!field.deprecated && (!struct_def.sortbysize || size == SizeOf(field.value.type.base_type))) { code_.SetValue("FIELD_NAME", Name(field)); code_ += " builder_.add_{{FIELD_NAME}}({{FIELD_NAME}});"; } } } code_ += " return builder_.Finish();"; code_ += "}"; code_ += ""; // Generate a CreateXDirect function with vector types as parameters if (has_string_or_vector_fields) { code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> " "Create{{STRUCT_NAME}}Direct("; code_ += " flatbuffers::FlatBufferBuilder &_fbb\\"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { GenParam(field, true, ",\n "); } } // Need to call "Create" with the struct namespace. const auto qualified_create_name = struct_def.defined_namespace->GetFullyQualifiedName("Create"); code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name)); code_ += ") {"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { code_.SetValue("FIELD_NAME", Name(field)); if (field.value.type.base_type == BASE_TYPE_STRING) { code_ += " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? " "_fbb.CreateString({{FIELD_NAME}}) : 0;"; } else if (field.value.type.base_type == BASE_TYPE_VECTOR) { code_ += " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? 
\\"; const auto vtype = field.value.type.VectorType(); if (IsStruct(vtype)) { const auto type = WrapInNameSpace(*vtype.struct_def); code_ += "_fbb.CreateVectorOfStructs<" + type + ">\\"; } else { const auto type = GenTypeWire(vtype, "", false); code_ += "_fbb.CreateVector<" + type + ">\\"; } code_ += "(*{{FIELD_NAME}}) : 0;"; } } } code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}("; code_ += " _fbb\\"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { code_.SetValue("FIELD_NAME", Name(field)); code_ += ",\n {{FIELD_NAME}}\\"; if (field.value.type.base_type == BASE_TYPE_STRING || field.value.type.base_type == BASE_TYPE_VECTOR) { code_ += "__\\"; } } } code_ += ");"; code_ += "}"; code_ += ""; } } std::string GenUnionUnpackVal(const FieldDef &afield, const char *vec_elem_access, const char *vec_type_access) { return afield.value.type.enum_def->name + "Union::UnPack(" + "_e" + vec_elem_access + ", " + EscapeKeyword(afield.name + UnionTypeFieldSuffix()) + "()" + vec_type_access + ", _resolver)"; } std::string GenUnpackVal(const Type &type, const std::string &val, bool invector, const FieldDef &afield) { switch (type.base_type) { case BASE_TYPE_STRING: { return val + "->str()"; } case BASE_TYPE_STRUCT: { const auto name = WrapInNameSpace(*type.struct_def); if (IsStruct(type)) { auto native_type = type.struct_def->attributes.Lookup("native_type"); if (native_type) { return "flatbuffers::UnPack(*" + val + ")"; } else if (invector || afield.native_inline) { return "*" + val; } else { const auto ptype = GenTypeNativePtr(name, &afield, true); return ptype + "(new " + name + "(*" + val + "))"; } } else { const auto ptype = GenTypeNativePtr( NativeName(name, type.struct_def, parser_.opts), &afield, true); return ptype + "(" + val + "->UnPack(_resolver))"; } } case BASE_TYPE_UNION: { return GenUnionUnpackVal( afield, invector ? "->Get(_i)" : "", invector ? ("->GetEnum<" + type.enum_def->name + ">(_i)").c_str() : ""); } default: { return val; break; } } }; std::string GenUnpackFieldStatement(const FieldDef &field, const FieldDef *union_field) { std::string code; switch (field.value.type.base_type) { case BASE_TYPE_VECTOR: { auto cpp_type = field.attributes.Lookup("cpp_type"); std::string indexing; if (field.value.type.enum_def) { indexing += "static_cast<" + WrapInNameSpace(*field.value.type.enum_def) + ">("; } indexing += "_e->Get(_i)"; if (field.value.type.enum_def) { indexing += ")"; } if (field.value.type.element == BASE_TYPE_BOOL) { indexing += " != 0"; } // Generate code that pushes data from _e to _o in the form: // for (uoffset_t i = 0; i < _e->size(); ++i) { // _o->field.push_back(_e->Get(_i)); // } auto name = Name(field); if (field.value.type.element == BASE_TYPE_UTYPE) { name = StripUnionType(Name(field)); } auto access = field.value.type.element == BASE_TYPE_UTYPE ? ".type" : (field.value.type.element == BASE_TYPE_UNION ? 
".value" : ""); code += "{ _o->" + name + ".resize(_e->size()); "; code += "for (flatbuffers::uoffset_t _i = 0;"; code += " _i < _e->size(); _i++) { "; if (cpp_type) { // Generate code that resolves the cpp pointer type, of the form: // if (resolver) // (*resolver)(&_o->field, (hash_value_t)(_e)); // else // _o->field = nullptr; code += "//vector resolver, " + PtrType(&field) + "\n"; code += "if (_resolver) "; code += "(*_resolver)"; code += "(reinterpret_cast<void **>(&_o->" + name + "[_i]" + access + "), "; code += "static_cast<flatbuffers::hash_value_t>(" + indexing + "));"; if (PtrType(&field) == "naked") { code += " else "; code += "_o->" + name + "[_i]" + access + " = nullptr"; } else { //code += " else "; //code += "_o->" + name + "[_i]" + access + " = " + GenTypeNativePtr(cpp_type->constant, &field, true) + "();"; code += "/* else do nothing */"; } } else { code += "_o->" + name + "[_i]" + access + " = "; code += GenUnpackVal(field.value.type.VectorType(), indexing, true, field); } code += "; } }"; break; } case BASE_TYPE_UTYPE: { FLATBUFFERS_ASSERT(union_field->value.type.base_type == BASE_TYPE_UNION); // Generate code that sets the union type, of the form: // _o->field.type = _e; code += "_o->" + union_field->name + ".type = _e;"; break; } case BASE_TYPE_UNION: { // Generate code that sets the union value, of the form: // _o->field.value = Union::Unpack(_e, field_type(), resolver); code += "_o->" + Name(field) + ".value = "; code += GenUnionUnpackVal(field, "", ""); code += ";"; break; } default: { auto cpp_type = field.attributes.Lookup("cpp_type"); if (cpp_type) { // Generate code that resolves the cpp pointer type, of the form: // if (resolver) // (*resolver)(&_o->field, (hash_value_t)(_e)); // else // _o->field = nullptr; code += "//scalar resolver, " + PtrType(&field) + " \n"; code += "if (_resolver) "; code += "(*_resolver)"; code += "(reinterpret_cast<void **>(&_o->" + Name(field) + "), "; code += "static_cast<flatbuffers::hash_value_t>(_e));"; if (PtrType(&field) == "naked") { code += " else "; code += "_o->" + Name(field) + " = nullptr;"; } else { //code += " else "; //code += "_o->" + Name(field) + " = " + GenTypeNativePtr(cpp_type->constant, &field, true) + "();"; code += "/* else do nothing */;"; } } else { // Generate code for assigning the value, of the form: // _o->field = value; code += "_o->" + Name(field) + " = "; code += GenUnpackVal(field.value.type, "_e", false, field) + ";"; } break; } } return code; } std::string GenCreateParam(const FieldDef &field) { const IDLOptions &opts = parser_.opts; std::string value = "_o->"; if (field.value.type.base_type == BASE_TYPE_UTYPE) { value += StripUnionType(Name(field)); value += ".type"; } else { value += Name(field); } if (field.value.type.base_type != BASE_TYPE_VECTOR && field.attributes.Lookup("cpp_type")) { auto type = GenTypeBasic(field.value.type, false); value = "_rehasher ? " "static_cast<" + type + ">((*_rehasher)(" + value + GenPtrGet(field) + ")) : 0"; } std::string code; switch (field.value.type.base_type) { // String fields are of the form: // _fbb.CreateString(_o->field) case BASE_TYPE_STRING: { code += "_fbb.CreateString(" + value + ")"; // For optional fields, check to see if there actually is any data // in _o->field before attempting to access it. If there isn't, // depending on set_empty_to_null either set it to 0 or an empty string. if (!field.required) { auto empty_value = opts.set_empty_to_null ? "0" : "_fbb.CreateSharedString(\"\")"; code = value + ".empty() ? 
" + empty_value + " : " + code; } break; } // Vector fields come in several flavours, of the forms: // _fbb.CreateVector(_o->field); // _fbb.CreateVector((const utype*)_o->field.data(), _o->field.size()); // _fbb.CreateVectorOfStrings(_o->field) // _fbb.CreateVectorOfStructs(_o->field) // _fbb.CreateVector<Offset<T>>(_o->field.size() [&](size_t i) { // return CreateT(_fbb, _o->Get(i), rehasher); // }); case BASE_TYPE_VECTOR: { auto vector_type = field.value.type.VectorType(); switch (vector_type.base_type) { case BASE_TYPE_STRING: { code += "_fbb.CreateVectorOfStrings(" + value + ")"; break; } case BASE_TYPE_STRUCT: { if (IsStruct(vector_type)) { auto native_type = field.value.type.struct_def->attributes.Lookup("native_type"); if (native_type) { code += "_fbb.CreateVectorOfNativeStructs<"; code += WrapInNameSpace(*vector_type.struct_def) + ">"; } else { code += "_fbb.CreateVectorOfStructs"; } code += "(" + value + ")"; } else { code += "_fbb.CreateVector<flatbuffers::Offset<"; code += WrapInNameSpace(*vector_type.struct_def) + ">> "; code += "(" + value + ".size(), "; code += "[](size_t i, _VectorArgs *__va) { "; code += "return Create" + vector_type.struct_def->name; code += "(*__va->__fbb, __va->_" + value + "[i]" + GenPtrGet(field) + ", "; code += "__va->__rehasher); }, &_va )"; } break; } case BASE_TYPE_BOOL: { code += "_fbb.CreateVector(" + value + ")"; break; } case BASE_TYPE_UNION: { code += "_fbb.CreateVector<flatbuffers::" "Offset<void>>(" + value + ".size(), [](size_t i, _VectorArgs *__va) { " "return __va->_" + value + "[i].Pack(*__va->__fbb, __va->__rehasher); }, &_va)"; break; } case BASE_TYPE_UTYPE: { value = StripUnionType(value); code += "_fbb.CreateVector<uint8_t>(" + value + ".size(), [](size_t i, _VectorArgs *__va) { " "return static_cast<uint8_t>(__va->_" + value + "[i].type); }, &_va)"; break; } default: { if (field.value.type.enum_def) { // For enumerations, we need to get access to the array data for // the underlying storage type (eg. uint8_t). const auto basetype = GenTypeBasic( field.value.type.enum_def->underlying_type, false); code += "_fbb.CreateVectorScalarCast<" + basetype + ">(flatbuffers::data(" + value + "), " + value + ".size())"; } else if (field.attributes.Lookup("cpp_type")) { auto type = GenTypeBasic(vector_type, false); code += "_fbb.CreateVector<" + type + ">(" + value + ".size(), "; code += "[](size_t i, _VectorArgs *__va) { "; code += "return __va->__rehasher ? "; code += "static_cast<" + type + ">((*__va->__rehasher)"; code += "(__va->_" + value + "[i]" + GenPtrGet(field) + ")) : 0"; code += "; }, &_va )"; } else { code += "_fbb.CreateVector(" + value + ")"; } break; } } // If set_empty_to_null option is enabled, for optional fields, check to // see if there actually is any data in _o->field before attempting to // access it. if (opts.set_empty_to_null && !field.required) { code = value + ".size() ? " + code + " : 0"; } break; } case BASE_TYPE_UNION: { // _o->field.Pack(_fbb); code += value + ".Pack(_fbb)"; break; } case BASE_TYPE_STRUCT: { if (IsStruct(field.value.type)) { auto native_type = field.value.type.struct_def->attributes.Lookup("native_type"); if (native_type) { code += "flatbuffers::Pack(" + value + ")"; } else if (field.native_inline) { code += "&" + value; } else { code += value + " ? " + value + GenPtrGet(field) + " : 0"; } } else { // _o->field ? CreateT(_fbb, _o->field.get(), _rehasher); const auto type = field.value.type.struct_def->name; code += value + " ? 
Create" + type; code += "(_fbb, " + value + GenPtrGet(field) + ", _rehasher)"; code += " : 0"; } break; } default: { code += value; break; } } return code; } // Generate code for tables that needs to come after the regular definition. void GenTablePost(const StructDef &struct_def) { code_.SetValue("STRUCT_NAME", Name(struct_def)); code_.SetValue("NATIVE_NAME", NativeName(Name(struct_def), &struct_def, parser_.opts)); if (parser_.opts.generate_object_based_api) { // Generate the X::UnPack() method. code_ += "inline " + TableUnPackSignature(struct_def, false, parser_.opts) + " {"; code_ += " auto _o = new {{NATIVE_NAME}}();"; code_ += " UnPackTo(_o, _resolver);"; code_ += " return _o;"; code_ += "}"; code_ += ""; code_ += "inline " + TableUnPackToSignature(struct_def, false, parser_.opts) + " {"; code_ += " (void)_o;"; code_ += " (void)_resolver;"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { continue; } // Assign a value from |this| to |_o|. Values from |this| are stored // in a variable |_e| by calling this->field_type(). The value is then // assigned to |_o| using the GenUnpackFieldStatement. const bool is_union = field.value.type.base_type == BASE_TYPE_UTYPE; const auto statement = GenUnpackFieldStatement(field, is_union ? *(it + 1) : nullptr); code_.SetValue("FIELD_NAME", Name(field)); auto prefix = " { auto _e = {{FIELD_NAME}}(); "; auto check = IsScalar(field.value.type.base_type) ? "" : "if (_e) "; auto postfix = " };"; code_ += std::string(prefix) + check + statement + postfix; } code_ += "}"; code_ += ""; // Generate the X::Pack member function that simply calls the global // CreateX function. code_ += "inline " + TablePackSignature(struct_def, false, parser_.opts) + " {"; code_ += " return Create{{STRUCT_NAME}}(_fbb, _o, _rehasher);"; code_ += "}"; code_ += ""; // Generate a CreateX method that works with an unpacked C++ object. code_ += "inline " + TableCreateSignature(struct_def, false, parser_.opts) + " {"; code_ += " (void)_rehasher;"; code_ += " (void)_o;"; code_ += " struct _VectorArgs " "{ flatbuffers::FlatBufferBuilder *__fbb; " "const " + NativeName(Name(struct_def), &struct_def, parser_.opts) + "* __o; " "const flatbuffers::rehasher_function_t *__rehasher; } _va = { " "&_fbb, _o, _rehasher}; (void)_va;"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) { continue; } code_ += " auto _" + Name(field) + " = " + GenCreateParam(field) + ";"; } // Need to call "Create" with the struct namespace. const auto qualified_create_name = struct_def.defined_namespace->GetFullyQualifiedName("Create"); code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name)); code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}("; code_ += " _fbb\\"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) { continue; } bool pass_by_address = false; if (field.value.type.base_type == BASE_TYPE_STRUCT) { if (IsStruct(field.value.type)) { auto native_type = field.value.type.struct_def->attributes.Lookup("native_type"); if (native_type) { pass_by_address = true; } } } // Call the CreateX function using values from |_o|. 
if (pass_by_address) { code_ += ",\n &_" + Name(field) + "\\"; } else { code_ += ",\n _" + Name(field) + "\\"; } } code_ += ");"; code_ += "}"; code_ += ""; } } static void GenPadding( const FieldDef &field, std::string *code_ptr, int *id, const std::function<void(int bits, std::string *code_ptr, int *id)> &f) { if (field.padding) { for (int i = 0; i < 4; i++) { if (static_cast<int>(field.padding) & (1 << i)) { f((1 << i) * 8, code_ptr, id); } } FLATBUFFERS_ASSERT(!(field.padding & ~0xF)); } } static void PaddingDefinition(int bits, std::string *code_ptr, int *id) { *code_ptr += " int" + NumToString(bits) + "_t padding" + NumToString((*id)++) + "__;"; } static void PaddingInitializer(int bits, std::string *code_ptr, int *id) { (void)bits; *code_ptr += ",\n padding" + NumToString((*id)++) + "__(0)"; } static void PaddingNoop(int bits, std::string *code_ptr, int *id) { (void)bits; *code_ptr += " (void)padding" + NumToString((*id)++) + "__;"; } // Generate an accessor struct with constructor for a flatbuffers struct. void GenStruct(const StructDef &struct_def) { // Generate an accessor struct, with private variables of the form: // type name_; // Generates manual padding and alignment. // Variables are private because they contain little endian data on all // platforms. GenComment(struct_def.doc_comment); code_.SetValue("ALIGN", NumToString(struct_def.minalign)); code_.SetValue("STRUCT_NAME", Name(struct_def)); code_ += "FLATBUFFERS_MANUALLY_ALIGNED_STRUCT({{ALIGN}}) " "{{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS {"; code_ += " private:"; int padding_id = 0; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; code_.SetValue("FIELD_TYPE", GenTypeGet(field.value.type, " ", "", " ", false)); code_.SetValue("FIELD_NAME", Name(field)); code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}_;"; if (field.padding) { std::string padding; GenPadding(field, &padding, &padding_id, PaddingDefinition); code_ += padding; } } // Generate GetFullyQualifiedName code_ += ""; code_ += " public:"; GenFullyQualifiedNameGetter(struct_def, Name(struct_def)); // Generate a default constructor. code_ += " {{STRUCT_NAME}}() {"; code_ += " memset(this, 0, sizeof({{STRUCT_NAME}}));"; code_ += " }"; // Generate a constructor that takes all fields as arguments. 
std::string arg_list; std::string init_list; padding_id = 0; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; const auto member_name = Name(field) + "_"; const auto arg_name = "_" + Name(field); const auto arg_type = GenTypeGet(field.value.type, " ", "const ", " &", true); if (it != struct_def.fields.vec.begin()) { arg_list += ", "; init_list += ",\n "; } arg_list += arg_type; arg_list += arg_name; init_list += member_name; if (IsScalar(field.value.type.base_type)) { auto type = GenUnderlyingCast(field, false, arg_name); init_list += "(flatbuffers::EndianScalar(" + type + "))"; } else { init_list += "(" + arg_name + ")"; } if (field.padding) { GenPadding(field, &init_list, &padding_id, PaddingInitializer); } } if (!arg_list.empty()) { code_.SetValue("ARG_LIST", arg_list); code_.SetValue("INIT_LIST", init_list); code_ += " {{STRUCT_NAME}}({{ARG_LIST}})"; code_ += " : {{INIT_LIST}} {"; padding_id = 0; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.padding) { std::string padding; GenPadding(field, &padding, &padding_id, PaddingNoop); code_ += padding; } } code_ += " }"; } // Generate accessor methods of the form: // type name() const { return flatbuffers::EndianScalar(name_); } for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; auto field_type = GenTypeGet(field.value.type, " ", "const ", " &", true); auto is_scalar = IsScalar(field.value.type.base_type); auto member = Name(field) + "_"; auto value = is_scalar ? "flatbuffers::EndianScalar(" + member + ")" : member; code_.SetValue("FIELD_NAME", Name(field)); code_.SetValue("FIELD_TYPE", field_type); code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, value)); GenComment(field.doc_comment, " "); code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {"; code_ += " return {{FIELD_VALUE}};"; code_ += " }"; if (parser_.opts.mutable_buffer) { auto mut_field_type = GenTypeGet(field.value.type, " ", "", " &", true); code_.SetValue("FIELD_TYPE", mut_field_type); if (is_scalar) { code_.SetValue("ARG", GenTypeBasic(field.value.type, true)); code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, false, "_" + Name(field))); code_ += " void mutate_{{FIELD_NAME}}({{ARG}} _{{FIELD_NAME}}) {"; code_ += " flatbuffers::WriteScalar(&{{FIELD_NAME}}_, " "{{FIELD_VALUE}});"; code_ += " }"; } else { code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {"; code_ += " return {{FIELD_NAME}}_;"; code_ += " }"; } } // Generate a comparison function for this field if it is a key. if (field.key) { GenKeyFieldMethods(field); } } code_.SetValue("NATIVE_NAME", Name(struct_def)); GenOperatorNewDelete(struct_def); code_ += "};"; code_.SetValue("STRUCT_BYTE_SIZE", NumToString(struct_def.bytesize)); code_ += "FLATBUFFERS_STRUCT_END({{STRUCT_NAME}}, {{STRUCT_BYTE_SIZE}});"; if (parser_.opts.gen_compare) GenCompareOperator(struct_def, "()"); code_ += ""; } // Set up the correct namespace. Only open a namespace if the existing one is // different (closing/opening only what is necessary). // // The file must start and end with an empty (or null) namespace so that // namespaces are properly opened and closed. void SetNameSpace(const Namespace *ns) { if (cur_name_space_ == ns) { return; } // Compute the size of the longest common namespace prefix. 
// If cur_name_space is A::B::C::D and ns is A::B::E::F::G, // the common prefix is A::B:: and we have old_size = 4, new_size = 5 // and common_prefix_size = 2 size_t old_size = cur_name_space_ ? cur_name_space_->components.size() : 0; size_t new_size = ns ? ns->components.size() : 0; size_t common_prefix_size = 0; while (common_prefix_size < old_size && common_prefix_size < new_size && ns->components[common_prefix_size] == cur_name_space_->components[common_prefix_size]) { common_prefix_size++; } // Close cur_name_space in reverse order to reach the common prefix. // In the previous example, D then C are closed. for (size_t j = old_size; j > common_prefix_size; --j) { code_ += "} // namespace " + cur_name_space_->components[j - 1]; } if (old_size != common_prefix_size) { code_ += ""; } // open namespace parts to reach the ns namespace // in the previous example, E, then F, then G are opened for (auto j = common_prefix_size; j != new_size; ++j) { code_ += "namespace " + ns->components[j] + " {"; } if (new_size != common_prefix_size) { code_ += ""; } cur_name_space_ = ns; } }; } // namespace cpp bool GenerateCPP(const Parser &parser, const std::string &path, const std::string &file_name) { cpp::CppGenerator generator(parser, path, file_name); return generator.generate(); } std::string CPPMakeRule(const Parser &parser, const std::string &path, const std::string &file_name) { const auto filebase = flatbuffers::StripPath(flatbuffers::StripExtension(file_name)); const auto included_files = parser.GetIncludedFilesRecursive(file_name); std::string make_rule = GeneratedFileName(path, filebase) + ": "; for (auto it = included_files.begin(); it != included_files.end(); ++it) { make_rule += " " + *it; } return make_rule; } } // namespace flatbuffers
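// As a companion sketch to the GenBuilders() templates above: the generated
// Builder and Create*Direct helpers are used like this (table "Monster" and
// field "hp" are hypothetical, for illustration only):
//
//   flatbuffers::FlatBufferBuilder fbb;
//   MonsterBuilder builder_(fbb);   // explicit MonsterBuilder(FlatBufferBuilder &)
//   builder_.add_hp(80);
//   auto mloc = builder_.Finish();  // enforces required fields, returns Offset<Monster>
//   fbb.Finish(mloc);
//
//   // When the table has string/vector fields, the generated
//   // CreateMonsterDirect(fbb, ...) overload performs the CreateString()/
//   // CreateVector() calls before delegating to CreateMonster().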
1
14546
You can delete this `NONE` case.
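The hunk this comment reviews is not included in this record excerpt. Purely as an illustration of the suggestion (the switch and its cases are assumed, not taken from the actual patch): when a switch already has a default arm with the same behaviour, an explicit NONE arm such as

    switch (type.base_type) {
      case BASE_TYPE_NONE: break;  // redundant: the default below does the same
      default: break;
    }

can simply be dropped, leaving only the default.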
google-flatbuffers
java
@@ -5420,8 +5420,8 @@ TEST_F(VkLayerTest, RenderPassCreateAttachmentReadOnlyButCleared) {
     }
 }
 
-TEST_F(VkLayerTest, RenderPassCreateAttachmentUsedTwiceColor) {
-    TEST_DESCRIPTION("Attachment is used simultaneously as two color attachments. This is usually unintended.");
+TEST_F(VkLayerTest, RenderPassCreateAttachmentMismatchingLayoutsColor) {
+    TEST_DESCRIPTION("Attachment is used simultaneously as two color attachments with different layouts.");
 
     // Check for VK_KHR_get_physical_device_properties2
     if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
1
/* * Copyright (c) 2015-2019 The Khronos Group Inc. * Copyright (c) 2015-2019 Valve Corporation * Copyright (c) 2015-2019 LunarG, Inc. * Copyright (c) 2015-2019 Google, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Author: Chia-I Wu <[email protected]> * Author: Chris Forbes <[email protected]> * Author: Courtney Goeltzenleuchter <[email protected]> * Author: Mark Lobodzinski <[email protected]> * Author: Mike Stroyan <[email protected]> * Author: Tobin Ehlis <[email protected]> * Author: Tony Barbour <[email protected]> * Author: Cody Northrop <[email protected]> * Author: Dave Houlton <[email protected]> * Author: Jeremy Kniager <[email protected]> * Author: Shannon McPherson <[email protected]> */ #ifdef ANDROID #include "vulkan_wrapper.h" #else #define NOMINMAX #include <vulkan/vulkan.h> #endif #include "layers/vk_device_profile_api_layer.h" #if defined(ANDROID) && defined(VALIDATION_APK) #include <android/log.h> #include <android_native_app_glue.h> #endif #include "icd-spv.h" #include "test_common.h" #include "vk_layer_config.h" #include "vk_format_utils.h" #include "vkrenderframework.h" #include "vk_typemap_helper.h" #include "convert_to_renderpass2.h" #include <algorithm> #include <cmath> #include <functional> #include <limits> #include <memory> #include <unordered_set> //-------------------------------------------------------------------------------------- // Mesh and VertexFormat Data //-------------------------------------------------------------------------------------- const char *kSkipPrefix = " TEST SKIPPED:"; enum BsoFailSelect { BsoFailNone, BsoFailLineWidth, BsoFailDepthBias, BsoFailViewport, BsoFailScissor, BsoFailBlend, BsoFailDepthBounds, BsoFailStencilReadMask, BsoFailStencilWriteMask, BsoFailStencilReference, BsoFailCmdClearAttachments, BsoFailIndexBuffer, BsoFailIndexBufferBadSize, BsoFailIndexBufferBadOffset, BsoFailIndexBufferBadMapSize, BsoFailIndexBufferBadMapOffset }; static const char bindStateVertShaderText[] = "#version 450\n" "vec2 vertices[3];\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" "}\n"; static const char bindStateFragShaderText[] = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; // Static arrays helper template <class ElementT, size_t array_size> size_t size(ElementT (&)[array_size]) { return array_size; } // Format search helper VkFormat FindSupportedDepthStencilFormat(VkPhysicalDevice phy) { VkFormat ds_formats[] = {VK_FORMAT_D16_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT}; for (uint32_t i = 0; i < sizeof(ds_formats); i++) { VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(phy, ds_formats[i], &format_props); if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) { return ds_formats[i]; } } return VK_FORMAT_UNDEFINED; } // Returns true if *any* requested features are available. // Assumption is that the framework can successfully create an image as // long as at least one of the feature bits is present (excepting VTX_BUF). 
bool ImageFormatIsSupported(VkPhysicalDevice phy, VkFormat format, VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL, VkFormatFeatureFlags features = ~VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) { VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(phy, format, &format_props); VkFormatFeatureFlags phy_features = (VK_IMAGE_TILING_OPTIMAL == tiling ? format_props.optimalTilingFeatures : format_props.linearTilingFeatures); return (0 != (phy_features & features)); } // Returns true if format and *all* requested features are available. bool ImageFormatAndFeaturesSupported(VkPhysicalDevice phy, VkFormat format, VkImageTiling tiling, VkFormatFeatureFlags features) { VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(phy, format, &format_props); VkFormatFeatureFlags phy_features = (VK_IMAGE_TILING_OPTIMAL == tiling ? format_props.optimalTilingFeatures : format_props.linearTilingFeatures); return (features == (phy_features & features)); } // Returns true if format and *all* requested features are available. bool ImageFormatAndFeaturesSupported(const VkInstance inst, const VkPhysicalDevice phy, const VkImageCreateInfo info, const VkFormatFeatureFlags features) { // Verify physical device support of format features if (!ImageFormatAndFeaturesSupported(phy, info.format, info.tiling, features)) { return false; } // Verify that PhysDevImageFormatProp() also claims support for the specific usage VkImageFormatProperties props; VkResult err = vkGetPhysicalDeviceImageFormatProperties(phy, info.format, info.imageType, info.tiling, info.usage, info.flags, &props); if (VK_SUCCESS != err) { return false; } #if 0 // Convinced this chunk doesn't currently add any additional info, but leaving in place because it may be // necessary with future extensions // Verify again using version 2, if supported, which *can* return more property data than the original... 
// (It's not clear that this is any more definitive than using the original version - but no harm) PFN_vkGetPhysicalDeviceImageFormatProperties2KHR p_GetPDIFP2KHR = (PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)vkGetInstanceProcAddr(inst, "vkGetPhysicalDeviceImageFormatProperties2KHR"); if (NULL != p_GetPDIFP2KHR) { VkPhysicalDeviceImageFormatInfo2KHR fmt_info{}; fmt_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR; fmt_info.pNext = nullptr; fmt_info.format = info.format; fmt_info.type = info.imageType; fmt_info.tiling = info.tiling; fmt_info.usage = info.usage; fmt_info.flags = info.flags; VkImageFormatProperties2KHR fmt_props = {}; fmt_props.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR; err = p_GetPDIFP2KHR(phy, &fmt_info, &fmt_props); if (VK_SUCCESS != err) { return false; } } #endif return true; } // Validation report callback prototype static VKAPI_ATTR VkBool32 VKAPI_CALL myDbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject, size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg, void *pUserData); // Simple sane SamplerCreateInfo boilerplate static VkSamplerCreateInfo SafeSaneSamplerCreateInfo() { VkSamplerCreateInfo sampler_create_info = {}; sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; sampler_create_info.pNext = nullptr; sampler_create_info.magFilter = VK_FILTER_NEAREST; sampler_create_info.minFilter = VK_FILTER_NEAREST; sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; sampler_create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler_create_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler_create_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler_create_info.mipLodBias = 0.0; sampler_create_info.anisotropyEnable = VK_FALSE; sampler_create_info.maxAnisotropy = 1.0; sampler_create_info.compareEnable = VK_FALSE; sampler_create_info.compareOp = VK_COMPARE_OP_NEVER; sampler_create_info.minLod = 0.0; sampler_create_info.maxLod = 16.0; sampler_create_info.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; sampler_create_info.unnormalizedCoordinates = VK_FALSE; return sampler_create_info; } // Dependent "false" type for the static assert, as GCC will evaluate // non-dependent static_asserts even for non-instantiated templates template <typename T> struct AlwaysFalse : std::false_type {}; // Helpers to get nearest greater or smaller value (of float) -- useful for testing the boundary cases of Vulkan limits template <typename T> T NearestGreater(const T from) { using Lim = std::numeric_limits<T>; const auto positive_direction = Lim::has_infinity ? Lim::infinity() : Lim::max(); return std::nextafter(from, positive_direction); } template <typename T> T NearestSmaller(const T from) { using Lim = std::numeric_limits<T>; const auto negative_direction = Lim::has_infinity ? -Lim::infinity() : Lim::lowest(); return std::nextafter(from, negative_direction); } // ErrorMonitor Usage: // // Call SetDesiredFailureMsg with a string to be compared against all // encountered log messages, or a validation error enum identifying // desired error message. Passing NULL or VALIDATION_ERROR_MAX_ENUM // will match all log messages. logMsg will return true for skipCall // only if msg is matched or NULL. // // Call VerifyFound to determine if all desired failure messages // were encountered. Call VerifyNotFound to determine if any unexpected // failure was encountered. 
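// A minimal usage sketch (hypothetical test body, shown only to illustrate the
// calls documented above; the message string is made up):
//
//   m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "expected error substring");
//   /* ...issue the Vulkan call that should trigger the error... */
//   m_errorMonitor->VerifyFound();
//
// and for a positive test:
//
//   m_errorMonitor->ExpectSuccess();
//   /* ...issue the call that must not log any error... */
//   m_errorMonitor->VerifyNotFound();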
class ErrorMonitor { public: ErrorMonitor() { test_platform_thread_create_mutex(&mutex_); test_platform_thread_lock_mutex(&mutex_); Reset(); test_platform_thread_unlock_mutex(&mutex_); } ~ErrorMonitor() { test_platform_thread_delete_mutex(&mutex_); } // Set monitor to pristine state void Reset() { message_flags_ = VK_DEBUG_REPORT_ERROR_BIT_EXT; bailout_ = NULL; message_found_ = VK_FALSE; failure_message_strings_.clear(); desired_message_strings_.clear(); ignore_message_strings_.clear(); other_messages_.clear(); } // ErrorMonitor will look for an error message containing the specified string(s) void SetDesiredFailureMsg(const VkFlags msgFlags, const std::string msg) { SetDesiredFailureMsg(msgFlags, msg.c_str()); } void SetDesiredFailureMsg(const VkFlags msgFlags, const char *const msgString) { test_platform_thread_lock_mutex(&mutex_); desired_message_strings_.insert(msgString); message_flags_ |= msgFlags; test_platform_thread_unlock_mutex(&mutex_); } // ErrorMonitor will look for an error message containing the specified string(s) template <typename Iter> void SetDesiredFailureMsg(const VkFlags msgFlags, Iter iter, const Iter end) { for (; iter != end; ++iter) { SetDesiredFailureMsg(msgFlags, *iter); } } // Set an error that the error monitor will ignore. Do not use this function if you are creating a new test. // TODO: This is stopgap to block new unexpected errors from being introduced. The long-term goal is to remove the use of this // function and its definition. void SetUnexpectedError(const char *const msg) { test_platform_thread_lock_mutex(&mutex_); ignore_message_strings_.emplace_back(msg); test_platform_thread_unlock_mutex(&mutex_); } VkBool32 CheckForDesiredMsg(const char *const msgString) { VkBool32 result = VK_FALSE; test_platform_thread_lock_mutex(&mutex_); if (bailout_ != nullptr) { *bailout_ = true; } string errorString(msgString); bool found_expected = false; if (!IgnoreMessage(errorString)) { for (auto desired_msg_it = desired_message_strings_.begin(); desired_msg_it != desired_message_strings_.end(); ++desired_msg_it) { if ((*desired_msg_it).length() == 0) { // An empty desired_msg string "" indicates a positive test - not expecting an error. // Return true to avoid calling layers/driver with this error. // And don't erase the "" string, so it remains if another error is found. 
result = VK_TRUE; found_expected = true; message_found_ = true; failure_message_strings_.insert(errorString); } else if (errorString.find(*desired_msg_it) != string::npos) { found_expected = true; failure_message_strings_.insert(errorString); message_found_ = true; result = VK_TRUE; // Remove a maximum of one failure message from the set // Multiset mutation is acceptable because `break` causes flow of control to exit the for loop desired_message_strings_.erase(desired_msg_it); break; } } if (!found_expected) { printf("Unexpected: %s\n", msgString); other_messages_.push_back(errorString); } } test_platform_thread_unlock_mutex(&mutex_); return result; } vector<string> GetOtherFailureMsgs() const { return other_messages_; } VkDebugReportFlagsEXT GetMessageFlags() const { return message_flags_; } bool AnyDesiredMsgFound() const { return message_found_; } bool AllDesiredMsgsFound() const { return desired_message_strings_.empty(); } void SetError(const char *const errorString) { message_found_ = true; failure_message_strings_.insert(errorString); } void SetBailout(bool *bailout) { bailout_ = bailout; } void DumpFailureMsgs() const { vector<string> otherMsgs = GetOtherFailureMsgs(); if (otherMsgs.size()) { cout << "Other error messages logged for this test were:" << endl; for (auto iter = otherMsgs.begin(); iter != otherMsgs.end(); iter++) { cout << " " << *iter << endl; } } } // Helpers // ExpectSuccess now takes an optional argument allowing a custom combination of debug flags void ExpectSuccess(VkDebugReportFlagsEXT const message_flag_mask = VK_DEBUG_REPORT_ERROR_BIT_EXT) { // Match ANY message matching specified type SetDesiredFailureMsg(message_flag_mask, ""); message_flags_ = message_flag_mask; // override mask handling in SetDesired... } void VerifyFound() { // Not receiving expected message(s) is a failure. /Before/ throwing, dump any other messages if (!AllDesiredMsgsFound()) { DumpFailureMsgs(); for (const auto desired_msg : desired_message_strings_) { ADD_FAILURE() << "Did not receive expected error '" << desired_msg << "'"; } } Reset(); } void VerifyNotFound() { // ExpectSuccess() configured us to match anything. Any error is a failure. if (AnyDesiredMsgFound()) { DumpFailureMsgs(); for (const auto msg : failure_message_strings_) { ADD_FAILURE() << "Expected to succeed but got error: " << msg; } } Reset(); } private: // TODO: This is stopgap to block new unexpected errors from being introduced. The long-term goal is to remove the use of this // function and its definition. 
bool IgnoreMessage(std::string const &msg) const { if (ignore_message_strings_.empty()) { return false; } return std::find_if(ignore_message_strings_.begin(), ignore_message_strings_.end(), [&msg](std::string const &str) { return msg.find(str) != std::string::npos; }) != ignore_message_strings_.end(); } VkFlags message_flags_; std::unordered_multiset<std::string> desired_message_strings_; std::unordered_multiset<std::string> failure_message_strings_; std::vector<std::string> ignore_message_strings_; vector<string> other_messages_; test_platform_thread_mutex mutex_; bool *bailout_; bool message_found_; }; static VKAPI_ATTR VkBool32 VKAPI_CALL myDbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject, size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg, void *pUserData) { ErrorMonitor *errMonitor = (ErrorMonitor *)pUserData; if (msgFlags & errMonitor->GetMessageFlags()) { return errMonitor->CheckForDesiredMsg(pMsg); } return VK_FALSE; } class VkLayerTest : public VkRenderFramework { public: void VKTriangleTest(BsoFailSelect failCase); void GenericDrawPreparation(VkCommandBufferObj *commandBuffer, VkPipelineObj &pipelineobj, VkDescriptorSetObj &descriptorSet, BsoFailSelect failCase); void Init(VkPhysicalDeviceFeatures *features = nullptr, VkPhysicalDeviceFeatures2 *features2 = nullptr, const VkCommandPoolCreateFlags flags = 0) { InitFramework(myDbgFunc, m_errorMonitor); InitState(features, features2, flags); } protected: ErrorMonitor *m_errorMonitor; uint32_t m_instance_api_version = 0; uint32_t m_target_api_version = 0; public: ErrorMonitor *Monitor() { return m_errorMonitor; } VkCommandBufferObj *CommandBuffer() { return m_commandBuffer; } protected: bool m_enableWSI; virtual void SetUp() { m_instance_layer_names.clear(); m_instance_extension_names.clear(); m_device_extension_names.clear(); // Add default instance extensions to the list m_instance_extension_names.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); // Use Threading layer first to protect others from // ThreadCommandBufferCollision test m_instance_layer_names.push_back("VK_LAYER_GOOGLE_threading"); m_instance_layer_names.push_back("VK_LAYER_LUNARG_parameter_validation"); m_instance_layer_names.push_back("VK_LAYER_LUNARG_object_tracker"); m_instance_layer_names.push_back("VK_LAYER_LUNARG_core_validation"); m_instance_layer_names.push_back("VK_LAYER_GOOGLE_unique_objects"); if (VkTestFramework::m_devsim_layer) { if (InstanceLayerSupported("VK_LAYER_LUNARG_device_simulation")) { m_instance_layer_names.push_back("VK_LAYER_LUNARG_device_simulation"); } else { VkTestFramework::m_devsim_layer = false; printf(" Did not find VK_LAYER_LUNARG_device_simulation layer so it will not be enabled.\n"); } } if (m_enableWSI) { m_instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); #ifdef NEED_TO_TEST_THIS_ON_PLATFORM #if defined(VK_USE_PLATFORM_ANDROID_KHR) m_instance_extension_names.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); #endif // VK_USE_PLATFORM_ANDROID_KHR #if defined(VK_USE_PLATFORM_WAYLAND_KHR) m_instance_extension_names.push_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME); #endif // VK_USE_PLATFORM_WAYLAND_KHR #if defined(VK_USE_PLATFORM_WIN32_KHR) m_instance_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); #endif // VK_USE_PLATFORM_WIN32_KHR #endif // NEED_TO_TEST_THIS_ON_PLATFORM #if defined(VK_USE_PLATFORM_XCB_KHR) m_instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); 
#elif defined(VK_USE_PLATFORM_XLIB_KHR) m_instance_extension_names.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME); #endif // VK_USE_PLATFORM_XLIB_KHR } this->app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; this->app_info.pNext = NULL; this->app_info.pApplicationName = "layer_tests"; this->app_info.applicationVersion = 1; this->app_info.pEngineName = "unittest"; this->app_info.engineVersion = 1; this->app_info.apiVersion = VK_API_VERSION_1_0; m_errorMonitor = new ErrorMonitor; // Find out what version the instance supports and record the default target instance auto enumerateInstanceVersion = (PFN_vkEnumerateInstanceVersion)vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"); if (enumerateInstanceVersion) { enumerateInstanceVersion(&m_instance_api_version); } else { m_instance_api_version = VK_API_VERSION_1_0; } m_target_api_version = app_info.apiVersion; } uint32_t SetTargetApiVersion(uint32_t target_api_version) { if (target_api_version == 0) target_api_version = VK_API_VERSION_1_0; if (target_api_version <= m_instance_api_version) { m_target_api_version = target_api_version; app_info.apiVersion = m_target_api_version; } return m_target_api_version; } uint32_t DeviceValidationVersion() { // The validation layers, assume the version we are validating to is the apiVersion unless the device apiVersion is lower VkPhysicalDeviceProperties props; GetPhysicalDeviceProperties(&props); return std::min(m_target_api_version, props.apiVersion); } bool LoadDeviceProfileLayer( PFN_vkSetPhysicalDeviceFormatPropertiesEXT &fpvkSetPhysicalDeviceFormatPropertiesEXT, PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT &fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT) { // Load required functions fpvkSetPhysicalDeviceFormatPropertiesEXT = (PFN_vkSetPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceFormatPropertiesEXT"); fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = (PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr( instance(), "vkGetOriginalPhysicalDeviceFormatPropertiesEXT"); if (!(fpvkSetPhysicalDeviceFormatPropertiesEXT) || !(fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix); return 0; } return 1; } virtual void TearDown() { // Clean up resources before we reset ShutdownFramework(); delete m_errorMonitor; } VkLayerTest() { m_enableWSI = false; } }; void VkLayerTest::VKTriangleTest(BsoFailSelect failCase) { ASSERT_TRUE(m_device && m_device->initialized()); // VKTriangleTest assumes Init() has finished ASSERT_NO_FATAL_FAILURE(InitViewport()); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&ps); bool failcase_needs_depth = false; // to mark cases that need depth attachment VkBufferObj index_buffer; switch (failCase) { case BsoFailLineWidth: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_LINE_WIDTH); VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST; pipelineobj.SetInputAssembly(&ia_state); break; } case BsoFailDepthBias: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_DEPTH_BIAS); VkPipelineRasterizationStateCreateInfo rs_state = {}; rs_state.sType = 
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state.depthBiasEnable = VK_TRUE; rs_state.lineWidth = 1.0f; pipelineobj.SetRasterization(&rs_state); break; } case BsoFailViewport: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT); break; } case BsoFailScissor: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_SCISSOR); break; } case BsoFailBlend: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_BLEND_CONSTANTS); VkPipelineColorBlendAttachmentState att_state = {}; att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state.blendEnable = VK_TRUE; pipelineobj.AddColorAttachment(0, att_state); break; } case BsoFailDepthBounds: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_DEPTH_BOUNDS); break; } case BsoFailStencilReadMask: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK); break; } case BsoFailStencilWriteMask: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK); break; } case BsoFailStencilReference: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_REFERENCE); break; } case BsoFailIndexBuffer: break; case BsoFailIndexBufferBadSize: case BsoFailIndexBufferBadOffset: case BsoFailIndexBufferBadMapSize: case BsoFailIndexBufferBadMapOffset: { // Create an index buffer for these tests. // There is no need to populate it because we should bail before trying to draw. uint32_t const indices[] = {0}; VkBufferCreateInfo buffer_info = {}; buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_info.size = 1024; buffer_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; buffer_info.queueFamilyIndexCount = 1; buffer_info.pQueueFamilyIndices = indices; index_buffer.init(*m_device, buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); } break; case BsoFailCmdClearAttachments: break; case BsoFailNone: break; default: break; } VkDescriptorSetObj descriptorSet(m_device); VkImageView *depth_attachment = nullptr; if (failcase_needs_depth) { m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu()); ASSERT_TRUE(m_depth_stencil_fmt != VK_FORMAT_UNDEFINED); m_depthStencil->Init(m_device, static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height), m_depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT); depth_attachment = m_depthStencil->BindInfo(); } ASSERT_NO_FATAL_FAILURE(InitRenderTarget(1, depth_attachment)); m_commandBuffer->begin(); GenericDrawPreparation(m_commandBuffer, pipelineobj, descriptorSet, failCase); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // render triangle if (failCase == BsoFailIndexBuffer) { // Use DrawIndexed w/o an index buffer bound m_commandBuffer->DrawIndexed(3, 1, 0, 0, 0); } else if (failCase == BsoFailIndexBufferBadSize) { // Bind the index buffer and draw one too many indices m_commandBuffer->BindIndexBuffer(&index_buffer, 0, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(513, 1, 0, 0, 0); } else if (failCase == BsoFailIndexBufferBadOffset) { // Bind the index buffer and draw one past the end of the buffer using the offset m_commandBuffer->BindIndexBuffer(&index_buffer, 0, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(512, 1, 1, 0, 0); } else if (failCase == BsoFailIndexBufferBadMapSize) { // Bind the index buffer at the middle point and draw one too many indices m_commandBuffer->BindIndexBuffer(&index_buffer, 512, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(257, 1, 0, 0, 0); } else if (failCase == 
BsoFailIndexBufferBadMapOffset) { // Bind the index buffer at the middle point and draw one past the end of the buffer m_commandBuffer->BindIndexBuffer(&index_buffer, 512, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(256, 1, 1, 0, 0); } else { m_commandBuffer->Draw(3, 1, 0, 0); } if (failCase == BsoFailCmdClearAttachments) { VkClearAttachment color_attachment = {}; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.colorAttachment = 1; // Someone who knew what they were doing would use 0 for the index; VkClearRect clear_rect = {{{0, 0}, {static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height)}}, 0, 0}; vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); } // finalize recording of the command buffer m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(true); DestroyRenderTarget(); } void VkLayerTest::GenericDrawPreparation(VkCommandBufferObj *commandBuffer, VkPipelineObj &pipelineobj, VkDescriptorSetObj &descriptorSet, BsoFailSelect failCase) { commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); // Make sure depthWriteEnable is set so that Depth fail test will work // correctly // Make sure stencilTestEnable is set so that Stencil fail test will work // correctly VkStencilOpState stencil = {}; stencil.failOp = VK_STENCIL_OP_KEEP; stencil.passOp = VK_STENCIL_OP_KEEP; stencil.depthFailOp = VK_STENCIL_OP_KEEP; stencil.compareOp = VK_COMPARE_OP_NEVER; VkPipelineDepthStencilStateCreateInfo ds_ci = {}; ds_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; ds_ci.pNext = NULL; ds_ci.depthTestEnable = VK_FALSE; ds_ci.depthWriteEnable = VK_TRUE; ds_ci.depthCompareOp = VK_COMPARE_OP_NEVER; ds_ci.depthBoundsTestEnable = VK_FALSE; if (failCase == BsoFailDepthBounds) { ds_ci.depthBoundsTestEnable = VK_TRUE; ds_ci.maxDepthBounds = 0.0f; ds_ci.minDepthBounds = 0.0f; } ds_ci.stencilTestEnable = VK_TRUE; ds_ci.front = stencil; ds_ci.back = stencil; pipelineobj.SetDepthStencil(&ds_ci); pipelineobj.SetViewport(m_viewports); pipelineobj.SetScissor(m_scissors); descriptorSet.CreateVKDescriptorSet(commandBuffer); VkResult err = pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); vkCmdBindPipeline(commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineobj.handle()); commandBuffer->BindDescriptorSet(descriptorSet); } class VkPositiveLayerTest : public VkLayerTest { public: protected: }; class VkWsiEnabledLayerTest : public VkLayerTest { public: protected: VkWsiEnabledLayerTest() { m_enableWSI = true; } }; class VkBufferTest { public: enum eTestEnFlags { eDoubleDelete, eInvalidDeviceOffset, eInvalidMemoryOffset, eBindNullBuffer, eBindFakeBuffer, eFreeInvalidHandle, eNone, }; enum eTestConditions { eOffsetAlignment = 1 }; static bool GetTestConditionValid(VkDeviceObj *aVulkanDevice, eTestEnFlags aTestFlag, VkBufferUsageFlags aBufferUsage = 0) { if (eInvalidDeviceOffset != aTestFlag && eInvalidMemoryOffset != aTestFlag) { return true; } VkDeviceSize offset_limit = 0; if (eInvalidMemoryOffset == aTestFlag) { VkBuffer vulkanBuffer; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 32; buffer_create_info.usage = aBufferUsage; vkCreateBuffer(aVulkanDevice->device(), &buffer_create_info, nullptr, 
&vulkanBuffer); VkMemoryRequirements memory_reqs = {}; vkGetBufferMemoryRequirements(aVulkanDevice->device(), vulkanBuffer, &memory_reqs); vkDestroyBuffer(aVulkanDevice->device(), vulkanBuffer, nullptr); offset_limit = memory_reqs.alignment; } else if ((VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) & aBufferUsage) { offset_limit = aVulkanDevice->props.limits.minTexelBufferOffsetAlignment; } else if (VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT & aBufferUsage) { offset_limit = aVulkanDevice->props.limits.minUniformBufferOffsetAlignment; } else if (VK_BUFFER_USAGE_STORAGE_BUFFER_BIT & aBufferUsage) { offset_limit = aVulkanDevice->props.limits.minStorageBufferOffsetAlignment; } return eOffsetAlignment < offset_limit; } // A constructor which performs validation tests within construction. VkBufferTest(VkDeviceObj *aVulkanDevice, VkBufferUsageFlags aBufferUsage, eTestEnFlags aTestFlag = eNone) : AllocateCurrent(true), BoundCurrent(false), CreateCurrent(false), InvalidDeleteEn(false), VulkanDevice(aVulkanDevice->device()) { if (eBindNullBuffer == aTestFlag || eBindFakeBuffer == aTestFlag) { VkMemoryAllocateInfo memory_allocate_info = {}; memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_allocate_info.allocationSize = 1; // fake size -- shouldn't matter for the test memory_allocate_info.memoryTypeIndex = 0; // fake type -- shouldn't matter for the test vkAllocateMemory(VulkanDevice, &memory_allocate_info, nullptr, &VulkanMemory); VulkanBuffer = (aTestFlag == eBindNullBuffer) ? VK_NULL_HANDLE : (VkBuffer)0xCDCDCDCDCDCDCDCD; vkBindBufferMemory(VulkanDevice, VulkanBuffer, VulkanMemory, 0); } else { VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 32; buffer_create_info.usage = aBufferUsage; vkCreateBuffer(VulkanDevice, &buffer_create_info, nullptr, &VulkanBuffer); CreateCurrent = true; VkMemoryRequirements memory_requirements; vkGetBufferMemoryRequirements(VulkanDevice, VulkanBuffer, &memory_requirements); VkMemoryAllocateInfo memory_allocate_info = {}; memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_allocate_info.allocationSize = memory_requirements.size + eOffsetAlignment; bool pass = aVulkanDevice->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { CreateCurrent = false; vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr); return; } vkAllocateMemory(VulkanDevice, &memory_allocate_info, NULL, &VulkanMemory); // NB: 1 is intentionally an invalid offset value const bool offset_en = eInvalidDeviceOffset == aTestFlag || eInvalidMemoryOffset == aTestFlag; vkBindBufferMemory(VulkanDevice, VulkanBuffer, VulkanMemory, offset_en ? 
eOffsetAlignment : 0); BoundCurrent = true; InvalidDeleteEn = (eFreeInvalidHandle == aTestFlag); } } ~VkBufferTest() { if (CreateCurrent) { vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr); } if (AllocateCurrent) { if (InvalidDeleteEn) { union { VkDeviceMemory device_memory; unsigned long long index_access; } bad_index; bad_index.device_memory = VulkanMemory; bad_index.index_access++; vkFreeMemory(VulkanDevice, bad_index.device_memory, nullptr); } vkFreeMemory(VulkanDevice, VulkanMemory, nullptr); } } bool GetBufferCurrent() { return AllocateCurrent && BoundCurrent && CreateCurrent; } const VkBuffer &GetBuffer() { return VulkanBuffer; } void TestDoubleDestroy() { // Destroy the buffer but leave the flag set, which will cause // the buffer to be destroyed again in the destructor. vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr); } protected: bool AllocateCurrent; bool BoundCurrent; bool CreateCurrent; bool InvalidDeleteEn; VkBuffer VulkanBuffer; VkDevice VulkanDevice; VkDeviceMemory VulkanMemory; }; class VkVerticesObj { public: VkVerticesObj(VkDeviceObj *aVulkanDevice, unsigned aAttributeCount, unsigned aBindingCount, unsigned aByteStride, VkDeviceSize aVertexCount, const float *aVerticies) : BoundCurrent(false), AttributeCount(aAttributeCount), BindingCount(aBindingCount), BindId(BindIdGenerator), PipelineVertexInputStateCreateInfo(), VulkanMemoryBuffer(aVulkanDevice, static_cast<int>(aByteStride * aVertexCount), reinterpret_cast<const void *>(aVerticies), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) { BindIdGenerator++; // NB: This can wrap w/misuse VertexInputAttributeDescription = new VkVertexInputAttributeDescription[AttributeCount]; VertexInputBindingDescription = new VkVertexInputBindingDescription[BindingCount]; PipelineVertexInputStateCreateInfo.pVertexAttributeDescriptions = VertexInputAttributeDescription; PipelineVertexInputStateCreateInfo.vertexAttributeDescriptionCount = AttributeCount; PipelineVertexInputStateCreateInfo.pVertexBindingDescriptions = VertexInputBindingDescription; PipelineVertexInputStateCreateInfo.vertexBindingDescriptionCount = BindingCount; PipelineVertexInputStateCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; unsigned i = 0; do { VertexInputAttributeDescription[i].binding = BindId; VertexInputAttributeDescription[i].location = i; VertexInputAttributeDescription[i].format = VK_FORMAT_R32G32B32_SFLOAT; VertexInputAttributeDescription[i].offset = sizeof(float) * aByteStride; i++; } while (AttributeCount < i); i = 0; do { VertexInputBindingDescription[i].binding = BindId; VertexInputBindingDescription[i].stride = aByteStride; VertexInputBindingDescription[i].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; i++; } while (BindingCount < i); } ~VkVerticesObj() { if (VertexInputAttributeDescription) { delete[] VertexInputAttributeDescription; } if (VertexInputBindingDescription) { delete[] VertexInputBindingDescription; } } bool AddVertexInputToPipe(VkPipelineObj &aPipelineObj) { aPipelineObj.AddVertexInputAttribs(VertexInputAttributeDescription, AttributeCount); aPipelineObj.AddVertexInputBindings(VertexInputBindingDescription, BindingCount); return true; } void BindVertexBuffers(VkCommandBuffer aCommandBuffer, unsigned aOffsetCount = 0, VkDeviceSize *aOffsetList = nullptr) { VkDeviceSize *offsetList; unsigned offsetCount; if (aOffsetCount) { offsetList = aOffsetList; offsetCount = aOffsetCount; } else { offsetList = new VkDeviceSize[1](); offsetCount = 1; } vkCmdBindVertexBuffers(aCommandBuffer, BindId, offsetCount, 
&VulkanMemoryBuffer.handle(), offsetList); BoundCurrent = true; if (!aOffsetCount) { delete[] offsetList; } } protected: static uint32_t BindIdGenerator; bool BoundCurrent; unsigned AttributeCount; unsigned BindingCount; uint32_t BindId; VkPipelineVertexInputStateCreateInfo PipelineVertexInputStateCreateInfo; VkVertexInputAttributeDescription *VertexInputAttributeDescription; VkVertexInputBindingDescription *VertexInputBindingDescription; VkConstantBufferObj VulkanMemoryBuffer; }; uint32_t VkVerticesObj::BindIdGenerator; struct OneOffDescriptorSet { VkDeviceObj *device_; VkDescriptorPool pool_; VkDescriptorSetLayoutObj layout_; VkDescriptorSet set_; typedef std::vector<VkDescriptorSetLayoutBinding> Bindings; OneOffDescriptorSet(VkDeviceObj *device, const Bindings &bindings) : device_{device}, pool_{}, layout_(device, bindings), set_{} { VkResult err; std::vector<VkDescriptorPoolSize> sizes; for (const auto &b : bindings) sizes.push_back({b.descriptorType, std::max(1u, b.descriptorCount)}); VkDescriptorPoolCreateInfo dspci = { VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, nullptr, 0, 1, uint32_t(sizes.size()), sizes.data()}; err = vkCreateDescriptorPool(device_->handle(), &dspci, nullptr, &pool_); if (err != VK_SUCCESS) return; VkDescriptorSetAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, nullptr, pool_, 1, &layout_.handle()}; err = vkAllocateDescriptorSets(device_->handle(), &alloc_info, &set_); } ~OneOffDescriptorSet() { // No need to destroy set-- it's going away with the pool. vkDestroyDescriptorPool(device_->handle(), pool_, nullptr); } bool Initialized() { return pool_ != VK_NULL_HANDLE && layout_.initialized() && set_ != VK_NULL_HANDLE; } }; template <typename T> bool IsValidVkStruct(const T &s) { return LvlTypeMap<T>::kSType == s.sType; } // Helper class for tersely creating create pipeline tests // // Designed with minimal error checking to ensure easy error state creation // See OneshotTest for typical usage struct CreatePipelineHelper { public: std::vector<VkDescriptorSetLayoutBinding> dsl_bindings_; std::unique_ptr<OneOffDescriptorSet> descriptor_set_; std::vector<VkPipelineShaderStageCreateInfo> shader_stages_; VkPipelineVertexInputStateCreateInfo vi_ci_ = {}; VkPipelineInputAssemblyStateCreateInfo ia_ci_ = {}; VkPipelineTessellationStateCreateInfo tess_ci_ = {}; VkViewport viewport_ = {}; VkRect2D scissor_ = {}; VkPipelineViewportStateCreateInfo vp_state_ci_ = {}; VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci_ = {}; VkPipelineLayoutCreateInfo pipeline_layout_ci_ = {}; VkPipelineLayoutObj pipeline_layout_; VkPipelineDynamicStateCreateInfo dyn_state_ci_ = {}; VkPipelineRasterizationStateCreateInfo rs_state_ci_ = {}; VkPipelineColorBlendAttachmentState cb_attachments_ = {}; VkPipelineColorBlendStateCreateInfo cb_ci_ = {}; VkGraphicsPipelineCreateInfo gp_ci_ = {}; VkPipelineCacheCreateInfo pc_ci_ = {}; VkPipeline pipeline_ = VK_NULL_HANDLE; VkPipelineCache pipeline_cache_ = VK_NULL_HANDLE; std::unique_ptr<VkShaderObj> vs_; std::unique_ptr<VkShaderObj> fs_; VkLayerTest &layer_test_; CreatePipelineHelper(VkLayerTest &test) : layer_test_(test) {} ~CreatePipelineHelper() { VkDevice device = layer_test_.device(); vkDestroyPipelineCache(device, pipeline_cache_, nullptr); vkDestroyPipeline(device, pipeline_, nullptr); } void InitDescriptorSetInfo() { dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; } void InitInputAndVertexInfo() { vi_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; 
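        // Note: vi_ci_ is left with no vertex bindings or attributes; tests that need real vertex
        // input presumably override vi_ci_ (or the shaders) before CreateGraphicsPipeline().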
ia_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; } void InitMultisampleInfo() { pipe_ms_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci_.pNext = nullptr; pipe_ms_state_ci_.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci_.sampleShadingEnable = VK_FALSE; pipe_ms_state_ci_.minSampleShading = 1.0; pipe_ms_state_ci_.pSampleMask = NULL; } void InitPipelineLayoutInfo() { pipeline_layout_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci_.setLayoutCount = 1; // Not really changeable because InitState() sets exactly one pSetLayout pipeline_layout_ci_.pSetLayouts = nullptr; // must bound after it is created } void InitViewportInfo() { viewport_ = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; scissor_ = {{0, 0}, {64, 64}}; vp_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; vp_state_ci_.pNext = nullptr; vp_state_ci_.viewportCount = 1; vp_state_ci_.pViewports = &viewport_; // ignored if dynamic vp_state_ci_.scissorCount = 1; vp_state_ci_.pScissors = &scissor_; // ignored if dynamic } void InitDynamicStateInfo() { // Use a "validity" check on the {} initialized structure to detect initialization // during late bind } void InitShaderInfo() { vs_.reset(new VkShaderObj(layer_test_.DeviceObj(), bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, &layer_test_)); fs_.reset(new VkShaderObj(layer_test_.DeviceObj(), bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, &layer_test_)); // We shouldn't need a fragment shader but add it to be able to run on more devices shader_stages_ = {vs_->GetStageCreateInfo(), fs_->GetStageCreateInfo()}; } void InitRasterizationInfo() { rs_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state_ci_.pNext = nullptr; rs_state_ci_.flags = 0; rs_state_ci_.depthClampEnable = VK_FALSE; rs_state_ci_.rasterizerDiscardEnable = VK_FALSE; rs_state_ci_.polygonMode = VK_POLYGON_MODE_FILL; rs_state_ci_.cullMode = VK_CULL_MODE_BACK_BIT; rs_state_ci_.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rs_state_ci_.depthBiasEnable = VK_FALSE; rs_state_ci_.lineWidth = 1.0F; } void InitBlendStateInfo() { cb_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; cb_ci_.logicOpEnable = VK_FALSE; cb_ci_.logicOp = VK_LOGIC_OP_COPY; // ignored if enable is VK_FALSE above cb_ci_.attachmentCount = layer_test_.RenderPassInfo().subpassCount; ASSERT_TRUE(IsValidVkStruct(layer_test_.RenderPassInfo())); cb_ci_.pAttachments = &cb_attachments_; for (int i = 0; i < 4; i++) { cb_ci_.blendConstants[0] = 1.0F; } } void InitGraphicsPipelineInfo() { // Color-only rendering in a subpass with no depth/stencil attachment // Active Pipeline Shader Stages // Vertex Shader // Fragment Shader // Required: Fixed-Function Pipeline Stages // VkPipelineVertexInputStateCreateInfo // VkPipelineInputAssemblyStateCreateInfo // VkPipelineViewportStateCreateInfo // VkPipelineRasterizationStateCreateInfo // VkPipelineMultisampleStateCreateInfo // VkPipelineColorBlendStateCreateInfo gp_ci_.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; gp_ci_.pNext = nullptr; gp_ci_.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; gp_ci_.pVertexInputState = &vi_ci_; gp_ci_.pInputAssemblyState = &ia_ci_; gp_ci_.pTessellationState = nullptr; gp_ci_.pViewportState = &vp_state_ci_; gp_ci_.pRasterizationState = &rs_state_ci_; gp_ci_.pMultisampleState = &pipe_ms_state_ci_; gp_ci_.pDepthStencilState = nullptr; 
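        // pDepthStencilState stays null: as noted above, this helper targets color-only rendering
        // in a subpass with no depth/stencil attachment.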
        gp_ci_.pColorBlendState = &cb_ci_;
        gp_ci_.pDynamicState = nullptr;
        gp_ci_.renderPass = layer_test_.renderPass();
    }

    void InitPipelineCacheInfo() {
        pc_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        pc_ci_.pNext = nullptr;
        pc_ci_.flags = 0;
        pc_ci_.initialDataSize = 0;
        pc_ci_.pInitialData = nullptr;
    }

    // Not called by default during init_info
    void InitTesselationState() {
        // TBD -- add shaders and create_info
    }

    // TBD -- add control for optional and/or additional initialization
    void InitInfo() {
        InitDescriptorSetInfo();
        InitInputAndVertexInfo();
        InitMultisampleInfo();
        InitPipelineLayoutInfo();
        InitViewportInfo();
        InitDynamicStateInfo();
        InitShaderInfo();
        InitRasterizationInfo();
        InitBlendStateInfo();
        InitGraphicsPipelineInfo();
        InitPipelineCacheInfo();
    }

    void InitState() {
        VkResult err;
        descriptor_set_.reset(new OneOffDescriptorSet(layer_test_.DeviceObj(), dsl_bindings_));
        ASSERT_TRUE(descriptor_set_->Initialized());

        const std::vector<VkPushConstantRange> push_ranges(
            pipeline_layout_ci_.pPushConstantRanges,
            pipeline_layout_ci_.pPushConstantRanges + pipeline_layout_ci_.pushConstantRangeCount);
        pipeline_layout_ = VkPipelineLayoutObj(layer_test_.DeviceObj(), {&descriptor_set_->layout_}, push_ranges);

        err = vkCreatePipelineCache(layer_test_.device(), &pc_ci_, NULL, &pipeline_cache_);
        ASSERT_VK_SUCCESS(err);
    }

    void LateBindPipelineInfo() {
        // By value or dynamically located items must be late bound
        gp_ci_.layout = pipeline_layout_.handle();
        gp_ci_.stageCount = shader_stages_.size();
        gp_ci_.pStages = shader_stages_.data();
        if ((gp_ci_.pTessellationState == nullptr) && IsValidVkStruct(tess_ci_)) {
            gp_ci_.pTessellationState = &tess_ci_;
        }
        if ((gp_ci_.pDynamicState == nullptr) && IsValidVkStruct(dyn_state_ci_)) {
            gp_ci_.pDynamicState = &dyn_state_ci_;
        }
    }

    VkResult CreateGraphicsPipeline(bool implicit_destroy = true, bool do_late_bind = true) {
        VkResult err;
        if (do_late_bind) {
            LateBindPipelineInfo();
        }
        if (implicit_destroy && (pipeline_ != VK_NULL_HANDLE)) {
            vkDestroyPipeline(layer_test_.device(), pipeline_, nullptr);
            pipeline_ = VK_NULL_HANDLE;
        }
        err = vkCreateGraphicsPipelines(layer_test_.device(), pipeline_cache_, 1, &gp_ci_, NULL, &pipeline_);
        return err;
    }

    // Helper function to create a simple test case (positive or negative)
    //
    // info_override can be any callable that takes a CreatePipelineHelper &
    // flags, error can be any args accepted by "SetDesiredFailureMsg".
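    // A minimal usage sketch (the override lambda and VUID string below are hypothetical,
    // shown for illustration only -- they are not part of this file):
    //     const auto break_vp = [](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = 0; };
    //     CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT,
    //                                       "VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength");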
template <typename Test, typename OverrideFunc, typename Error> static void OneshotTest(Test &test, OverrideFunc &info_override, const VkFlags flags, const std::vector<Error> &errors, bool positive_test = false) { CreatePipelineHelper helper(test); helper.InitInfo(); info_override(helper); helper.InitState(); for (const auto &error : errors) test.Monitor()->SetDesiredFailureMsg(flags, error); helper.CreateGraphicsPipeline(); if (positive_test) { test.Monitor()->VerifyNotFound(); } else { test.Monitor()->VerifyFound(); } } template <typename Test, typename OverrideFunc, typename Error> static void OneshotTest(Test &test, OverrideFunc &info_override, const VkFlags flags, Error error, bool positive_test = false) { OneshotTest(test, info_override, flags, std::vector<Error>(1, error), positive_test); } }; namespace chain_util { template <typename T> T Init(const void *pnext_in = nullptr) { T pnext_obj = {}; pnext_obj.sType = LvlTypeMap<T>::kSType; pnext_obj.pNext = pnext_in; return pnext_obj; } class ExtensionChain { const void *head_ = nullptr; typedef std::function<bool(const char *)> AddIfFunction; AddIfFunction add_if_; typedef std::vector<const char *> List; List *list_; public: template <typename F> ExtensionChain(F &add_if, List *list) : add_if_(add_if), list_(list) {} template <typename T> void Add(const char *name, T &obj) { if (add_if_(name)) { if (list_) { list_->push_back(name); } obj.pNext = head_; head_ = &obj; } } const void *Head() const { return head_; } }; } // namespace chain_util // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** TEST_F(VkLayerTest, RequiredParameter) { TEST_DESCRIPTION("Specify VK_NULL_HANDLE, NULL, and 0 for required handle, pointer, array, and array count parameters"); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pFeatures specified as NULL"); // Specify NULL for a pointer to a handle // Expected to trigger an error with // parameter_validation::validate_required_pointer vkGetPhysicalDeviceFeatures(gpu(), NULL); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pQueueFamilyPropertyCount specified as NULL"); // Specify NULL for pointer to array count // Expected to trigger an error with parameter_validation::validate_array vkGetPhysicalDeviceQueueFamilyProperties(gpu(), NULL, NULL); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength"); // Specify 0 for a required array count // Expected to trigger an error with parameter_validation::validate_array VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_commandBuffer->SetViewport(0, 0, &viewport); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCreateImage-pCreateInfo-parameter"); // Specify a null pImageCreateInfo struct pointer VkImage test_image; vkCreateImage(device(), NULL, NULL, &test_image); m_errorMonitor->VerifyFound(); 
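    // The remaining checks below follow the same pattern: arm the monitor with the expected
    // message or VUID via SetDesiredFailureMsg(), make the invalid call, then VerifyFound().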
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter"); // Specify NULL for a required array // Expected to trigger an error with parameter_validation::validate_array m_commandBuffer->SetViewport(0, 1, NULL); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter memory specified as VK_NULL_HANDLE"); // Specify VK_NULL_HANDLE for a required handle // Expected to trigger an error with // parameter_validation::validate_required_handle vkUnmapMemory(device(), VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pFences[0] specified as VK_NULL_HANDLE"); // Specify VK_NULL_HANDLE for a required handle array entry // Expected to trigger an error with // parameter_validation::validate_required_handle_array VkFence fence = VK_NULL_HANDLE; vkResetFences(device(), 1, &fence); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pAllocateInfo specified as NULL"); // Specify NULL for a required struct pointer // Expected to trigger an error with // parameter_validation::validate_struct_type VkDeviceMemory memory = VK_NULL_HANDLE; vkAllocateMemory(device(), NULL, NULL, &memory); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "value of faceMask must not be 0"); // Specify 0 for a required VkFlags parameter // Expected to trigger an error with parameter_validation::validate_flags m_commandBuffer->SetStencilReference(0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "value of pSubmits[0].pWaitDstStageMask[0] must not be 0"); // Specify 0 for a required VkFlags array entry // Expected to trigger an error with // parameter_validation::validate_flags_array VkSemaphore semaphore = VK_NULL_HANDLE; VkPipelineStageFlags stageFlags = 0; VkSubmitInfo submitInfo = {}; submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submitInfo.waitSemaphoreCount = 1; submitInfo.pWaitSemaphores = &semaphore; submitInfo.pWaitDstStageMask = &stageFlags; vkQueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubmitInfo-sType-sType"); stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; // Set a bogus sType and see what happens submitInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; submitInfo.waitSemaphoreCount = 1; submitInfo.pWaitSemaphores = &semaphore; submitInfo.pWaitDstStageMask = &stageFlags; vkQueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, PnextOnlyStructValidation) { TEST_DESCRIPTION("See if checks occur on structs ONLY used in pnext chains."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 2> required_device_extensions = { {VK_KHR_MAINTENANCE3_EXTENSION_NAME, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if 
(DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device passing in a bad PdevFeatures2 value auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); // Set one of the features values to an invalid boolean value indexing_features.descriptorBindingUniformBufferUpdateAfterBind = 800; uint32_t queue_node_count; vkGetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_node_count, NULL); VkQueueFamilyProperties *queue_props = new VkQueueFamilyProperties[queue_node_count]; vkGetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_node_count, queue_props); float priorities[] = {1.0f}; VkDeviceQueueCreateInfo queue_info{}; queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queue_info.pNext = NULL; queue_info.flags = 0; queue_info.queueFamilyIndex = 0; queue_info.queueCount = 1; queue_info.pQueuePriorities = &priorities[0]; VkDeviceCreateInfo dev_info = {}; dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; dev_info.pNext = NULL; dev_info.queueCreateInfoCount = 1; dev_info.pQueueCreateInfos = &queue_info; dev_info.enabledLayerCount = 0; dev_info.ppEnabledLayerNames = NULL; dev_info.enabledExtensionCount = m_device_extension_names.size(); dev_info.ppEnabledExtensionNames = m_device_extension_names.data(); dev_info.pNext = &features2; VkDevice dev; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is neither VK_TRUE nor VK_FALSE"); m_errorMonitor->SetUnexpectedError("Failed to create"); vkCreateDevice(gpu(), &dev_info, NULL, &dev); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ReservedParameter) { TEST_DESCRIPTION("Specify a non-zero value for a reserved parameter"); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " must be 0"); // Specify 0 for a reserved VkFlags parameter // Expected to trigger an error with // parameter_validation::validate_reserved_flags VkEvent event_handle = VK_NULL_HANDLE; VkEventCreateInfo event_info = {}; event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; event_info.flags = 1; vkCreateEvent(device(), &event_info, NULL, &event_handle); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DebugMarkerNameTest) { ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), "VK_LAYER_LUNARG_core_validation", VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_DEBUG_MARKER_EXTENSION_NAME); } else { printf("%s Debug Marker Extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); PFN_vkDebugMarkerSetObjectNameEXT fpvkDebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT)vkGetInstanceProcAddr(instance(), "vkDebugMarkerSetObjectNameEXT"); if (!(fpvkDebugMarkerSetObjectNameEXT)) { printf("%s Can't find fpvkDebugMarkerSetObjectNameEXT; skipped.\n", kSkipPrefix); return; } VkEvent event_handle = VK_NULL_HANDLE; VkEventCreateInfo event_info = {}; event_info.sType = 
VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(device(), &event_info, NULL, &event_handle); VkDebugMarkerObjectNameInfoEXT name_info = {}; name_info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT; name_info.pNext = nullptr; name_info.object = (uint64_t)event_handle; name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT; name_info.pObjectName = "UnimaginablyImprobableString"; fpvkDebugMarkerSetObjectNameEXT(device(), &name_info); m_commandBuffer->begin(); vkCmdSetEvent(m_commandBuffer->handle(), event_handle, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UnimaginablyImprobableString"); vkDestroyEvent(m_device->device(), event_handle, NULL); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); } TEST_F(VkLayerTest, InvalidStructSType) { TEST_DESCRIPTION("Specify an invalid VkStructureType for a Vulkan structure's sType field"); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "parameter pAllocateInfo->sType must be"); // Zero struct memory, effectively setting sType to // VK_STRUCTURE_TYPE_APPLICATION_INFO // Expected to trigger an error with // parameter_validation::validate_struct_type VkMemoryAllocateInfo alloc_info = {}; VkDeviceMemory memory = VK_NULL_HANDLE; vkAllocateMemory(device(), &alloc_info, NULL, &memory); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "parameter pSubmits[0].sType must be"); // Zero struct memory, effectively setting sType to // VK_STRUCTURE_TYPE_APPLICATION_INFO // Expected to trigger an error with // parameter_validation::validate_struct_type_array VkSubmitInfo submit_info = {}; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidStructPNext) { TEST_DESCRIPTION("Specify an invalid value for a Vulkan structure's pNext field"); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "value of pCreateInfo->pNext must be NULL"); // Set VkMemoryAllocateInfo::pNext to a non-NULL value, when pNext must be NULL. // Need to pick a function that has no allowed pNext structure types. // Expected to trigger an error with parameter_validation::validate_struct_pnext VkEvent event = VK_NULL_HANDLE; VkEventCreateInfo event_alloc_info = {}; // Zero-initialization will provide the correct sType VkApplicationInfo app_info = {}; event_alloc_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; event_alloc_info.pNext = &app_info; vkCreateEvent(device(), &event_alloc_info, NULL, &event); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, " chain includes a structure with unexpected VkStructureType "); // Set VkMemoryAllocateInfo::pNext to a non-NULL value, but use // a function that has allowed pNext structure types and specify // a structure type that is not allowed. 
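    // (VkApplicationInfo serves as the unrecognized extension struct again.)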
// Expected to trigger an error with parameter_validation::validate_struct_pnext VkDeviceMemory memory = VK_NULL_HANDLE; VkMemoryAllocateInfo memory_alloc_info = {}; memory_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_alloc_info.pNext = &app_info; vkAllocateMemory(device(), &memory_alloc_info, NULL, &memory); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, UnrecognizedValueOutOfRange) { ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "does not fall within the begin..end range of the core VkFormat enumeration tokens"); // Specify an invalid VkFormat value // Expected to trigger an error with // parameter_validation::validate_ranged_enum VkFormatProperties format_properties; vkGetPhysicalDeviceFormatProperties(gpu(), static_cast<VkFormat>(8000), &format_properties); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, UnrecognizedValueBadMask) { ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains flag bits that are not recognized members of"); // Specify an invalid VkFlags bitmask value // Expected to trigger an error with parameter_validation::validate_flags VkImageFormatProperties image_format_properties; vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, static_cast<VkImageUsageFlags>(1 << 25), 0, &image_format_properties); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, UnrecognizedValueBadFlag) { ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains flag bits that are not recognized members of"); // Specify an invalid VkFlags array entry // Expected to trigger an error with parameter_validation::validate_flags_array VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); // `stage_flags` is set to a value which, currently, is not a defined stage flag // `VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM` works well for this VkPipelineStageFlags stage_flags = VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM; // `waitSemaphoreCount` *must* be greater than 0 to perform this check VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = &stage_flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkDestroySemaphore(m_device->device(), semaphore, nullptr); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, UnrecognizedValueBadBool) { // Make sure using VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE doesn't trigger a false positive. 
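    // The invalid value under test is the out-of-range VkBool32 (3) written to anisotropyEnable below.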
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME); } else { printf("%s VK_KHR_sampler_mirror_clamp_to_edge extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is neither VK_TRUE nor VK_FALSE"); // Specify an invalid VkBool32 value, expecting a warning with parameter_validation::validate_bool32 VkSampler sampler = VK_NULL_HANDLE; VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo(); sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; // Not VK_TRUE or VK_FALSE sampler_info.anisotropyEnable = 3; vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, MirrorClampToEdgeNotEnabled) { TEST_DESCRIPTION("Validation should catch using CLAMP_TO_EDGE addressing mode if the extension is not enabled."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerCreateInfo-addressModeU-01079"); VkSampler sampler = VK_NULL_HANDLE; VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo(); // Set the modes to cause the error sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, AnisotropyFeatureDisabled) { TEST_DESCRIPTION("Validation should check anisotropy parameters are correct with samplerAnisotropy disabled."); // Determine if required device features are available VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); device_features.samplerAnisotropy = VK_FALSE; // force anisotropy off ASSERT_NO_FATAL_FAILURE(InitState(&device_features)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerCreateInfo-anisotropyEnable-01070"); VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo(); // With the samplerAnisotropy disable, the sampler must not enable it. sampler_info.anisotropyEnable = VK_TRUE; VkSampler sampler = VK_NULL_HANDLE; VkResult err; err = vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == err) { vkDestroySampler(m_device->device(), sampler, NULL); } sampler = VK_NULL_HANDLE; } TEST_F(VkLayerTest, AnisotropyFeatureEnabled) { TEST_DESCRIPTION("Validation must check several conditions that apply only when Anisotropy is enabled."); // Determine if required device features are available VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); // These tests require that the device support anisotropic filtering if (VK_TRUE != device_features.samplerAnisotropy) { printf("%s Test requires unsupported samplerAnisotropy feature. 
Skipped.\n", kSkipPrefix); return; } bool cubic_support = false; if (DeviceExtensionSupported(gpu(), nullptr, "VK_IMG_filter_cubic")) { m_device_extension_names.push_back("VK_IMG_filter_cubic"); cubic_support = true; } VkSamplerCreateInfo sampler_info_ref = SafeSaneSamplerCreateInfo(); sampler_info_ref.anisotropyEnable = VK_TRUE; VkSamplerCreateInfo sampler_info = sampler_info_ref; ASSERT_NO_FATAL_FAILURE(InitState()); auto do_test = [this](std::string code, const VkSamplerCreateInfo *pCreateInfo) -> void { VkResult err; VkSampler sampler = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, code); err = vkCreateSampler(m_device->device(), pCreateInfo, NULL, &sampler); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == err) { vkDestroySampler(m_device->device(), sampler, NULL); } }; // maxAnisotropy out-of-bounds low. sampler_info.maxAnisotropy = NearestSmaller(1.0F); do_test("VUID-VkSamplerCreateInfo-anisotropyEnable-01071", &sampler_info); sampler_info.maxAnisotropy = sampler_info_ref.maxAnisotropy; // maxAnisotropy out-of-bounds high. sampler_info.maxAnisotropy = NearestGreater(m_device->phy().properties().limits.maxSamplerAnisotropy); do_test("VUID-VkSamplerCreateInfo-anisotropyEnable-01071", &sampler_info); sampler_info.maxAnisotropy = sampler_info_ref.maxAnisotropy; // Both anisotropy and unnormalized coords enabled sampler_info.unnormalizedCoordinates = VK_TRUE; do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01076", &sampler_info); sampler_info.unnormalizedCoordinates = sampler_info_ref.unnormalizedCoordinates; // Both anisotropy and cubic filtering enabled if (cubic_support) { sampler_info.minFilter = VK_FILTER_CUBIC_IMG; do_test("VUID-VkSamplerCreateInfo-magFilter-01081", &sampler_info); sampler_info.minFilter = sampler_info_ref.minFilter; sampler_info.magFilter = VK_FILTER_CUBIC_IMG; do_test("VUID-VkSamplerCreateInfo-magFilter-01081", &sampler_info); sampler_info.magFilter = sampler_info_ref.magFilter; } else { printf("%s Test requires unsupported extension \"VK_IMG_filter_cubic\". 
Skipped.\n", kSkipPrefix); } } TEST_F(VkLayerTest, UnnormalizedCoordinatesEnabled) { TEST_DESCRIPTION("Validate restrictions on sampler parameters when unnormalizedCoordinates is true."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); VkSamplerCreateInfo sampler_info_ref = SafeSaneSamplerCreateInfo(); sampler_info_ref.unnormalizedCoordinates = VK_TRUE; sampler_info_ref.minLod = 0.0f; sampler_info_ref.maxLod = 0.0f; VkSamplerCreateInfo sampler_info = sampler_info_ref; ASSERT_NO_FATAL_FAILURE(InitState()); auto do_test = [this](std::string code, const VkSamplerCreateInfo *pCreateInfo) -> void { VkResult err; VkSampler sampler = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, code); err = vkCreateSampler(m_device->device(), pCreateInfo, NULL, &sampler); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == err) { vkDestroySampler(m_device->device(), sampler, NULL); } }; // min and mag filters must be the same sampler_info.minFilter = VK_FILTER_NEAREST; sampler_info.magFilter = VK_FILTER_LINEAR; do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01072", &sampler_info); std::swap(sampler_info.minFilter, sampler_info.magFilter); do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01072", &sampler_info); sampler_info = sampler_info_ref; // mipmapMode must be NEAREST sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01073", &sampler_info); sampler_info = sampler_info_ref; // minlod and maxlod must be zero sampler_info.maxLod = 3.14159f; do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01074", &sampler_info); sampler_info.minLod = 2.71828f; do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01074", &sampler_info); sampler_info = sampler_info_ref; // addressModeU and addressModeV must both be CLAMP_TO_EDGE or CLAMP_TO_BORDER // checks all 12 invalid combinations out of 16 total combinations const std::array<VkSamplerAddressMode, 4> kAddressModes = {{ VK_SAMPLER_ADDRESS_MODE_REPEAT, VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, }}; for (const auto umode : kAddressModes) { for (const auto vmode : kAddressModes) { if ((umode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE && umode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) || (vmode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE && vmode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER)) { sampler_info.addressModeU = umode; sampler_info.addressModeV = vmode; do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01075", &sampler_info); } } } sampler_info = sampler_info_ref; // VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01076 is tested in AnisotropyFeatureEnabled above // Since it requires checking/enabling the anisotropic filtering feature, it's easier to do it // with the other anisotropic tests. 
// compareEnable must be VK_FALSE sampler_info.compareEnable = VK_TRUE; do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01077", &sampler_info); sampler_info = sampler_info_ref; } TEST_F(VkLayerTest, UnrecognizedValueMaxEnum) { ASSERT_NO_FATAL_FAILURE(Init()); // Specify MAX_ENUM VkFormatProperties format_properties; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "does not fall within the begin..end range"); vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_MAX_ENUM, &format_properties); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, UpdateBufferAlignment) { TEST_DESCRIPTION("Check alignment parameters for vkCmdUpdateBuffer"); uint32_t updateData[] = {1, 2, 3, 4, 5, 6, 7, 8}; ASSERT_NO_FATAL_FAILURE(Init()); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; VkBufferObj buffer; buffer.init_as_dst(*m_device, (VkDeviceSize)20, reqs); m_commandBuffer->begin(); // Introduce failure by using dstOffset that is not multiple of 4 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4"); m_commandBuffer->UpdateBuffer(buffer.handle(), 1, 4, updateData); m_errorMonitor->VerifyFound(); // Introduce failure by using dataSize that is not multiple of 4 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4"); m_commandBuffer->UpdateBuffer(buffer.handle(), 0, 6, updateData); m_errorMonitor->VerifyFound(); // Introduce failure by using dataSize that is < 0 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "must be greater than zero and less than or equal to 65536"); m_commandBuffer->UpdateBuffer(buffer.handle(), 0, (VkDeviceSize)-44, updateData); m_errorMonitor->VerifyFound(); // Introduce failure by using dataSize that is > 65536 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "must be greater than zero and less than or equal to 65536"); m_commandBuffer->UpdateBuffer(buffer.handle(), 0, (VkDeviceSize)80000, updateData); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, FillBufferAlignment) { TEST_DESCRIPTION("Check alignment parameters for vkCmdFillBuffer"); ASSERT_NO_FATAL_FAILURE(Init()); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; VkBufferObj buffer; buffer.init_as_dst(*m_device, (VkDeviceSize)20, reqs); m_commandBuffer->begin(); // Introduce failure by using dstOffset that is not multiple of 4 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4"); m_commandBuffer->FillBuffer(buffer.handle(), 1, 4, 0x11111111); m_errorMonitor->VerifyFound(); // Introduce failure by using size that is not multiple of 4 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4"); m_commandBuffer->FillBuffer(buffer.handle(), 0, 6, 0x11111111); m_errorMonitor->VerifyFound(); // Introduce failure by using size that is zero m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "must be greater than zero"); m_commandBuffer->FillBuffer(buffer.handle(), 0, 0, 0x11111111); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, PSOPolygonModeInvalid) { TEST_DESCRIPTION("Attempt to use a non-solid polygon fill mode in a pipeline when this feature is not enabled."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); std::vector<const char *> device_extension_names; auto features = m_device->phy().features(); // Artificially disable support for non-solid fill modes 
features.fillModeNonSolid = VK_FALSE; // The sacrificial device object VkDeviceObj test_device(0, gpu(), device_extension_names, &features); VkRenderpassObj render_pass(&test_device); const VkPipelineLayoutObj pipeline_layout(&test_device); VkPipelineRasterizationStateCreateInfo rs_ci = {}; rs_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_ci.pNext = nullptr; rs_ci.lineWidth = 1.0f; rs_ci.rasterizerDiscardEnable = VK_TRUE; VkShaderObj vs(&test_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(&test_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Set polygonMode to unsupported value POINT, should fail m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "polygonMode cannot be VK_POLYGON_MODE_POINT or VK_POLYGON_MODE_LINE"); { VkPipelineObj pipe(&test_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); // Introduce failure by setting unsupported polygon mode rs_ci.polygonMode = VK_POLYGON_MODE_POINT; pipe.SetRasterization(&rs_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); } m_errorMonitor->VerifyFound(); // Try again with polygonMode=LINE, should fail m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "polygonMode cannot be VK_POLYGON_MODE_POINT or VK_POLYGON_MODE_LINE"); { VkPipelineObj pipe(&test_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); // Introduce failure by setting unsupported polygon mode rs_ci.polygonMode = VK_POLYGON_MODE_LINE; pipe.SetRasterization(&rs_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); } m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SparseBindingImageBufferCreate) { TEST_DESCRIPTION("Create buffer/image with sparse attributes but without the sparse_binding bit set"); ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; buf_info.size = 2048; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; if (m_device->phy().features().sparseResidencyBuffer) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-flags-00918"); buf_info.flags = VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT; vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); m_errorMonitor->VerifyFound(); } else { printf("%s Test requires unsupported sparseResidencyBuffer feature. Skipped.\n", kSkipPrefix); return; } if (m_device->phy().features().sparseResidencyAliased) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-flags-00918"); buf_info.flags = VK_BUFFER_CREATE_SPARSE_ALIASED_BIT; vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); m_errorMonitor->VerifyFound(); } else { printf("%s Test requires unsupported sparseResidencyAliased feature. 
Skipped.\n", kSkipPrefix); return; } VkImage image; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 512; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; if (m_device->phy().features().sparseResidencyImage2D) { image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00987"); vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); } else { printf("%s Test requires unsupported sparseResidencyImage2D feature. Skipped.\n", kSkipPrefix); return; } if (m_device->phy().features().sparseResidencyAliased) { image_create_info.flags = VK_IMAGE_CREATE_SPARSE_ALIASED_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00987"); vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); } else { printf("%s Test requires unsupported sparseResidencyAliased feature. Skipped.\n", kSkipPrefix); return; } } TEST_F(VkLayerTest, SparseResidencyImageCreateUnsupportedTypes) { TEST_DESCRIPTION("Create images with sparse residency with unsupported types"); // Determine which device feature are available VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); // Mask out device features we don't want and initialize device state device_features.sparseResidencyImage2D = VK_FALSE; device_features.sparseResidencyImage3D = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(&device_features)); if (!m_device->phy().features().sparseBinding) { printf("%s Test requires unsupported sparseBinding feature. 
Skipped.\n", kSkipPrefix); return; } VkImage image = VK_NULL_HANDLE; VkResult result = VK_RESULT_MAX_ENUM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_1D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 512; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_BINDING_BIT; // 1D image w/ sparse residency is an error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00970"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } // 2D image w/ sparse residency when feature isn't available image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.extent.height = 64; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00971"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } // 3D image w/ sparse residency when feature isn't available image_create_info.imageType = VK_IMAGE_TYPE_3D; image_create_info.extent.depth = 8; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00972"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } } TEST_F(VkLayerTest, SparseResidencyImageCreateUnsupportedSamples) { TEST_DESCRIPTION("Create images with sparse residency with unsupported tiling or sample counts"); // Determine which device feature are available VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); // These tests require that the device support sparse residency for 2D images if (VK_TRUE != device_features.sparseResidencyImage2D) { printf("%s Test requires unsupported SparseResidencyImage2D feature. 
Skipped.\n", kSkipPrefix); return; } // Mask out device features we don't want and initialize device state device_features.sparseResidency2Samples = VK_FALSE; device_features.sparseResidency4Samples = VK_FALSE; device_features.sparseResidency8Samples = VK_FALSE; device_features.sparseResidency16Samples = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(&device_features)); VkImage image = VK_NULL_HANDLE; VkResult result = VK_RESULT_MAX_ENUM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_BINDING_BIT; // 2D image w/ sparse residency and linear tiling is an error m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT then image tiling of VK_IMAGE_TILING_LINEAR is not supported"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Multi-sample image w/ sparse residency when feature isn't available (4 flavors) image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00973"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00974"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_8_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00975"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_16_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00976"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } } TEST_F(VkLayerTest, InvalidMemoryAliasing) { TEST_DESCRIPTION( "Create a buffer and image, allocate memory, and bind the buffer and image 
to memory such that they will alias."); VkResult err; bool pass; ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer, buffer2; VkImage image; VkImage image2; VkDeviceMemory mem; // buffer will be bound first VkDeviceMemory mem_img; // image bound first VkMemoryRequirements buff_mem_reqs, img_mem_reqs; VkMemoryRequirements buff_mem_reqs2, img_mem_reqs2; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = 256; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &buff_mem_reqs); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; // Image tiling must be optimal to trigger error when aliasing linear buffer image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image2); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &img_mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; // Ensure memory is big enough for both bindings alloc_info.allocationSize = buff_mem_reqs.size + img_mem_reqs.size; pass = m_device->phy().set_memory_type(buff_mem_reqs.memoryTypeBits & img_mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); vkDestroyImage(m_device->device(), image, NULL); vkDestroyImage(m_device->device(), image2, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image2, &img_mem_reqs2); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, " is aliased with linear buffer 0x"); // VALIDATION FAILURE due to image mapping overlapping buffer mapping err = vkBindImageMemory(m_device->device(), image, mem, 0); m_errorMonitor->VerifyFound(); // Now correctly bind image2 to second mem allocation before incorrectly // aliasing buffer2 err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer2); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem_img); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image2, mem_img, 0); 
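    // image2 now legitimately owns offset 0 of mem_img; binding buffer2 into the same
    // allocation below is what should produce the "aliased with non-linear image" warning.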
ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is aliased with non-linear image 0x"); vkGetBufferMemoryRequirements(m_device->device(), buffer2, &buff_mem_reqs2); err = vkBindBufferMemory(m_device->device(), buffer2, mem_img, 0); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), buffer, NULL); vkDestroyBuffer(m_device->device(), buffer2, NULL); vkDestroyImage(m_device->device(), image, NULL); vkDestroyImage(m_device->device(), image2, NULL); vkFreeMemory(m_device->device(), mem, NULL); vkFreeMemory(m_device->device(), mem_img, NULL); } TEST_F(VkLayerTest, InvalidMemoryMapping) { TEST_DESCRIPTION("Attempt to map memory in a number of incorrect ways"); VkResult err; bool pass; ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; const VkDeviceSize atom_size = m_device->props.limits.nonCoherentAtomSize; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = 256; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; // Ensure memory is big enough for both bindings static const VkDeviceSize allocation_size = 0x10000; alloc_info.allocationSize = allocation_size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); uint8_t *pData; // Attempt to map memory size 0 is invalid m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VkMapMemory: Attempting to map memory range of size zero"); err = vkMapMemory(m_device->device(), mem, 0, 0, 0, (void **)&pData); m_errorMonitor->VerifyFound(); // Map memory twice err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VkMapMemory: Attempting to map memory on an already-mapped object "); err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData); m_errorMonitor->VerifyFound(); // Unmap the memory to avoid re-map error vkUnmapMemory(m_device->device(), mem); // overstep allocation with VK_WHOLE_SIZE m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " with size of VK_WHOLE_SIZE oversteps total array size 0x"); err = vkMapMemory(m_device->device(), mem, allocation_size + 1, VK_WHOLE_SIZE, 0, (void **)&pData); m_errorMonitor->VerifyFound(); // overstep allocation w/o VK_WHOLE_SIZE m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " oversteps total array size 0x"); err = vkMapMemory(m_device->device(), mem, 1, allocation_size, 0, (void **)&pData); m_errorMonitor->VerifyFound(); // Now error due to unmapping memory that's not mapped m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Unmapping Memory without memory being mapped: "); 
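    // The overstepping map attempts above are expected to be rejected, so the memory should
    // still be unmapped when the vkUnmapMemory call below exercises the "not mapped" error.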
vkUnmapMemory(m_device->device(), mem); m_errorMonitor->VerifyFound(); // Now map memory and cause errors due to flushing invalid ranges err = vkMapMemory(m_device->device(), mem, 4 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); VkMappedMemoryRange mmr = {}; mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; mmr.memory = mem; mmr.offset = atom_size; // Error b/c offset less than offset of mapped mem m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00685"); vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); m_errorMonitor->VerifyFound(); // Now flush range that oversteps mapped range vkUnmapMemory(m_device->device(), mem); err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.offset = atom_size; mmr.size = 4 * atom_size; // Flushing bounds exceed mapped bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00685"); vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); m_errorMonitor->VerifyFound(); // Now flush range with VK_WHOLE_SIZE that oversteps offset vkUnmapMemory(m_device->device(), mem); err = vkMapMemory(m_device->device(), mem, 2 * atom_size, 4 * atom_size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.offset = atom_size; mmr.size = VK_WHOLE_SIZE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00686"); vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); m_errorMonitor->VerifyFound(); // Some platforms have an atomsize of 1 which makes the test meaningless if (atom_size > 3) { // Now with an offset NOT a multiple of the device limit vkUnmapMemory(m_device->device(), mem); err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.offset = 3; // Not a multiple of atom_size mmr.size = VK_WHOLE_SIZE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-offset-00687"); vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); m_errorMonitor->VerifyFound(); // Now with a size NOT a multiple of the device limit vkUnmapMemory(m_device->device(), mem); err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.offset = atom_size; mmr.size = 2 * atom_size + 1; // Not a multiple of atom_size m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-01390"); vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); m_errorMonitor->VerifyFound(); } pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } // TODO : If we can get HOST_VISIBLE w/o HOST_COHERENT we can test cases of // kVUID_Core_MemTrack_InvalidMap in validateAndCopyNoncoherentMemoryToDriver() vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, MapMemWithoutHostVisibleBit) { TEST_DESCRIPTION("Allocate memory that is not mappable and then attempt to map it."); VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkMapMemory-memory-00682"); ASSERT_NO_FATAL_FAILURE(Init()); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = 
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 1024; pass = m_device->phy().set_memory_type(0xFFFFFFFF, &mem_alloc, 0, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { // If we can't find any unmappable memory this test doesn't // make sense printf("%s No unmappable memory types found, skipping test\n", kSkipPrefix); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); void *mappedAddress = NULL; err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, &mappedAddress); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, RebindMemory) { VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "which has already been bound to mem object"); ASSERT_NO_FATAL_FAILURE(Init()); // Create an image, allocate memory, free it, and then try to bind it VkImage image; VkDeviceMemory mem1; VkDeviceMemory mem2; VkMemoryRequirements mem_reqs; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; // Introduce failure, do NOT set memProps to // VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT mem_alloc.memoryTypeIndex = 1; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); // allocate 2 memory objects err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem1); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem2); ASSERT_VK_SUCCESS(err); // Bind first memory object to Image object err = vkBindImageMemory(m_device->device(), image, mem1, 0); ASSERT_VK_SUCCESS(err); // Introduce validation failure, try to bind a different memory object to // the same image object err = vkBindImageMemory(m_device->device(), image, mem2, 0); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), mem1, NULL); vkFreeMemory(m_device->device(), mem2, NULL); } TEST_F(VkLayerTest, SubmitSignaledFence) { vk_testing::Fence testFence; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "submitted in SIGNALED state. 
Fences must be reset before being submitted"); VkFenceCreateInfo fenceInfo = {}; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->end(); testFence.init(*m_device, fenceInfo); VkSubmitInfo submit_info; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.pNext = NULL; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = NULL; submit_info.pWaitDstStageMask = NULL; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; vkQueueSubmit(m_device->m_queue, 1, &submit_info, testFence.handle()); vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidUsageBits) { TEST_DESCRIPTION( "Specify wrong usage for image then create conflicting view of image Initialize buffer with wrong usage then perform copy " "expecting errors from both the image and the buffer (2 calls)"); ASSERT_NO_FATAL_FAILURE(Init()); auto format = FindSupportedDepthStencilFormat(gpu()); if (!format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } VkImageObj image(m_device); // Initialize image with transfer source usage image.Init(128, 128, 1, format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView dsv; VkImageViewCreateInfo dsvci = {}; dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; dsvci.image = image.handle(); dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D; dsvci.format = format; dsvci.subresourceRange.layerCount = 1; dsvci.subresourceRange.baseMipLevel = 0; dsvci.subresourceRange.levelCount = 1; dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; // Create a view with depth / stencil aspect for image with different usage m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid usage flag for Image "); vkCreateImageView(m_device->device(), &dsvci, NULL, &dsv); m_errorMonitor->VerifyFound(); // Initialize buffer with TRANSFER_DST usage VkBufferObj buffer; VkMemoryPropertyFlags reqs = 0; buffer.init_as_dst(*m_device, 128 * 128, reqs); VkBufferImageCopy region = {}; region.bufferRowLength = 128; region.bufferImageHeight = 128; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; region.imageSubresource.layerCount = 1; region.imageExtent.height = 16; region.imageExtent.width = 16; region.imageExtent.depth = 1; // Buffer usage not set to TRANSFER_SRC and image usage not set to TRANSFER_DST m_commandBuffer->begin(); // two separate errors from this call: m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-dstImage-00177"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-srcBuffer-00174"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, LeakAnObject) { VkResult err; TEST_DESCRIPTION("Create a fence and destroy its device without first destroying the fence."); // Note that we have to create a new device since destroying 
the // framework's device causes Teardown() to fail and just calling Teardown // will destroy the errorMonitor. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has not been destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props); // The sacrificial device object VkDevice testDevice; VkDeviceCreateInfo device_create_info = {}; auto features = m_device->phy().features(); device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_create_info.pNext = NULL; device_create_info.queueCreateInfoCount = queue_info.size(); device_create_info.pQueueCreateInfos = queue_info.data(); device_create_info.enabledLayerCount = 0; device_create_info.ppEnabledLayerNames = NULL; device_create_info.pEnabledFeatures = &features; err = vkCreateDevice(gpu(), &device_create_info, NULL, &testDevice); ASSERT_VK_SUCCESS(err); VkFence fence; VkFenceCreateInfo fence_create_info = {}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fence_create_info.pNext = NULL; fence_create_info.flags = 0; err = vkCreateFence(testDevice, &fence_create_info, NULL, &fence); ASSERT_VK_SUCCESS(err); // Induce failure by not calling vkDestroyFence vkDestroyDevice(testDevice, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidCommandPoolConsistency) { TEST_DESCRIPTION("Allocate command buffers from one command pool and attempt to delete them from another."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "FreeCommandBuffers is attempting to free Command Buffer"); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandPool command_pool_one; VkCommandPool command_pool_two; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool_one); vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool_two); VkCommandBuffer cb; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool_one; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &cb); vkFreeCommandBuffers(m_device->device(), command_pool_two, 1, &cb); m_errorMonitor->VerifyFound(); vkDestroyCommandPool(m_device->device(), command_pool_one, NULL); vkDestroyCommandPool(m_device->device(), command_pool_two, NULL); } TEST_F(VkLayerTest, InvalidDescriptorPoolConsistency) { VkResult err; TEST_DESCRIPTION("Allocate descriptor sets from one DS pool and attempt to delete them from another."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "FreeDescriptorSets is attempting to free descriptorSet"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = 0; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool 
bad_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &bad_pool); ASSERT_VK_SUCCESS(err); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); err = vkFreeDescriptorSets(m_device->device(), bad_pool, 1, &ds.set_); m_errorMonitor->VerifyFound(); vkDestroyDescriptorPool(m_device->device(), bad_pool, NULL); } TEST_F(VkLayerTest, CreateUnknownObject) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageMemoryRequirements-image-parameter"); TEST_DESCRIPTION("Pass an invalid image object handle into a Vulkan API call."); ASSERT_NO_FATAL_FAILURE(Init()); // Pass bogus handle into GetImageMemoryRequirements VkMemoryRequirements mem_reqs; uint64_t fakeImageHandle = 0xCADECADE; VkImage fauxImage = reinterpret_cast<VkImage &>(fakeImageHandle); vkGetImageMemoryRequirements(m_device->device(), fauxImage, &mem_reqs); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, UseObjectWithWrongDevice) { TEST_DESCRIPTION( "Try to destroy a render pass object using a device other than the one it was created on. This should generate a distinct " "error from the invalid handle error."); // Create first device and renderpass ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create second device float priorities[] = {1.0f}; VkDeviceQueueCreateInfo queue_info{}; queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queue_info.pNext = NULL; queue_info.flags = 0; queue_info.queueFamilyIndex = 0; queue_info.queueCount = 1; queue_info.pQueuePriorities = &priorities[0]; VkDeviceCreateInfo device_create_info = {}; auto features = m_device->phy().features(); device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_create_info.pNext = NULL; device_create_info.queueCreateInfoCount = 1; device_create_info.pQueueCreateInfos = &queue_info; device_create_info.enabledLayerCount = 0; device_create_info.ppEnabledLayerNames = NULL; device_create_info.pEnabledFeatures = &features; VkDevice second_device; ASSERT_VK_SUCCESS(vkCreateDevice(gpu(), &device_create_info, NULL, &second_device)); // Try to destroy the renderpass from the first device using the second device m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyRenderPass-renderPass-parent"); vkDestroyRenderPass(second_device, m_renderPass, NULL); m_errorMonitor->VerifyFound(); vkDestroyDevice(second_device, NULL); } TEST_F(VkLayerTest, PipelineNotBound) { TEST_DESCRIPTION("Pass in an invalid pipeline object handle into a Vulkan API call."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindPipeline-pipeline-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipeline badPipeline = (VkPipeline)((size_t)0xbaadb1be); m_commandBuffer->begin(); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, badPipeline); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, BindImageInvalidMemoryType) { VkResult err; TEST_DESCRIPTION("Test validation check for an invalid memory type index during bind[Buffer|Image]Memory time"); ASSERT_NO_FATAL_FAILURE(Init()); // Create an image, allocate memory, set a bad typeIndex and then try to // bind it VkImage image; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = 
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; // Introduce Failure, select invalid TypeIndex VkPhysicalDeviceMemoryProperties memory_info; vkGetPhysicalDeviceMemoryProperties(gpu(), &memory_info); unsigned int i; for (i = 0; i < memory_info.memoryTypeCount; i++) { if ((mem_reqs.memoryTypeBits & (1 << i)) == 0) { mem_alloc.memoryTypeIndex = i; break; } } if (i >= memory_info.memoryTypeCount) { printf("%s No invalid memory type index could be found; skipped.\n", kSkipPrefix); vkDestroyImage(m_device->device(), image, NULL); return; } m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "for this object type are not compatible with the memory"); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, mem, 0); (void)err; m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, BindInvalidMemory) { VkResult err; bool pass; ASSERT_NO_FATAL_FAILURE(Init()); const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM; const int32_t tex_width = 256; const int32_t tex_height = 256; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.pNext = NULL; buffer_create_info.flags = 0; buffer_create_info.size = 4 * 1024 * 1024; buffer_create_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; // Create an image/buffer, allocate memory, free it, and then try to bind it { VkImage image = VK_NULL_HANDLE; VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_mem_alloc = 
{}, buffer_mem_alloc = {}; image_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_mem_alloc.allocationSize = image_mem_reqs.size; pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_mem_alloc, 0); ASSERT_TRUE(pass); buffer_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_mem_alloc.allocationSize = buffer_mem_reqs.size; pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_mem_alloc, 0); ASSERT_TRUE(pass); VkDeviceMemory image_mem = VK_NULL_HANDLE, buffer_mem = VK_NULL_HANDLE; err = vkAllocateMemory(device(), &image_mem_alloc, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(device(), &buffer_mem_alloc, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); vkFreeMemory(device(), image_mem, NULL); vkFreeMemory(device(), buffer_mem, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-parameter"); err = vkBindImageMemory(device(), image, image_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-parameter"); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } // Try to bind memory to an object that already has a memory binding { VkImage image = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_alloc_info.allocationSize = image_mem_reqs.size; buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_alloc_info.allocationSize = buffer_mem_reqs.size; pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_alloc_info, 0); ASSERT_TRUE(pass); pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0); ASSERT_TRUE(pass); VkDeviceMemory image_mem, buffer_mem; err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(device(), image, image_mem, 0); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-01044"); err = vkBindImageMemory(device(), image, image_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-01029"); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); (void)err; // This may very well return an error. 
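        // Both re-bind attempts above only need to trigger the "already bound" VUIDs;
        // the driver's actual return codes are intentionally ignored.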
m_errorMonitor->VerifyFound(); vkFreeMemory(device(), image_mem, NULL); vkFreeMemory(device(), buffer_mem, NULL); vkDestroyImage(device(), image, NULL); vkDestroyBuffer(device(), buffer, NULL); } // Try to bind memory to an object with an invalid memoryOffset { VkImage image = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; // Leave some extra space for alignment wiggle room image_alloc_info.allocationSize = image_mem_reqs.size + image_mem_reqs.alignment; buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_alloc_info.allocationSize = buffer_mem_reqs.size + buffer_mem_reqs.alignment; pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_alloc_info, 0); ASSERT_TRUE(pass); pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0); ASSERT_TRUE(pass); VkDeviceMemory image_mem, buffer_mem; err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); // Test unaligned memory offset { if (image_mem_reqs.alignment > 1) { VkDeviceSize image_offset = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memoryOffset-01048"); err = vkBindImageMemory(device(), image, image_mem, image_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } if (buffer_mem_reqs.alignment > 1) { VkDeviceSize buffer_offset = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memoryOffset-01036"); err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } } // Test memory offsets outside the memory allocation { VkDeviceSize image_offset = (image_alloc_info.allocationSize + image_mem_reqs.alignment) & ~(image_mem_reqs.alignment - 1); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memoryOffset-01046"); err = vkBindImageMemory(device(), image, image_mem, image_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); VkDeviceSize buffer_offset = (buffer_alloc_info.allocationSize + buffer_mem_reqs.alignment) & ~(buffer_mem_reqs.alignment - 1); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memoryOffset-01031"); err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } // Test memory offsets within the memory allocation, but which leave too little memory for // the resource. 
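        // (size - 1) & ~(alignment - 1) is the largest alignment-respecting offset that is still
        // below the resource's size; for typical alignments (much smaller than the resource) this
        // leaves fewer bytes in the allocation than the resource requires.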
{ VkDeviceSize image_offset = (image_mem_reqs.size - 1) & ~(image_mem_reqs.alignment - 1); if ((image_offset > 0) && (image_mem_reqs.size < (image_alloc_info.allocationSize - image_mem_reqs.alignment))) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-size-01049"); err = vkBindImageMemory(device(), image, image_mem, image_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } VkDeviceSize buffer_offset = (buffer_mem_reqs.size - 1) & ~(buffer_mem_reqs.alignment - 1); if (buffer_offset > 0) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-size-01037"); err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } } vkFreeMemory(device(), image_mem, NULL); vkFreeMemory(device(), buffer_mem, NULL); vkDestroyImage(device(), image, NULL); vkDestroyBuffer(device(), buffer, NULL); } // Try to bind memory to an object with an invalid memory type { VkImage image = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_alloc_info.allocationSize = image_mem_reqs.size; buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_alloc_info.allocationSize = buffer_mem_reqs.size; // Create a mask of available memory types *not* supported by these resources, // and try to use one of them. VkPhysicalDeviceMemoryProperties memory_properties = {}; vkGetPhysicalDeviceMemoryProperties(m_device->phy().handle(), &memory_properties); VkDeviceMemory image_mem, buffer_mem; uint32_t image_unsupported_mem_type_bits = ((1 << memory_properties.memoryTypeCount) - 1) & ~image_mem_reqs.memoryTypeBits; if (image_unsupported_mem_type_bits != 0) { pass = m_device->phy().set_memory_type(image_unsupported_mem_type_bits, &image_alloc_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-01047"); err = vkBindImageMemory(device(), image, image_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); vkFreeMemory(device(), image_mem, NULL); } uint32_t buffer_unsupported_mem_type_bits = ((1 << memory_properties.memoryTypeCount) - 1) & ~buffer_mem_reqs.memoryTypeBits; if (buffer_unsupported_mem_type_bits != 0) { pass = m_device->phy().set_memory_type(buffer_unsupported_mem_type_bits, &buffer_alloc_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-01035"); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); (void)err; // This may very well return an error. 
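            // buffer_mem was allocated from a memory type excluded from the buffer's
            // memoryTypeBits, so the bind above should be reported against
            // VUID-vkBindBufferMemory-memory-01035.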
            m_errorMonitor->VerifyFound();
            vkFreeMemory(device(), buffer_mem, NULL);
        }
        vkDestroyImage(device(), image, NULL);
        vkDestroyBuffer(device(), buffer, NULL);
    }

    // Try to bind memory to an image created with sparse memory flags
    {
        VkImageCreateInfo sparse_image_create_info = image_create_info;
        sparse_image_create_info.flags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
        VkImageFormatProperties image_format_properties = {};
        err = vkGetPhysicalDeviceImageFormatProperties(m_device->phy().handle(), sparse_image_create_info.format,
                                                       sparse_image_create_info.imageType, sparse_image_create_info.tiling,
                                                       sparse_image_create_info.usage, sparse_image_create_info.flags,
                                                       &image_format_properties);
        if (!m_device->phy().features().sparseResidencyImage2D || err == VK_ERROR_FORMAT_NOT_SUPPORTED) {
            // most likely means sparse formats aren't supported here; skip this test.
        } else {
            ASSERT_VK_SUCCESS(err);
            if (image_format_properties.maxExtent.width == 0) {
                printf("%s Sparse image format not supported; skipped.\n", kSkipPrefix);
                return;
            } else {
                VkImage sparse_image = VK_NULL_HANDLE;
                err = vkCreateImage(m_device->device(), &sparse_image_create_info, NULL, &sparse_image);
                ASSERT_VK_SUCCESS(err);
                VkMemoryRequirements sparse_mem_reqs = {};
                vkGetImageMemoryRequirements(m_device->device(), sparse_image, &sparse_mem_reqs);
                if (sparse_mem_reqs.memoryTypeBits != 0) {
                    VkMemoryAllocateInfo sparse_mem_alloc = {};
                    sparse_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
                    sparse_mem_alloc.pNext = NULL;
                    sparse_mem_alloc.allocationSize = sparse_mem_reqs.size;
                    sparse_mem_alloc.memoryTypeIndex = 0;
                    pass = m_device->phy().set_memory_type(sparse_mem_reqs.memoryTypeBits, &sparse_mem_alloc, 0);
                    ASSERT_TRUE(pass);
                    VkDeviceMemory sparse_mem = VK_NULL_HANDLE;
                    err = vkAllocateMemory(m_device->device(), &sparse_mem_alloc, NULL, &sparse_mem);
                    ASSERT_VK_SUCCESS(err);
                    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-01045");
                    err = vkBindImageMemory(m_device->device(), sparse_image, sparse_mem, 0);
                    // This may very well return an error.
                    (void)err;
                    m_errorMonitor->VerifyFound();
                    vkFreeMemory(m_device->device(), sparse_mem, NULL);
                }
                vkDestroyImage(m_device->device(), sparse_image, NULL);
            }
        }
    }

    // Try to bind memory to a buffer created with sparse memory flags
    {
        VkBufferCreateInfo sparse_buffer_create_info = buffer_create_info;
        sparse_buffer_create_info.flags |= VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
        if (!m_device->phy().features().sparseResidencyBuffer) {
            // most likely means sparse formats aren't supported here; skip this test.
} else { VkBuffer sparse_buffer = VK_NULL_HANDLE; err = vkCreateBuffer(m_device->device(), &sparse_buffer_create_info, NULL, &sparse_buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements sparse_mem_reqs = {}; vkGetBufferMemoryRequirements(m_device->device(), sparse_buffer, &sparse_mem_reqs); if (sparse_mem_reqs.memoryTypeBits != 0) { VkMemoryAllocateInfo sparse_mem_alloc = {}; sparse_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; sparse_mem_alloc.pNext = NULL; sparse_mem_alloc.allocationSize = sparse_mem_reqs.size; sparse_mem_alloc.memoryTypeIndex = 0; pass = m_device->phy().set_memory_type(sparse_mem_reqs.memoryTypeBits, &sparse_mem_alloc, 0); ASSERT_TRUE(pass); VkDeviceMemory sparse_mem = VK_NULL_HANDLE; err = vkAllocateMemory(m_device->device(), &sparse_mem_alloc, NULL, &sparse_mem); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-01030"); err = vkBindBufferMemory(m_device->device(), sparse_buffer, sparse_mem, 0); // This may very well return an error. (void)err; m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), sparse_mem, NULL); } vkDestroyBuffer(m_device->device(), sparse_buffer, NULL); } } } TEST_F(VkLayerTest, BindMemoryToDestroyedObject) { VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); // Create an image object, allocate memory, destroy the object and then try // to bind it VkImage image; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); // Allocate memory err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); // Introduce validation failure, destroy Image object before binding vkDestroyImage(m_device->device(), image, NULL); ASSERT_VK_SUCCESS(err); // Now Try to bind memory to this destroyed object err = vkBindImageMemory(m_device->device(), image, mem, 0); // This may very well return an error. 
(void)err; m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, ExceedMemoryAllocationCount) { VkResult err = VK_SUCCESS; const int max_mems = 32; VkDeviceMemory mems[max_mems + 1]; if (!EnableDeviceProfileLayer()) { printf("%s Failed to enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT = (PFN_vkSetPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT"); PFN_vkGetOriginalPhysicalDeviceLimitsEXT fpvkGetOriginalPhysicalDeviceLimitsEXT = (PFN_vkGetOriginalPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkGetOriginalPhysicalDeviceLimitsEXT"); if (!(fpvkSetPhysicalDeviceLimitsEXT) || !(fpvkGetOriginalPhysicalDeviceLimitsEXT)) { printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix); return; } VkPhysicalDeviceProperties props; fpvkGetOriginalPhysicalDeviceLimitsEXT(gpu(), &props.limits); if (props.limits.maxMemoryAllocationCount > max_mems) { props.limits.maxMemoryAllocationCount = max_mems; fpvkSetPhysicalDeviceLimitsEXT(gpu(), &props.limits); } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Number of currently valid memory objects is not less than the maximum allowed"); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.memoryTypeIndex = 0; mem_alloc.allocationSize = 4; int i; for (i = 0; i <= max_mems; i++) { err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mems[i]); if (err != VK_SUCCESS) { break; } } m_errorMonitor->VerifyFound(); for (int j = 0; j < i; j++) { vkFreeMemory(m_device->device(), mems[j], NULL); } } TEST_F(VkLayerTest, CreatePipelineBadVertexAttributeFormat) { TEST_DESCRIPTION("Test that pipeline validation catches invalid vertex attribute formats"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs; memset(&input_attribs, 0, sizeof(input_attribs)); // Pick a really bad format for this purpose and make sure it should fail input_attribs.format = VK_FORMAT_BC2_UNORM_BLOCK; VkFormatProperties format_props = m_device->format_properties(input_attribs.format); if ((format_props.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != 0) { printf("%s Format unsuitable for test; skipped.\n", kSkipPrefix); return; } input_attribs.location = 0; char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-format-00623"); VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attribs, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); 
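    // Pipeline creation should be flagged because the BC2 format was checked above to lack
    // VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT, making it invalid as a vertex attribute format.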
m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ImageSampleCounts) { TEST_DESCRIPTION("Use bad sample counts in image transfer calls to trigger validation errors."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); VkMemoryPropertyFlags reqs = 0; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 256; image_create_info.extent.height = 256; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.flags = 0; VkImageBlit blit_region = {}; blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.srcSubresource.baseArrayLayer = 0; blit_region.srcSubresource.layerCount = 1; blit_region.srcSubresource.mipLevel = 0; blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.dstSubresource.baseArrayLayer = 0; blit_region.dstSubresource.layerCount = 1; blit_region.dstSubresource.mipLevel = 0; blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {256, 256, 1}; blit_region.dstOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[1] = {128, 128, 1}; // Create two images, the source with sampleCount = 4, and attempt to blit // between them { image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; VkImageObj src_image(m_device); src_image.init(&image_create_info); src_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&image_create_info); dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->begin(); // TODO: These 2 VUs are redundant - expect one of them to go away m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00233"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228"); vkCmdBlitImage(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } // Create two images, the dest with sampleCount = 4, and attempt to blit // between them { image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; VkImageObj src_image(m_device); src_image.init(&image_create_info); src_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&image_create_info); dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->begin(); // TODO: These 2 VUs are redundant - expect one of them to go away m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00234"); 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228"); vkCmdBlitImage(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } VkBufferImageCopy copy_region = {}; copy_region.bufferRowLength = 128; copy_region.bufferImageHeight = 128; copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.imageSubresource.layerCount = 1; copy_region.imageExtent.height = 64; copy_region.imageExtent.width = 64; copy_region.imageExtent.depth = 1; // Create src buffer and dst image with sampleCount = 4 and attempt to copy // buffer to image { VkBufferObj src_buffer; src_buffer.init_as_src(*m_device, 128 * 128 * 4, reqs); image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&image_create_info); dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "was created with a sample count of VK_SAMPLE_COUNT_4_BIT but must be VK_SAMPLE_COUNT_1_BIT"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), src_buffer.handle(), dst_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } // Create dst buffer and src image with sampleCount = 4 and attempt to copy // image to buffer { VkBufferObj dst_buffer; dst_buffer.init_as_dst(*m_device, 128 * 128 * 4, reqs); image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; vk_testing::Image src_image; src_image.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "was created with a sample count of VK_SAMPLE_COUNT_4_BIT but must be VK_SAMPLE_COUNT_1_BIT"); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_buffer.handle(), 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } } TEST_F(VkLayerTest, BlitImageFormatTypes) { ASSERT_NO_FATAL_FAILURE(Init()); VkFormat f_unsigned = VK_FORMAT_R8G8B8A8_UINT; VkFormat f_signed = VK_FORMAT_R8G8B8A8_SINT; VkFormat f_float = VK_FORMAT_R32_SFLOAT; VkFormat f_depth = VK_FORMAT_D32_SFLOAT_S8_UINT; VkFormat f_depth2 = VK_FORMAT_D32_SFLOAT; if (!ImageFormatIsSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_depth2, VK_IMAGE_TILING_OPTIMAL)) { printf("%s Requested formats not supported - BlitImageFormatTypes skipped.\n", kSkipPrefix); return; } // Note any missing feature bits bool usrc = !ImageFormatAndFeaturesSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); bool udst = !ImageFormatAndFeaturesSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool ssrc = !ImageFormatAndFeaturesSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); bool sdst = 
!ImageFormatAndFeaturesSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool fsrc = !ImageFormatAndFeaturesSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); bool fdst = !ImageFormatAndFeaturesSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool d1dst = !ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool d2src = !ImageFormatAndFeaturesSupported(gpu(), f_depth2, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); VkImageObj unsigned_image(m_device); unsigned_image.Init(64, 64, 1, f_unsigned, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(unsigned_image.initialized()); unsigned_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj signed_image(m_device); signed_image.Init(64, 64, 1, f_signed, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(signed_image.initialized()); signed_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj float_image(m_device); float_image.Init(64, 64, 1, f_float, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(float_image.initialized()); float_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj depth_image(m_device); depth_image.Init(64, 64, 1, f_depth, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(depth_image.initialized()); depth_image.SetLayout(VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj depth_image2(m_device); depth_image2.Init(64, 64, 1, f_depth2, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(depth_image2.initialized()); depth_image2.SetLayout(VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {64, 64, 1}; blitRegion.dstOffsets[0] = {0, 0, 0}; blitRegion.dstOffsets[1] = {32, 32, 1}; m_commandBuffer->begin(); // Unsigned int vs not an int m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (usrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (fdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), unsigned_image.image(), unsigned_image.Layout(), float_image.image(), float_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (fsrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (udst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), float_image.image(), float_image.Layout(), unsigned_image.image(), unsigned_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Signed int vs not an int, m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); if (ssrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (fdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), signed_image.image(), signed_image.Layout(), float_image.image(), float_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); if (fsrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (sdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), float_image.image(), float_image.Layout(), signed_image.image(), signed_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Signed vs Unsigned int - generates both VUs m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (ssrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (udst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), signed_image.image(), signed_image.Layout(), unsigned_image.image(), unsigned_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (usrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (sdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), unsigned_image.image(), unsigned_image.Layout(), signed_image.image(), signed_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Depth vs any non-identical depth format m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00231"); blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; if (d2src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (d1dst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), depth_image2.image(), depth_image2.Layout(), depth_image.image(), depth_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, BlitImageFilters) { bool cubic_support = false; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, 
m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, "VK_IMG_filter_cubic")) { m_device_extension_names.push_back("VK_IMG_filter_cubic"); cubic_support = true; } ASSERT_NO_FATAL_FAILURE(InitState()); VkFormat fmt = VK_FORMAT_R8_UINT; if (!ImageFormatIsSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL)) { printf("%s No R8_UINT format support - BlitImageFilters skipped.\n", kSkipPrefix); return; } // Create 2D images VkImageObj src2D(m_device); VkImageObj dst2D(m_device); src2D.Init(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); dst2D.Init(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(src2D.initialized()); ASSERT_TRUE(dst2D.initialized()); src2D.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); dst2D.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); // Create 3D image VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_3D; ci.format = fmt; ci.extent = {64, 64, 4}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj src3D(m_device); src3D.init(&ci); ASSERT_TRUE(src3D.initialized()); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {48, 48, 1}; blitRegion.dstOffsets[0] = {0, 0, 0}; blitRegion.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // UINT format should not support linear filtering, but check to be sure if (!ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-02001"); vkCmdBlitImage(m_commandBuffer->handle(), src2D.image(), src2D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion, VK_FILTER_LINEAR); m_errorMonitor->VerifyFound(); } if (cubic_support && !ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG)) { // Invalid filter CUBIC_IMG m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-02002"); vkCmdBlitImage(m_commandBuffer->handle(), src3D.image(), src3D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion, VK_FILTER_CUBIC_IMG); m_errorMonitor->VerifyFound(); // Invalid filter CUBIC_IMG + invalid 2D source image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-02002"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-00237"); vkCmdBlitImage(m_commandBuffer->handle(), src2D.image(), src2D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion, VK_FILTER_CUBIC_IMG); m_errorMonitor->VerifyFound(); } m_commandBuffer->end(); } TEST_F(VkLayerTest, BlitImageLayout) { TEST_DESCRIPTION("Incorrect vkCmdBlitImage layouts"); 
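// Note: the first two cases below pass layout values that vkCmdBlitImage does not accept at all, while the remaining cases record allowed layout values that do not match the images' actual (tracked) layouts, which can only be detected once the command buffer is submitted.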
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); VkResult err; VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM; VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Create images VkImageObj img_src_transfer(m_device); VkImageObj img_dst_transfer(m_device); VkImageObj img_general(m_device); VkImageObj img_color(m_device); img_src_transfer.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_dst_transfer.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_general.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_color.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(img_src_transfer.initialized()); ASSERT_TRUE(img_dst_transfer.initialized()); ASSERT_TRUE(img_general.initialized()); ASSERT_TRUE(img_color.initialized()); img_src_transfer.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); img_dst_transfer.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); img_general.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); img_color.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); VkImageBlit blit_region = {}; blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.srcSubresource.baseArrayLayer = 0; blit_region.srcSubresource.layerCount = 1; blit_region.srcSubresource.mipLevel = 0; blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.dstSubresource.baseArrayLayer = 0; blit_region.dstSubresource.layerCount = 1; blit_region.dstSubresource.mipLevel = 0; blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {48, 48, 1}; blit_region.dstOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // Illegal srcImageLayout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImageLayout-00222"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, img_dst_transfer.image(), img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR); m_errorMonitor->VerifyFound(); // Illegal dstImageLayout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImageLayout-00227"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_dst_transfer.image(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); m_commandBuffer->reset(0); m_commandBuffer->begin(); // Destination image (img_color) in invalid layout at start of the CB m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_color.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end();
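// Nothing is flagged at record time for the case above; the mismatch between img_color's actual layout (COLOR_ATTACHMENT_OPTIMAL) and the GENERAL layout recorded in the blit is reported at queue submit.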
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); m_commandBuffer->reset(0); m_commandBuffer->begin(); // Source image (img_color) in invalid layout at start of the CB m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout"); vkCmdBlitImage(m_commandBuffer->handle(), img_color.image(), VK_IMAGE_LAYOUT_GENERAL, img_dst_transfer.image(), img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); // Source image in invalid layout in the middle of CB m_commandBuffer->reset(0); m_commandBuffer->begin(); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = nullptr; img_barrier.srcAccessMask = 0; img_barrier.dstAccessMask = 0; img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; img_barrier.image = img_general.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImageLayout-00221"); vkCmdBlitImage(m_commandBuffer->handle(), img_general.image(), VK_IMAGE_LAYOUT_GENERAL, img_dst_transfer.image(), img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); // Destination image in invalid layout in the middle of CB m_commandBuffer->reset(0); m_commandBuffer->begin(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; img_barrier.image = img_dst_transfer.handle(); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImageLayout-00226"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_dst_transfer.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); } TEST_F(VkLayerTest, BlitImageOffsets) { ASSERT_NO_FATAL_FAILURE(Init()); VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM; if (!ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT)) { printf("%s No blit feature bits - BlitImageOffsets skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType
= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_1D; ci.format = fmt; ci.extent = {64, 1, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj image_1D(m_device); image_1D.init(&ci); ASSERT_TRUE(image_1D.initialized()); ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {64, 64, 1}; VkImageObj image_2D(m_device); image_2D.init(&ci); ASSERT_TRUE(image_2D.initialized()); ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {64, 64, 64}; VkImageObj image_3D(m_device); image_3D.init(&ci); ASSERT_TRUE(image_3D.initialized()); VkImageBlit blit_region = {}; blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.srcSubresource.baseArrayLayer = 0; blit_region.srcSubresource.layerCount = 1; blit_region.srcSubresource.mipLevel = 0; blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.dstSubresource.baseArrayLayer = 0; blit_region.dstSubresource.layerCount = 1; blit_region.dstSubresource.mipLevel = 0; m_commandBuffer->begin(); // 1D, with src/dest y offsets other than (0,1) blit_region.srcOffsets[0] = {0, 1, 0}; blit_region.srcOffsets[1] = {30, 1, 1}; blit_region.dstOffsets[0] = {32, 0, 0}; blit_region.dstOffsets[1] = {64, 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00245"); vkCmdBlitImage(m_commandBuffer->handle(), image_1D.image(), image_1D.Layout(), image_1D.image(), image_1D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[0] = {32, 1, 0}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstImage-00250"); vkCmdBlitImage(m_commandBuffer->handle(), image_1D.image(), image_1D.Layout(), image_1D.image(), image_1D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // 2D, with src/dest z offsets other than (0,1) blit_region.srcOffsets[0] = {0, 0, 1}; blit_region.srcOffsets[1] = {24, 31, 1}; blit_region.dstOffsets[0] = {32, 32, 0}; blit_region.dstOffsets[1] = {64, 64, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00247"); vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[0] = {32, 32, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstImage-00252"); vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Source offsets exceeding source image dimensions blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {65, 64, 1}; // src x blit_region.dstOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[1] = {64, 64, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00243"); // x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region vkCmdBlitImage(m_commandBuffer->handle(), 
image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[1] = {64, 65, 1}; // src y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00244"); // y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region vkCmdBlitImage(m_commandBuffer->handle(), image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[0] = {0, 0, 65}; // src z blit_region.srcOffsets[1] = {64, 64, 64}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00246"); // z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region vkCmdBlitImage(m_commandBuffer->handle(), image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Dest offsets exceeding destination image dimensions blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {64, 64, 1}; blit_region.dstOffsets[0] = {96, 64, 32}; // dst x blit_region.dstOffsets[1] = {64, 0, 33}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00248"); // x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.dstOffsets[0] = {0, 65, 32}; // dst y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00249"); // y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.dstOffsets[0] = {0, 64, 65}; // dst z blit_region.dstOffsets[1] = {64, 0, 64}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00251"); // z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, MiscBlitImageTests) { ASSERT_NO_FATAL_FAILURE(Init()); VkFormat f_color = VK_FORMAT_R32_SFLOAT; // Need features ..BLIT_SRC_BIT & ..BLIT_DST_BIT if (!ImageFormatAndFeaturesSupported(gpu(), f_color, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT)) { printf("%s Requested format features unavailable - MiscBlitImageTests skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = f_color; ci.extent = {64, 64, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = 
VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // 2D color image VkImageObj color_img(m_device); color_img.init(&ci); ASSERT_TRUE(color_img.initialized()); // 2D multi-sample image ci.samples = VK_SAMPLE_COUNT_4_BIT; VkImageObj ms_img(m_device); ms_img.init(&ci); ASSERT_TRUE(ms_img.initialized()); // 3D color image ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {64, 64, 8}; VkImageObj color_3D_img(m_device); color_3D_img.init(&ci); ASSERT_TRUE(color_3D_img.initialized()); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {16, 16, 1}; blitRegion.dstOffsets[0] = {32, 32, 0}; blitRegion.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // Blit with aspectMask errors blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-aspectMask-00241"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-aspectMask-00242"); vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid src mip level blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.mipLevel = ci.mipLevels; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcSubresource-01705"); // invalid srcSubresource.mipLevel // Redundant unavoidable errors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00243"); // out-of-bounds srcOffset.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00244"); // out-of-bounds srcOffset.y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00246"); // out-of-bounds srcOffset.z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // region not contained within src image vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid dst mip level blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.mipLevel = ci.mipLevels; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstSubresource-01706"); // invalid dstSubresource.mipLevel // Redundant unavoidable errors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00248"); // out-of-bounds dstOffset.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00249"); // out-of-bounds dstOffset.y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-VkImageBlit-dstOffset-00251"); // out-of-bounds dstOffset.z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // region not contained within dst image vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid src array layer blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcSubresource.baseArrayLayer = ci.arrayLayers; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcSubresource-01707"); // invalid srcSubresource layer range vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid dst array layer blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.baseArrayLayer = ci.arrayLayers; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstSubresource-01708"); // invalid dstSubresource layer range // Redundant unavoidable errors vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blitRegion.dstSubresource.baseArrayLayer = 0; // Blit multi-sample image // TODO: redundant VUs, one (1c8) or two (1d2 & 1d4) should be eliminated. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00233"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00234"); vkCmdBlitImage(m_commandBuffer->handle(), ms_img.image(), ms_img.Layout(), ms_img.image(), ms_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit 3D with baseArrayLayer != 0 or layerCount != 1 blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00240"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcSubresource-01707"); // base+count > total layer count vkCmdBlitImage(m_commandBuffer->handle(), color_3D_img.image(), color_3D_img.Layout(), color_3D_img.image(), color_3D_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00240"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-layerCount-01700"); // layer count == 0 (src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-layerCount-00239"); // src/dst layer count mismatch vkCmdBlitImage(m_commandBuffer->handle(), color_3D_img.image(), color_3D_img.Layout(), color_3D_img.image(), color_3D_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, BlitToDepthImageTests) { ASSERT_NO_FATAL_FAILURE(Init()); // Need feature ..BLIT_SRC_BIT but not ..BLIT_DST_BIT // TODO: 
provide more choices here; supporting D32_SFLOAT as BLIT_DST isn't unheard of. VkFormat f_depth = VK_FORMAT_D32_SFLOAT; if (!ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT) || ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT)) { printf("%s Requested format features unavailable - BlitToDepthImageTests skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = f_depth; ci.extent = {64, 64, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // 2D depth image VkImageObj depth_img(m_device); depth_img.init(&ci); ASSERT_TRUE(depth_img.initialized()); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {16, 16, 1}; blitRegion.dstOffsets[0] = {32, 32, 0}; blitRegion.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // Blit depth image - has SRC_BIT but not DST_BIT blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), depth_img.image(), depth_img.Layout(), depth_img.image(), depth_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, MinImageTransferGranularity) { TEST_DESCRIPTION("Tests for validation of Queue Family property minImageTransferGranularity."); ASSERT_NO_FATAL_FAILURE(Init()); auto queue_family_properties = m_device->phy().queue_properties(); auto large_granularity_family = std::find_if(queue_family_properties.begin(), queue_family_properties.end(), [](VkQueueFamilyProperties family_properties) { VkExtent3D family_granularity = family_properties.minImageTransferGranularity; // We need a queue family that supports copy operations and has a large enough minImageTransferGranularity for the tests // below to make sense. 
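// Requiring a granularity of at least 4 in every dimension guarantees that the offset/extent value of 3 used below can never be a multiple of the granularity.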
return (family_properties.queueFlags & VK_QUEUE_TRANSFER_BIT || family_properties.queueFlags & VK_QUEUE_GRAPHICS_BIT || family_properties.queueFlags & VK_QUEUE_COMPUTE_BIT) && family_granularity.depth >= 4 && family_granularity.width >= 4 && family_granularity.height >= 4; }); if (large_granularity_family == queue_family_properties.end()) { printf("%s No queue family has a large enough granularity for this test to be meaningful, skipping test\n", kSkipPrefix); return; } const size_t queue_family_index = std::distance(queue_family_properties.begin(), large_granularity_family); VkExtent3D granularity = queue_family_properties[queue_family_index].minImageTransferGranularity; VkCommandPoolObj command_pool(m_device, queue_family_index, 0); // Create source and destination images (identical except for usage) and try to copy between them VkImage srcImage; VkImage dstImage; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_3D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = granularity.width * 2; image_create_info.extent.height = granularity.height * 2; image_create_info.extent.depth = granularity.depth * 2; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.flags = 0; VkImageObj src_image_obj(m_device); src_image_obj.init(&image_create_info); ASSERT_TRUE(src_image_obj.initialized()); srcImage = src_image_obj.handle(); image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image_obj(m_device); dst_image_obj.init(&image_create_info); ASSERT_TRUE(dst_image_obj.initialized()); dstImage = dst_image_obj.handle(); VkCommandBufferObj command_buffer(m_device, &command_pool); ASSERT_TRUE(command_buffer.initialized()); command_buffer.begin(); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset.x = 0; copyRegion.srcOffset.y = 0; copyRegion.srcOffset.z = 0; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset.x = 0; copyRegion.dstOffset.y = 0; copyRegion.dstOffset.z = 0; copyRegion.extent.width = granularity.width; copyRegion.extent.height = granularity.height; copyRegion.extent.depth = granularity.depth; // Introduce failure by setting srcOffset to a bad granularity value copyRegion.srcOffset.y = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity command_buffer.CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Introduce failure by setting extent to a granularity value that is bad // for both the source and destination image.
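// (Both images were created from the same create info, so one misaligned extent width triggers the source and destination granularity checks in a single call.)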
copyRegion.srcOffset.y = 0; copyRegion.extent.width = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity command_buffer.CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Now do some buffer/image copies VkBufferObj buffer; VkMemoryPropertyFlags reqs = 0; buffer.init_as_src_and_dst(*m_device, 8 * granularity.height * granularity.width * granularity.depth, reqs); VkBufferImageCopy region = {}; region.bufferOffset = 0; region.bufferRowLength = 0; region.bufferImageHeight = 0; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.layerCount = 1; region.imageExtent.height = granularity.height; region.imageExtent.width = granularity.width; region.imageExtent.depth = granularity.depth; region.imageOffset.x = 0; region.imageOffset.y = 0; region.imageOffset.z = 0; // Introduce failure by setting imageExtent to a bad granularity value region.imageExtent.width = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity vkCmdCopyImageToBuffer(command_buffer.handle(), srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageExtent.width = granularity.width; // Introduce failure by setting imageOffset to a bad granularity value region.imageOffset.z = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity vkCmdCopyBufferToImage(command_buffer.handle(), buffer.handle(), dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); command_buffer.end(); } TEST_F(VkLayerTest, MismatchedQueueFamiliesOnSubmit) { TEST_DESCRIPTION( "Submit command buffer created using one queue family and attempt to submit them on a queue created in a different queue " "family."); ASSERT_NO_FATAL_FAILURE(Init()); // assumes it initializes all queue families on vkCreateDevice // This test is meaningless unless we have multiple queue families auto queue_family_properties = m_device->phy().queue_properties(); std::vector<uint32_t> queue_families; for (uint32_t i = 0; i < queue_family_properties.size(); ++i) if (queue_family_properties[i].queueCount > 0) queue_families.push_back(i); if (queue_families.size() < 2) { printf("%s Device only has one queue family; skipped.\n", kSkipPrefix); return; } const uint32_t queue_family = queue_families[0]; const uint32_t other_queue_family = queue_families[1]; VkQueue other_queue; vkGetDeviceQueue(m_device->device(), other_queue_family, 0, &other_queue); VkCommandPoolObj cmd_pool(m_device, queue_family); VkCommandBufferObj cmd_buff(m_device, &cmd_pool); cmd_buff.begin(); cmd_buff.end(); // Submit on the wrong queue VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmd_buff.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkQueueSubmit-pCommandBuffers-00074"); vkQueueSubmit(other_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DrawWithPipelineIncompatibleWithSubpass) { 
TEST_DESCRIPTION("Use a pipeline for the wrong subpass in a render pass instance"); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with two subpasses, both writing the same attachment. VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 1, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 2, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); char const *vsSource = "#version 450\n" "void main() { gl_Position = vec4(1); }\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main() { color = vec4(1); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); const VkPipelineLayoutObj pl(m_device); pipe.CreateVKPipeline(pl.handle(), rp); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; // subtest 1: bind in the wrong subpass vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "built for subpass 0 but used in subpass 1"); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); // subtest 2: bind in correct subpass, then transition to next subpass vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "built for subpass 0 but used in subpass 1"); 
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, ImageBarrierSubpassConflicts) { TEST_DESCRIPTION("Add a pipeline barrier within a subpass that has conflicting state"); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with a single subpass that declared a self-dependency VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep}; VkRenderPass rp; VkRenderPass rp_noselfdep; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); rpci.dependencyCount = 0; rpci.pDependencies = nullptr; err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp_noselfdep); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp_noselfdep, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); VkMemoryBarrier mem_barrier = {}; mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; mem_barrier.pNext = NULL; mem_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; mem_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 1, &mem_barrier, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); rpbi.renderPass = rp; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; 
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; // Mis-match src stage mask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Now mis-match dst stage mask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_HOST_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Set srcQueueFamilyIndex to something other than IGNORED img_barrier.srcQueueFamilyIndex = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; // Mis-match mem barrier src access mask mem_barrier = {}; mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; mem_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT; mem_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); // Mis-match mem barrier dst access mask. 
Also set srcAccessMask to 0 which should not cause an error mem_barrier.srcAccessMask = 0; mem_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); // Mis-match image barrier src access mask img_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Mis-match image barrier dst access mask img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Mis-match dependencyFlags img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0 /* wrong */, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Send non-zero bufferMemoryBarrierCount // Construct a valid BufferMemoryBarrier to avoid any parameter errors // First we need a valid buffer to reference VkBufferObj buffer; VkMemoryPropertyFlags mem_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; buffer.init_as_src_and_dst(*m_device, 256, mem_reqs); VkBufferMemoryBarrier bmb = {}; bmb.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; bmb.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; bmb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; bmb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; bmb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; bmb.buffer = buffer.handle(); bmb.offset = 0; bmb.size = VK_WHOLE_SIZE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &bmb, 0, nullptr); m_errorMonitor->VerifyFound(); // Add image barrier w/ image handle that's not in framebuffer VkImageObj lone_image(m_device); lone_image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_barrier.image = 
lone_image.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-01179"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Have image barrier with mis-matched layouts img_barrier.image = image.handle(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-oldLayout-01181"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-oldLayout-01180"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyRenderPass(m_device->device(), rp_noselfdep, nullptr); } TEST_F(VkLayerTest, InvalidSecondaryCommandBufferBarrier) { TEST_DESCRIPTION("Add an invalid image barrier in a secondary command buffer"); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with a single subpass that declared a self-dependency VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); // Second image that img_barrier will incorrectly use VkImageObj image2(m_device); image2.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; 
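// The barrier below is recorded into a secondary command buffer; because its image is not an attachment of the primary's framebuffer, the error is reported when the secondary is executed inside the render pass via vkCmdExecuteCommands.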
    vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);

    VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
    VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);

    VkCommandBufferInheritanceInfo cbii = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
                                           nullptr,
                                           rp,
                                           0,
                                           VK_NULL_HANDLE,  // Set to NULL FB handle intentionally to flesh out any errors
                                           VK_FALSE,
                                           0,
                                           0};
    VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
                                     VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,
                                     &cbii};
    vkBeginCommandBuffer(secondary.handle(), &cbbi);

    VkImageMemoryBarrier img_barrier = {};
    img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
    img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
    img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier.image = image2.handle();  // Image mis-matches with FB image
    img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    img_barrier.subresourceRange.baseArrayLayer = 0;
    img_barrier.subresourceRange.baseMipLevel = 0;
    img_barrier.subresourceRange.layerCount = 1;
    img_barrier.subresourceRange.levelCount = 1;
    vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                         VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
    secondary.end();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-01179");
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    m_errorMonitor->VerifyFound();

    vkDestroyFramebuffer(m_device->device(), fb, nullptr);
    vkDestroyRenderPass(m_device->device(), rp, nullptr);
}

TEST_F(VkLayerTest, ImageBarrierSubpassConflict) {
    TEST_DESCRIPTION("Check case where subpass index references different image from image barrier");
    ASSERT_NO_FATAL_FAILURE(Init());

    // Create RP/FB combo where subpass has incorrect index attachment, this is 2nd half of
    // "VUID-vkCmdPipelineBarrier-image-01179"
    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    // ref attachment points to wrong attachment index compared to img_barrier below
    VkAttachmentReference ref = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
    };
    VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                               VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                               VK_DEPENDENCY_BY_REGION_BIT};

    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, attach, 1, subpasses, 1, &dep};
    VkRenderPass rp;

    VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    ASSERT_VK_SUCCESS(err);

    VkImageObj image(m_device);
    image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
    VkImageObj image2(m_device);
    image2.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    VkImageView imageView2 = image2.targetView(VK_FORMAT_R8G8B8A8_UNORM);
    // re-use imageView from start of test
    VkImageView iv_array[2] = {imageView, imageView2};

    VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 2, iv_array, 32, 32, 1};
    VkFramebuffer fb;
    err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb);
    ASSERT_VK_SUCCESS(err);

    VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr};

    VkImageMemoryBarrier img_barrier = {};
    img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier.image = image.handle(); /* barrier references image from attachment index 0 */
    img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    img_barrier.subresourceRange.baseArrayLayer = 0;
    img_barrier.subresourceRange.baseMipLevel = 0;
    img_barrier.subresourceRange.layerCount = 1;
    img_barrier.subresourceRange.levelCount = 1;

    m_commandBuffer->begin();
    vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-01179");
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                         &img_barrier);
    m_errorMonitor->VerifyFound();

    vkDestroyFramebuffer(m_device->device(), fb, nullptr);
    vkDestroyRenderPass(m_device->device(), rp, nullptr);
}

TEST_F(VkLayerTest, TemporaryExternalSemaphore) {
#ifdef _WIN32
    const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME;
    const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR;
#else
    const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME;
    const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
    // Check for external semaphore instance extensions
    if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME);
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Check for external semaphore device extensions
    if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
        m_device_extension_names.push_back(extension_name);
        m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME);
} else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external semaphore import and export capability VkPhysicalDeviceExternalSemaphoreInfoKHR esi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, nullptr, handle_type}; VkExternalSemaphorePropertiesKHR esp = {VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"); vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(gpu(), &esi, &esp); if (!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) || !(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External semaphore does not support importing and exporting, skipping test\n", kSkipPrefix); return; } VkResult err; // Create a semaphore to export payload from VkExportSemaphoreCreateInfoKHR esci = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, nullptr, handle_type}; VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &esci, 0}; VkSemaphore export_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &export_semaphore); ASSERT_VK_SUCCESS(err); // Create a semaphore to import payload into sci.pNext = nullptr; VkSemaphore import_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &import_semaphore); ASSERT_VK_SUCCESS(err); #ifdef _WIN32 // Export semaphore payload to an opaque handle HANDLE handle = nullptr; VkSemaphoreGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreWin32HandleKHR"); err = vkGetSemaphoreWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above *temporarily* VkImportSemaphoreWin32HandleInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, nullptr, import_semaphore, VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, handle_type, handle, nullptr}; auto vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreWin32HandleKHR"); err = vkImportSemaphoreWin32HandleKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #else // Export semaphore payload to an opaque handle int fd = 0; VkSemaphoreGetFdInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreFdKHR"); err = vkGetSemaphoreFdKHR(m_device->device(), &ghi, &fd); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above *temporarily* VkImportSemaphoreFdInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, nullptr, import_semaphore, VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd}; auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR"); err = vkImportSemaphoreFdKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #endif // Wait on the imported semaphore twice in vkQueueSubmit, the second wait should be an error VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo si[] = { 
        {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore},
        {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr},
        {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore},
        {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr},
    };
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has no way to be signaled");
    vkQueueSubmit(m_device->m_queue, 4, si, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();

    // Wait on the imported semaphore twice in vkQueueBindSparse, the second wait should be an error
    VkBindSparseInfo bi[] = {
        {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore},
        {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr},
        {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore},
        {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr},
    };
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has no way to be signaled");
    vkQueueBindSparse(m_device->m_queue, 4, bi, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();

    // Cleanup
    err = vkQueueWaitIdle(m_device->m_queue);
    ASSERT_VK_SUCCESS(err);
    vkDestroySemaphore(m_device->device(), export_semaphore, nullptr);
    vkDestroySemaphore(m_device->device(), import_semaphore, nullptr);
}

TEST_F(VkLayerTest, TemporaryExternalFence) {
#ifdef _WIN32
    const auto extension_name = VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME;
    const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR;
#else
    const auto extension_name = VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME;
    const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
    // Check for external fence instance extensions
    if (InstanceExtensionSupported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME);
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s External fence extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Check for external fence device extensions
    if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
        m_device_extension_names.push_back(extension_name);
        m_device_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME);
    } else {
        printf("%s External fence extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    // Check for external fence import and export capability
    VkPhysicalDeviceExternalFenceInfoKHR efi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, nullptr, handle_type};
    VkExternalFencePropertiesKHR efp = {VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, nullptr};
    auto vkGetPhysicalDeviceExternalFencePropertiesKHR =
        (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)vkGetInstanceProcAddr(
            instance(), "vkGetPhysicalDeviceExternalFencePropertiesKHR");
    vkGetPhysicalDeviceExternalFencePropertiesKHR(gpu(), &efi, &efp);

    if (!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR) ||
        !(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR)) {
        printf("%s External fence does not support importing and exporting, skipping test\n", kSkipPrefix);
        return;
    }

    VkResult err;

    // Create a fence to export payload from
    VkFence export_fence;
    {
        VkExportFenceCreateInfoKHR efci = {VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, nullptr, handle_type};
        VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &efci, 0};
        err = vkCreateFence(m_device->device(), &fci, nullptr, &export_fence);
        ASSERT_VK_SUCCESS(err);
    }

    // Create a fence to import payload into
    VkFence import_fence;
    {
        VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
        err = vkCreateFence(m_device->device(), &fci, nullptr, &import_fence);
        ASSERT_VK_SUCCESS(err);
    }

#ifdef _WIN32
    // Export fence payload to an opaque handle
    HANDLE handle = nullptr;
    {
        VkFenceGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_fence, handle_type};
        auto vkGetFenceWin32HandleKHR =
            (PFN_vkGetFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceWin32HandleKHR");
        err = vkGetFenceWin32HandleKHR(m_device->device(), &ghi, &handle);
        ASSERT_VK_SUCCESS(err);
    }

    // Import opaque handle exported above
    {
        VkImportFenceWin32HandleInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR, nullptr, import_fence,
                                               VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, handle_type, handle, nullptr};
        auto vkImportFenceWin32HandleKHR =
            (PFN_vkImportFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceWin32HandleKHR");
        err = vkImportFenceWin32HandleKHR(m_device->device(), &ifi);
        ASSERT_VK_SUCCESS(err);
    }
#else
    // Export fence payload to an opaque handle
    int fd = 0;
    {
        VkFenceGetFdInfoKHR gfi = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, export_fence, handle_type};
        auto vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceFdKHR");
        err = vkGetFenceFdKHR(m_device->device(), &gfi, &fd);
        ASSERT_VK_SUCCESS(err);
    }

    // Import opaque handle exported above
    {
        VkImportFenceFdInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, nullptr, import_fence,
                                      VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd};
        auto vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceFdKHR");
        err = vkImportFenceFdKHR(m_device->device(), &ifi);
        ASSERT_VK_SUCCESS(err);
    }
#endif

    // Undo the temporary import
    vkResetFences(m_device->device(), 1, &import_fence);

    // Signal the previously imported fence twice, the second signal should produce a validation error
    vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "is already in use by another submission.");
    vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence);
    m_errorMonitor->VerifyFound();

    // Cleanup
    err = vkQueueWaitIdle(m_device->m_queue);
    ASSERT_VK_SUCCESS(err);
    vkDestroyFence(m_device->device(), export_fence, nullptr);
    vkDestroyFence(m_device->device(), import_fence, nullptr);
}

TEST_F(VkPositiveLayerTest, SecondaryCommandBufferBarrier) {
    TEST_DESCRIPTION("Add a pipeline barrier in a secondary command buffer");
    ASSERT_NO_FATAL_FAILURE(Init());

    // A renderpass with a single subpass that declared a self-dependency
    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpasses[]
= { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferInheritanceInfo cbii = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, nullptr, rp, 0, VK_NULL_HANDLE, // Set to NULL FB handle intentionally to flesh out any errors VK_FALSE, 0, 0}; VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, &cbii}; vkBeginCommandBuffer(secondary.handle(), &cbbi); VkMemoryBarrier mem_barrier = {}; mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; mem_barrier.pNext = NULL; mem_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT; mem_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT; vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0, nullptr); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); secondary.end(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = 
&m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } static void TestRenderPassCreate(ErrorMonitor *error_monitor, const VkDevice device, const VkRenderPassCreateInfo *create_info, PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR, const char *rp1_vuid, const char *rp2_vuid) { // "... must be less than the total number of attachments ..." VkRenderPass render_pass = VK_NULL_HANDLE; VkResult err; if (rp1_vuid) { error_monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, rp1_vuid); err = vkCreateRenderPass(device, create_info, nullptr, &render_pass); if (err == VK_SUCCESS) vkDestroyRenderPass(device, render_pass, nullptr); error_monitor->VerifyFound(); } if (vkCreateRenderPass2KHR && rp2_vuid) { safe_VkRenderPassCreateInfo2KHR create_info2; ConvertVkRenderPassCreateInfoToV2KHR(create_info, &create_info2); error_monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, rp2_vuid); err = vkCreateRenderPass2KHR(device, create_info2.ptr(), nullptr, &render_pass); if (err == VK_SUCCESS) vkDestroyRenderPass(device, render_pass, nullptr); error_monitor->VerifyFound(); } } TEST_F(VkLayerTest, RenderPassCreateAttachmentIndexOutOfRange) { // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr; bool rp2Supported = false; // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); } // There are no attachments, but refer to attachment 0. VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr}; // "... must be less than the total number of attachments ..." 
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "VUID-VkRenderPassCreateInfo-attachment-00834", "VUID-VkRenderPassCreateInfo2KHR-attachment-03051");
}

TEST_F(VkLayerTest, RenderPassCreateAttachmentReadOnlyButCleared) {
    // Check for VK_KHR_get_physical_device_properties2
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr;
    bool rp2Supported = false;
    bool maintenance2Supported = false;

    // Check for VK_KHR_maintenance2
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
        maintenance2Supported = true;
    }

    // Check for VK_KHR_create_renderpass2
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
        rp2Supported = true;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    // VK_KHR_maintenance2 was promoted to core in Vulkan 1.1, so its layouts are also usable on 1.1 devices
    if (m_device->props.apiVersion >= VK_API_VERSION_1_1) {
        maintenance2Supported = true;
    }

    if (rp2Supported) {
        vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR");
    }

    VkAttachmentDescription description = {0,
                                           VK_FORMAT_D32_SFLOAT_S8_UINT,
                                           VK_SAMPLE_COUNT_1_BIT,
                                           VK_ATTACHMENT_LOAD_OP_CLEAR,
                                           VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                           VK_ATTACHMENT_LOAD_OP_CLEAR,
                                           VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                           VK_IMAGE_LAYOUT_GENERAL,
                                           VK_IMAGE_LAYOUT_GENERAL};

    VkAttachmentReference depth_stencil_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL};

    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &depth_stencil_ref, 0,
                                    nullptr};

    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &description, 1, &subpass, 0,
                                   nullptr};

    // VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL but depth cleared
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "VUID-VkRenderPassCreateInfo-pAttachments-00836", "VUID-VkRenderPassCreateInfo2KHR-pAttachments-02522");

    if (maintenance2Supported) {
        // VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL but depth cleared
        depth_stencil_ref.layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL;
        TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                             "VUID-VkRenderPassCreateInfo-pAttachments-01566", nullptr);

        // VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL but depth cleared
        depth_stencil_ref.layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL;
        TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                             "VUID-VkRenderPassCreateInfo-pAttachments-01567", nullptr);
    }
}

TEST_F(VkLayerTest, RenderPassCreateAttachmentUsedTwiceColor) {
    TEST_DESCRIPTION("Attachment is used simultaneously as two color attachments. This is usually unintended.");

    // Check for VK_KHR_get_physical_device_properties2
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr;
    bool rp2Supported = false;

    // Check for VK_KHR_create_renderpass2
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
        rp2Supported = true;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    if (rp2Supported) {
        vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR");
    }

    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    VkAttachmentReference refs[] = {
        {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
        {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 2, refs, nullptr, nullptr, 0, nullptr},
    };
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr};

    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "subpass 0 already uses attachment 0 as a color attachment",
                         "subpass 0 already uses attachment 0 as a color attachment");
}

TEST_F(VkLayerTest, RenderPassCreateAttachmentDescriptionInvalidFinalLayout) {
    TEST_DESCRIPTION("VkAttachmentDescription's finalLayout must not be UNDEFINED or PREINITIALIZED");

    // Check for VK_KHR_get_physical_device_properties2
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr;
    bool rp2Supported = false;

    // Check for VK_KHR_create_renderpass2
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
        rp2Supported = true;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    if (rp2Supported) {
        vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR");
    }

    VkAttachmentDescription attach_desc = {};
    attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM;
    attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    attach_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attach_desc.finalLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VkAttachmentReference attach_ref = {};
    attach_ref.attachment = 0;
    attach_ref.layout =
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach_ref; VkRenderPassCreateInfo rpci = {}; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rpci.attachmentCount = 1; rpci.pAttachments = &attach_desc; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkAttachmentDescription-finalLayout-00843", "VUID-VkAttachmentDescription2KHR-finalLayout-03061"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkAttachmentDescription-finalLayout-00843", "VUID-VkAttachmentDescription2KHR-finalLayout-03061"); } TEST_F(VkLayerTest, RenderPassCreateAttachmentsMisc) { TEST_DESCRIPTION( "Ensure that CreateRenderPass produces the expected validation errors when a subpass's attachments violate the valid usage " "conditions."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr; bool rp2Supported = false; // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); } std::vector<VkAttachmentDescription> attachments = { // input attachments {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, // color attachments {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, // depth attachment {0, VK_FORMAT_D24_UNORM_S8_UINT, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}, // resolve attachment {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, // preserve attachments {0, 
         VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };

    std::vector<VkAttachmentReference> input = {
        {0, VK_IMAGE_LAYOUT_GENERAL},
    };
    std::vector<VkAttachmentReference> color = {
        {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
        {2, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    VkAttachmentReference depth = {3, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
    std::vector<VkAttachmentReference> resolve = {
        {4, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
        {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    std::vector<uint32_t> preserve = {5};

    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, (uint32_t)input.size(), input.data(),
                                    (uint32_t)color.size(), color.data(), resolve.data(), &depth,
                                    (uint32_t)preserve.size(), preserve.data()};

    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, (uint32_t)attachments.size(),
                                   attachments.data(), 1, &subpass, 0, nullptr};

    // Test too many color attachments
    {
        std::vector<VkAttachmentReference> too_many_colors(m_device->props.limits.maxColorAttachments + 1, color[0]);
        subpass.colorAttachmentCount = (uint32_t)too_many_colors.size();
        subpass.pColorAttachments = too_many_colors.data();
        subpass.pResolveAttachments = NULL;
        TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                             "VUID-VkSubpassDescription-colorAttachmentCount-00845",
                             "VUID-VkSubpassDescription2KHR-colorAttachmentCount-03063");
        subpass.colorAttachmentCount = (uint32_t)color.size();
        subpass.pColorAttachments = color.data();
        subpass.pResolveAttachments = resolve.data();
    }

    // Test sample count mismatch between color buffers
    attachments[subpass.pColorAttachments[1].attachment].samples = VK_SAMPLE_COUNT_8_BIT;
    depth.attachment = VK_ATTACHMENT_UNUSED;  // Avoids triggering 01418
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "VUID-VkSubpassDescription-pColorAttachments-01417",
                         "VUID-VkSubpassDescription2KHR-pColorAttachments-03069");
    depth.attachment = 3;
    attachments[subpass.pColorAttachments[1].attachment].samples = attachments[subpass.pColorAttachments[0].attachment].samples;

    // Test sample count mismatch between color buffers and depth buffer
    attachments[subpass.pDepthStencilAttachment->attachment].samples = VK_SAMPLE_COUNT_8_BIT;
    subpass.colorAttachmentCount = 1;
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "VUID-VkSubpassDescription-pDepthStencilAttachment-01418",
                         "VUID-VkSubpassDescription2KHR-pDepthStencilAttachment-03071");
    attachments[subpass.pDepthStencilAttachment->attachment].samples = attachments[subpass.pColorAttachments[0].attachment].samples;
    subpass.colorAttachmentCount = (uint32_t)color.size();

    // Test resolve attachment with UNUSED color attachment
    color[0].attachment = VK_ATTACHMENT_UNUSED;
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "VUID-VkSubpassDescription-pResolveAttachments-00847",
                         "VUID-VkSubpassDescription2KHR-pResolveAttachments-03065");
    color[0].attachment = 1;

    // Test resolve from a single-sampled color attachment
    attachments[subpass.pColorAttachments[0].attachment].samples = VK_SAMPLE_COUNT_1_BIT;
    subpass.colorAttachmentCount = 1;           // avoid mismatch (00337), and avoid double report
    subpass.pDepthStencilAttachment = nullptr;  // avoid mismatch (01418)
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "VUID-VkSubpassDescription-pResolveAttachments-00848",
                         "VUID-VkSubpassDescription2KHR-pResolveAttachments-03066");
    attachments[subpass.pColorAttachments[0].attachment].samples = VK_SAMPLE_COUNT_4_BIT;
    subpass.colorAttachmentCount = (uint32_t)color.size();
    subpass.pDepthStencilAttachment = &depth;

    // Test resolve to a multi-sampled resolve attachment
    attachments[subpass.pResolveAttachments[0].attachment].samples = VK_SAMPLE_COUNT_4_BIT;
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "VUID-VkSubpassDescription-pResolveAttachments-00849",
                         "VUID-VkSubpassDescription2KHR-pResolveAttachments-03067");
    attachments[subpass.pResolveAttachments[0].attachment].samples = VK_SAMPLE_COUNT_1_BIT;

    // Test with color/resolve format mismatch
    attachments[subpass.pColorAttachments[0].attachment].format = VK_FORMAT_R8G8B8A8_SRGB;
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "VUID-VkSubpassDescription-pResolveAttachments-00850",
                         "VUID-VkSubpassDescription2KHR-pResolveAttachments-03068");
    attachments[subpass.pColorAttachments[0].attachment].format = attachments[subpass.pResolveAttachments[0].attachment].format;

    // Test for UNUSED preserve attachments
    preserve[0] = VK_ATTACHMENT_UNUSED;
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "VUID-VkSubpassDescription-attachment-00853", "VUID-VkSubpassDescription2KHR-attachment-03073");
    preserve[0] = 5;

    // Test for preserve attachments used elsewhere in the subpass
    color[0].attachment = preserve[0];
    TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR,
                         "VUID-VkSubpassDescription-pPreserveAttachments-00854",
                         "VUID-VkSubpassDescription2KHR-pPreserveAttachments-03074");
    color[0].attachment = 1;
    input[0].attachment = 0;
    input[0].layout = VK_IMAGE_LAYOUT_GENERAL;

    // Test for attachment used first as input with loadOp=CLEAR
    {
        std::vector<VkSubpassDescription> subpasses = {subpass, subpass, subpass};
        subpasses[0].inputAttachmentCount = 0;
        subpasses[1].inputAttachmentCount = 0;
        attachments[input[0].attachment].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
        VkRenderPassCreateInfo rpci_multipass = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0,
                                                 (uint32_t)attachments.size(), attachments.data(), (uint32_t)subpasses.size(),
                                                 subpasses.data(), 0, nullptr};
        TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci_multipass, vkCreateRenderPass2KHR,
                             "VUID-VkSubpassDescription-loadOp-00846", "VUID-VkSubpassDescription2KHR-loadOp-03064");
        attachments[input[0].attachment].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    }
}

TEST_F(VkLayerTest, RenderPassCreateAttachmentReferenceInvalidLayout) {
    TEST_DESCRIPTION("Attachment reference uses PREINITIALIZED or UNDEFINED layouts");

    // Check for VK_KHR_get_physical_device_properties2
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr;
    bool rp2Supported = false;

    // Check for VK_KHR_create_renderpass2
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); } VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference refs[] = { {0, VK_IMAGE_LAYOUT_UNDEFINED}, }; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, refs, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr}; // Use UNDEFINED layout TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkAttachmentReference-layout-00857", "VUID-VkAttachmentReference2KHR-layout-03077"); // Use PREINITIALIZED layout refs[0].layout = VK_IMAGE_LAYOUT_PREINITIALIZED; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkAttachmentReference-layout-00857", "VUID-VkAttachmentReference2KHR-layout-03077"); } TEST_F(VkLayerTest, RenderPassCreateOverlappingCorrelationMasks) { TEST_DESCRIPTION("Create a subpass with overlapping correlation masks"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr; bool rp2Supported = false; if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MULTIVIEW_EXTENSION_NAME); return; } // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); } VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}; uint32_t viewMasks[] = {0x3u}; uint32_t correlationMasks[] = {0x1u, 0x3u}; VkRenderPassMultiviewCreateInfo rpmvci = { VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, nullptr, 1, viewMasks, 0, nullptr, 2, correlationMasks}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpmvci, 0, 0, nullptr, 1, &subpass, 0, nullptr}; // Correlation masks must not overlap TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841", "VUID-VkRenderPassCreateInfo2KHR-pCorrelatedViewMasks-03056"); // Check for more specific "don't set any correlation masks when multiview is not enabled" if (rp2Supported) { viewMasks[0] = 0; correlationMasks[0] = 0; correlationMasks[1] = 0; 
safe_VkRenderPassCreateInfo2KHR safe_rpci2; ConvertVkRenderPassCreateInfoToV2KHR(&rpci, &safe_rpci2); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassCreateInfo2KHR-viewMask-03057"); VkRenderPass rp; VkResult err = vkCreateRenderPass2KHR(m_device->device(), safe_rpci2.ptr(), nullptr, &rp); if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, RenderPassCreateInvalidViewMasks) { TEST_DESCRIPTION("Create a subpass with the wrong number of view masks, or inconsistent setting of view masks"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr; bool rp2Supported = false; if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MULTIVIEW_EXTENSION_NAME); return; } // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); } VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; uint32_t viewMasks[] = {0x3u, 0u}; VkRenderPassMultiviewCreateInfo rpmvci = { VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, nullptr, 1, viewMasks, 0, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpmvci, 0, 0, nullptr, 2, subpasses, 0, nullptr}; // Not enough view masks TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkRenderPassCreateInfo-pNext-01928", "VUID-VkRenderPassCreateInfo2KHR-viewMask-03058"); } TEST_F(VkLayerTest, RenderPassCreateInvalidInputAttachmentReferences) { TEST_DESCRIPTION("Create a subpass with the meta data aspect mask set for an input attachment"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MAINTENANCE2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkAttachmentDescription attach = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 0, nullptr, nullptr, nullptr, 0, nullptr}; VkInputAttachmentAspectReference iaar 
= {0, 0, VK_IMAGE_ASPECT_METADATA_BIT}; VkRenderPassInputAttachmentAspectCreateInfo rpiaaci = {VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, nullptr, 1, &iaar}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpiaaci, 0, 1, &attach, 1, &subpass, 0, nullptr}; // Invalid meta data aspect m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassCreateInfo-pNext-01963"); // Cannot/should not avoid getting this one too TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, nullptr, "VUID-VkInputAttachmentAspectReference-aspectMask-01964", nullptr); // Aspect not present iaar.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, nullptr, "VUID-VkRenderPassCreateInfo-pNext-01963", nullptr); // Invalid subpass index iaar.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; iaar.subpass = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, nullptr, "VUID-VkRenderPassCreateInfo-pNext-01926", nullptr); iaar.subpass = 0; // Invalid input attachment index iaar.inputAttachmentIndex = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, nullptr, "VUID-VkRenderPassCreateInfo-pNext-01927", nullptr); } TEST_F(VkLayerTest, RenderPassCreateSubpassNonGraphicsPipeline) { TEST_DESCRIPTION("Create a subpass with the compute pipeline bind point"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr; bool rp2Supported = false; // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); } VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_COMPUTE, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDescription-pipelineBindPoint-00844", "VUID-VkSubpassDescription2KHR-pipelineBindPoint-03062"); } TEST_F(VkLayerTest, RenderPassCreateSubpassMissingAttributesBitMultiviewNVX) { TEST_DESCRIPTION("Create a subpass with the VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX flag missing"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr; bool rp2Supported = false; if (DeviceExtensionSupported(gpu(), 
nullptr, VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME); return; } // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); } VkSubpassDescription subpasses[] = { {VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDescription-flags-00856", "VUID-VkSubpassDescription2KHR-flags-03076"); } TEST_F(VkLayerTest, RenderPassCreate2SubpassInvalidInputAttachmentParameters) { TEST_DESCRIPTION("Create a subpass with parameters in the input attachment ref which are invalid"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); VkResult err; VkAttachmentReference2KHR reference = {VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR, nullptr, VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_UNDEFINED, 0}; VkSubpassDescription2KHR subpass = {VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR, nullptr, 0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, 1, &reference, 0, nullptr, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo2KHR rpci2 = { VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR, nullptr, 0, 0, nullptr, 1, &subpass, 0, nullptr, 0, nullptr}; VkRenderPass rp; // Test for aspect mask of 0 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription2KHR-aspectMask-03176"); err = vkCreateRenderPass2KHR(m_device->device(), &rpci2, nullptr, &rp); if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyFound(); // 
Test for invalid aspect mask bits reference.aspectMask |= VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription2KHR-aspectMask-03175"); err = vkCreateRenderPass2KHR(m_device->device(), &rpci2, nullptr, &rp); if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, RenderPassCreateInvalidSubpassDependencies) { // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr; bool rp2Supported = false; bool multiviewSupported = false; if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); multiviewSupported = true; } // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); rp2Supported = true; } // Add a device features struct enabling NO features VkPhysicalDeviceFeatures features = {0}; ASSERT_NO_FATAL_FAILURE(InitState(&features)); if (m_device->props.apiVersion >= VK_API_VERSION_1_1) { multiviewSupported = true; } if (rp2Supported) { vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); } // Create two dummy subpasses VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dependency; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 2, subpasses, 1, &dependency}; // dependency = { 0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0 }; // Source subpass is not EXTERNAL, so source stage mask must not include HOST dependency = {0, 1, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-srcSubpass-00858", "VUID-VkSubpassDependency2KHR-srcSubpass-03078"); // Destination subpass is not EXTERNAL, so destination stage mask must not include HOST dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-dstSubpass-00859", "VUID-VkSubpassDependency2KHR-dstSubpass-03079"); // Geometry shaders not enabled source dependency = {0, 1, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-srcStageMask-00860", "VUID-VkSubpassDependency2KHR-srcStageMask-03080"); // Geometry shaders not enabled destination dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, 
"VUID-VkSubpassDependency-dstStageMask-00861", "VUID-VkSubpassDependency2KHR-dstStageMask-03081"); // Tessellation not enabled source dependency = {0, 1, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-srcStageMask-00862", "VUID-VkSubpassDependency2KHR-srcStageMask-03082"); // Tessellation not enabled destination dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-dstStageMask-00863", "VUID-VkSubpassDependency2KHR-dstStageMask-03083"); // Potential cyclical dependency dependency = {1, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-srcSubpass-00864", "VUID-VkSubpassDependency2KHR-srcSubpass-03084"); // EXTERNAL to EXTERNAL dependency dependency = { VK_SUBPASS_EXTERNAL, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-srcSubpass-00865", "VUID-VkSubpassDependency2KHR-srcSubpass-03085"); // Source compute stage not part of subpass 0's GRAPHICS pipeline dependency = {0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkRenderPassCreateInfo-pDependencies-00837", "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03054"); // Destination compute stage not part of subpass 0's GRAPHICS pipeline dependency = {VK_SUBPASS_EXTERNAL, 0, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkRenderPassCreateInfo-pDependencies-00838", "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03055"); // Non graphics stage in self dependency dependency = {0, 0, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-srcSubpass-01989", "VUID-VkSubpassDependency2KHR-srcSubpass-02244"); // Logically later source stages in self dependency dependency = {0, 0, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-srcSubpass-00867", "VUID-VkSubpassDependency2KHR-srcSubpass-03087"); // Source access mask mismatch with source stage mask dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_ACCESS_UNIFORM_READ_BIT, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-srcAccessMask-00868", "VUID-VkSubpassDependency2KHR-srcAccessMask-03088"); // Destination access mask mismatch with destination stage mask dependency = { 0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, 
vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-dstAccessMask-00869", "VUID-VkSubpassDependency2KHR-dstAccessMask-03089"); if (multiviewSupported) { // VIEW_LOCAL_BIT but multiview is not enabled dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, nullptr, "VUID-VkRenderPassCreateInfo2KHR-viewMask-03059"); // Enable multiview uint32_t pViewMasks[2] = {0x3u, 0x3u}; int32_t pViewOffsets[2] = {0, 0}; VkRenderPassMultiviewCreateInfo rpmvci = { VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, nullptr, 2, pViewMasks, 0, nullptr, 0, nullptr}; rpci.pNext = &rpmvci; // Excessive view offsets dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; rpmvci.pViewOffsets = pViewOffsets; rpmvci.dependencyCount = 2; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, nullptr, "VUID-VkRenderPassCreateInfo-pNext-01929", nullptr); rpmvci.dependencyCount = 0; // View offset with subpass self dependency dependency = {0, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; rpmvci.pViewOffsets = pViewOffsets; pViewOffsets[0] = 1; rpmvci.dependencyCount = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, nullptr, "VUID-VkRenderPassCreateInfo-pNext-01930", nullptr); rpmvci.dependencyCount = 0; // View offset with no view local bit if (rp2Supported) { dependency = {0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; rpmvci.pViewOffsets = pViewOffsets; pViewOffsets[0] = 1; rpmvci.dependencyCount = 1; safe_VkRenderPassCreateInfo2KHR safe_rpci2; ConvertVkRenderPassCreateInfoToV2KHR(&rpci, &safe_rpci2); TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, nullptr, "VUID-VkSubpassDependency2KHR-dependencyFlags-03092"); rpmvci.dependencyCount = 0; } // EXTERNAL subpass with VIEW_LOCAL_BIT - source subpass dependency = {VK_SUBPASS_EXTERNAL, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-dependencyFlags-02520", "VUID-VkSubpassDependency2KHR-dependencyFlags-03090"); // EXTERNAL subpass with VIEW_LOCAL_BIT - destination subpass dependency = {0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-dependencyFlags-02521", "VUID-VkSubpassDependency2KHR-dependencyFlags-03091"); // Multiple views but no view local bit in self-dependency dependency = {0, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDependency-srcSubpass-00872", "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03060"); } } TEST_F(VkLayerTest, RenderPassCreateInvalidMixedAttachmentSamplesAMD) { TEST_DESCRIPTION("Verify error messages for supported and unsupported sample counts in render pass attachments."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) 
{ m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = nullptr; bool rp2Supported = false; if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); return; } // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); } std::vector<VkAttachmentDescription> attachments; { VkAttachmentDescription att = {}; att.format = VK_FORMAT_R8G8B8A8_UNORM; att.samples = VK_SAMPLE_COUNT_1_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; attachments.push_back(att); att.format = VK_FORMAT_D16_UNORM; att.samples = VK_SAMPLE_COUNT_4_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE; att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attachments.push_back(att); } VkAttachmentReference color_ref = {}; color_ref.attachment = 0; color_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference depth_ref = {}; depth_ref.attachment = 1; depth_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_ref; subpass.pDepthStencilAttachment = &depth_ref; VkRenderPassCreateInfo rpci = {}; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rpci.attachmentCount = attachments.size(); rpci.pAttachments = attachments.data(); rpci.subpassCount = 1; rpci.pSubpasses = &subpass; m_errorMonitor->ExpectSuccess(); VkRenderPass rp; VkResult err; err = vkCreateRenderPass(device(), &rpci, NULL, &rp); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr); // Expect an error message for invalid sample counts attachments[0].samples = VK_SAMPLE_COUNT_4_BIT; attachments[1].samples = VK_SAMPLE_COUNT_1_BIT; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, vkCreateRenderPass2KHR, "VUID-VkSubpassDescription-pColorAttachments-01506", "VUID-VkSubpassDescription2KHR-pColorAttachments-03070"); } static void TestRenderPassBegin(ErrorMonitor *error_monitor, const VkCommandBuffer command_buffer, const VkRenderPassBeginInfo *begin_info, PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR, const char *rp1_vuid, const char *rp2_vuid) { // "... 
must be less than the total number of attachments ..." VkCommandBufferBeginInfo cmd_begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr}; if (rp1_vuid) { vkBeginCommandBuffer(command_buffer, &cmd_begin_info); error_monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, rp1_vuid); vkCmdBeginRenderPass(command_buffer, begin_info, VK_SUBPASS_CONTENTS_INLINE); error_monitor->VerifyFound(); vkResetCommandBuffer(command_buffer, 0); } if (vkCmdBeginRenderPass2KHR && rp2_vuid) { VkSubpassBeginInfoKHR subpass_begin_info = {VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR, nullptr, VK_SUBPASS_CONTENTS_INLINE}; vkBeginCommandBuffer(command_buffer, &cmd_begin_info); error_monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, rp2_vuid); vkCmdBeginRenderPass2KHR(command_buffer, begin_info, &subpass_begin_info); error_monitor->VerifyFound(); vkResetCommandBuffer(command_buffer, 0); } } TEST_F(VkLayerTest, RenderPassBeginInvalidRenderArea) { TEST_DESCRIPTION("Generate INVALID_RENDER_AREA error by beginning renderpass with extent outside of framebuffer"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR = nullptr; bool rp2Supported = false; // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); if (rp2Supported) { vkCmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdBeginRenderPass2KHR"); } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Framebuffer for render target is 256x256, exceed that for INVALID_RENDER_AREA m_renderPassBeginInfo.renderArea.extent.width = 257; m_renderPassBeginInfo.renderArea.extent.height = 257; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &m_renderPassBeginInfo, vkCmdBeginRenderPass2KHR, "Cannot execute a render pass with renderArea not within the bound of the framebuffer.", "Cannot execute a render pass with renderArea not within the bound of the framebuffer."); } TEST_F(VkLayerTest, RenderPassBeginWithinRenderPass) { // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR = nullptr; bool rp2Supported = false; // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if 
(rp2Supported) { vkCmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdBeginRenderPass2KHR"); } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Bind a BeginRenderPass within an active RenderPass m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Just use a dummy Renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBeginRenderPass-renderpass"); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); if (rp2Supported) { VkSubpassBeginInfoKHR subpassBeginInfo = {VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR, nullptr, VK_SUBPASS_CONTENTS_INLINE}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBeginRenderPass2KHR-renderpass"); vkCmdBeginRenderPass2KHR(m_commandBuffer->handle(), &m_renderPassBeginInfo, &subpassBeginInfo); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, RenderPassBeginIncompatibleFramebufferRenderPass) { TEST_DESCRIPTION("Test that renderpass begin is compatible with the framebuffer renderpass "); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); // Create a depth stencil image view VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_D16_UNORM, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.initialized()); VkImageView dsv; VkImageViewCreateInfo dsvci = {}; dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; dsvci.pNext = nullptr; dsvci.image = image.handle(); dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D; dsvci.format = VK_FORMAT_D16_UNORM; dsvci.subresourceRange.layerCount = 1; dsvci.subresourceRange.baseMipLevel = 0; dsvci.subresourceRange.levelCount = 1; dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; vkCreateImageView(m_device->device(), &dsvci, NULL, &dsv); // Create a renderPass with a single attachment that uses loadOp CLEAR VkAttachmentDescription description = {0, VK_FORMAT_D16_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}; VkAttachmentReference depth_stencil_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &depth_stencil_ref, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &description, 1, &subpass, 0, nullptr}; VkRenderPass rp1, rp2; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp1); subpass.pDepthStencilAttachment = nullptr; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp2); // Create a framebuffer VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp1, 1, &dsv, 128, 128, 1}; VkFramebuffer fb; vkCreateFramebuffer(m_device->handle(), &fbci, nullptr, &fb); VkRenderPassBeginInfo rp_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp2, fb, {{0, 0}, {128, 128}}, 0, nullptr}; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, nullptr, "VUID-VkRenderPassBeginInfo-renderPass-00904", nullptr); vkDestroyRenderPass(m_device->device(), rp1, nullptr); vkDestroyRenderPass(m_device->device(), rp2, nullptr); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyImageView(m_device->device(), dsv, nullptr); } TEST_F(VkLayerTest, 
RenderPassBeginLayoutsFramebufferImageUsageMismatches) { TEST_DESCRIPTION( "Test that renderpass initial/final layouts match up with the usage bits set for each attachment of the framebuffer"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR = nullptr; bool rp2Supported = false; bool maintenance2Supported = false; // Check for VK_KHR_maintenance2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); maintenance2Supported = true; } // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); if (m_device->props.apiVersion >= VK_API_VERSION_1_1) { maintenance2Supported = true; } if (rp2Supported) { vkCmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdBeginRenderPass2KHR"); } // Create an input attachment view VkImageObj iai(m_device); iai.InitNoLayout(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(iai.initialized()); VkImageView iav; VkImageViewCreateInfo iavci = {}; iavci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; iavci.pNext = nullptr; iavci.image = iai.handle(); iavci.viewType = VK_IMAGE_VIEW_TYPE_2D; iavci.format = VK_FORMAT_R8G8B8A8_UNORM; iavci.subresourceRange.layerCount = 1; iavci.subresourceRange.baseMipLevel = 0; iavci.subresourceRange.levelCount = 1; iavci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &iavci, NULL, &iav); // Create a color attachment view VkImageObj cai(m_device); cai.InitNoLayout(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(cai.initialized()); VkImageView cav; VkImageViewCreateInfo cavci = {}; cavci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; cavci.pNext = nullptr; cavci.image = cai.handle(); cavci.viewType = VK_IMAGE_VIEW_TYPE_2D; cavci.format = VK_FORMAT_R8G8B8A8_UNORM; cavci.subresourceRange.layerCount = 1; cavci.subresourceRange.baseMipLevel = 0; cavci.subresourceRange.levelCount = 1; cavci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &cavci, NULL, &cav); // Create a renderPass with those attachments VkAttachmentDescription descriptions[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, {1, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}}; VkAttachmentReference input_ref = {0, VK_IMAGE_LAYOUT_GENERAL}; VkAttachmentReference color_ref = {1, 
VK_IMAGE_LAYOUT_GENERAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input_ref, 1, &color_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descriptions, 1, &subpass, 0, nullptr}; VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); // Create a framebuffer VkImageView views[] = {iav, cav}; VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 2, views, 128, 128, 1}; VkFramebuffer fb; vkCreateFramebuffer(m_device->handle(), &fbci, nullptr, &fb); VkRenderPassBeginInfo rp_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {128, 128}}, 0, nullptr}; VkRenderPass rp_invalid; // Initial layout is VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but attachment doesn't support IMAGE_USAGE_COLOR_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, vkCmdBeginRenderPass2KHR, "VUID-vkCmdBeginRenderPass-initialLayout-00895", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03094"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL but attachment doesn't support VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT // / VK_IMAGE_USAGE_SAMPLED_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_GENERAL; descriptions[1].initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, vkCmdBeginRenderPass2KHR, "VUID-vkCmdBeginRenderPass-initialLayout-00897", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03097"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); descriptions[1].initialLayout = VK_IMAGE_LAYOUT_GENERAL; // Initial layout is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but attachment doesn't support VK_IMAGE_USAGE_TRANSFER_SRC_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, vkCmdBeginRenderPass2KHR, "VUID-vkCmdBeginRenderPass-initialLayout-00898", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03098"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL but attachment doesn't support VK_IMAGE_USAGE_TRANSFER_DST_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, vkCmdBeginRenderPass2KHR, "VUID-vkCmdBeginRenderPass-initialLayout-00899", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03099"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; const char *initial_layout_vuid_rp1 = maintenance2Supported ? 
"VUID-vkCmdBeginRenderPass-initialLayout-01758" : "VUID-vkCmdBeginRenderPass-initialLayout-00896"; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, vkCmdBeginRenderPass2KHR, initial_layout_vuid_rp1, "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, vkCmdBeginRenderPass2KHR, initial_layout_vuid_rp1, "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); if (maintenance2Supported || rp2Supported) { // Initial layout is VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, vkCmdBeginRenderPass2KHR, "VUID-vkCmdBeginRenderPass-initialLayout-01758", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, vkCmdBeginRenderPass2KHR, "VUID-vkCmdBeginRenderPass-initialLayout-01758", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); } vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyImageView(m_device->device(), iav, nullptr); vkDestroyImageView(m_device->device(), cav, nullptr); } TEST_F(VkLayerTest, RenderPassBeginClearOpMismatch) { TEST_DESCRIPTION( "Begin a renderPass where clearValueCount is less than the number of renderPass attachments that use " "loadOp VK_ATTACHMENT_LOAD_OP_CLEAR."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR = nullptr; bool rp2Supported = false; // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); if (rp2Supported) { vkCmdBeginRenderPass2KHR = 
            (PFN_vkCmdBeginRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdBeginRenderPass2KHR");
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Create a renderPass with a single attachment that uses loadOp CLEAR
    VkAttachmentReference attach = {};
    attach.layout = VK_IMAGE_LAYOUT_GENERAL;
    VkSubpassDescription subpass = {};
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &attach;
    VkRenderPassCreateInfo rpci = {};
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;
    rpci.attachmentCount = 1;
    VkAttachmentDescription attach_desc = {};
    attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM;
    // Set loadOp to CLEAR
    attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
    rpci.pAttachments = &attach_desc;
    rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    VkRenderPass rp;
    vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);

    VkRenderPassBeginInfo rp_begin = {};
    rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    rp_begin.pNext = NULL;
    rp_begin.renderPass = renderPass();
    rp_begin.framebuffer = framebuffer();
    rp_begin.clearValueCount = 0;  // Should be 1

    TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, vkCmdBeginRenderPass2KHR,
                        "VUID-VkRenderPassBeginInfo-clearValueCount-00902", "VUID-VkRenderPassBeginInfo-clearValueCount-00902");

    vkDestroyRenderPass(m_device->device(), rp, NULL);
}

TEST_F(VkLayerTest, RenderPassBeginSampleLocationsInvalidIndicesEXT) {
    TEST_DESCRIPTION("Test that attachment indices and subpass indices specified by sample locations structures are valid");

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME);
    } else {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // Create a depth stencil image view
    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_D16_UNORM, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(image.initialized());

    VkImageView dsv;
    VkImageViewCreateInfo dsvci = {};
    dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    dsvci.pNext = nullptr;
    dsvci.image = image.handle();
    dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    dsvci.format = VK_FORMAT_D16_UNORM;
    dsvci.subresourceRange.layerCount = 1;
    dsvci.subresourceRange.baseMipLevel = 0;
    dsvci.subresourceRange.levelCount = 1;
    dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    vkCreateImageView(m_device->device(), &dsvci, NULL, &dsv);

    // Create a renderPass with a single attachment that uses loadOp CLEAR
    VkAttachmentDescription description = {0,
                                            VK_FORMAT_D16_UNORM,
                                            VK_SAMPLE_COUNT_1_BIT,
                                            VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                            VK_ATTACHMENT_LOAD_OP_CLEAR,
                                            VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                            VK_IMAGE_LAYOUT_GENERAL,
                                            VK_IMAGE_LAYOUT_GENERAL};
    VkAttachmentReference depth_stencil_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &depth_stencil_ref, 0,
                                    nullptr};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &description, 1, &subpass, 0, nullptr};
    VkRenderPass rp;
    vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);

    // Create a
framebuffer VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &dsv, 128, 128, 1}; VkFramebuffer fb; vkCreateFramebuffer(m_device->handle(), &fbci, nullptr, &fb); VkSampleLocationEXT sample_location = {0.5, 0.5}; VkSampleLocationsInfoEXT sample_locations_info = { VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT, nullptr, VK_SAMPLE_COUNT_1_BIT, {1, 1}, 1, &sample_location}; VkAttachmentSampleLocationsEXT attachment_sample_locations = {0, sample_locations_info}; VkSubpassSampleLocationsEXT subpass_sample_locations = {0, sample_locations_info}; VkRenderPassSampleLocationsBeginInfoEXT rp_sl_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT, nullptr, 1, &attachment_sample_locations, 1, &subpass_sample_locations}; VkRenderPassBeginInfo rp_begin = { VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, &rp_sl_begin, rp, fb, {{0, 0}, {128, 128}}, 0, nullptr}; attachment_sample_locations.attachmentIndex = 1; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, nullptr, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531", nullptr); attachment_sample_locations.attachmentIndex = 0; subpass_sample_locations.subpassIndex = 1; TestRenderPassBegin(m_errorMonitor, m_commandBuffer->handle(), &rp_begin, nullptr, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532", nullptr); subpass_sample_locations.subpassIndex = 0; vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyImageView(m_device->device(), dsv, nullptr); } TEST_F(VkLayerTest, RenderPassNextSubpassExcessive) { TEST_DESCRIPTION("Test that an error is produced when CmdNextSubpass is called too many times in a renderpass instance"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR = nullptr; bool rp2Supported = false; // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCmdNextSubpass2KHR = (PFN_vkCmdNextSubpass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdNextSubpass2KHR"); } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdNextSubpass-None-00909"); vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); if (rp2Supported) { VkSubpassBeginInfoKHR subpassBeginInfo = {VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR, nullptr, VK_SUBPASS_CONTENTS_INLINE}; VkSubpassEndInfoKHR subpassEndInfo = {VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR, nullptr}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdNextSubpass2KHR-None-03102"); vkCmdNextSubpass2KHR(m_commandBuffer->handle(), &subpassBeginInfo, &subpassEndInfo); m_errorMonitor->VerifyFound(); } m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, 
RenderPassEndBeforeFinalSubpass) { TEST_DESCRIPTION("Test that an error is produced when CmdEndRenderPass is called before the final subpass has been reached"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR = nullptr; bool rp2Supported = false; // Check for VK_KHR_create_renderpass2 if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); rp2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); if (rp2Supported) { vkCmdEndRenderPass2KHR = (PFN_vkCmdEndRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdEndRenderPass2KHR"); } VkSubpassDescription sd[2] = {{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}}; VkRenderPassCreateInfo rcpi = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 2, sd, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rcpi, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 0, nullptr, 16, 16, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {16, 16}}, 0, nullptr}; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdEndRenderPass-None-00910"); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); if (rp2Supported) { VkSubpassEndInfoKHR subpassEndInfo = {VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR, nullptr}; m_commandBuffer->reset(); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdEndRenderPass2KHR-None-03103"); vkCmdEndRenderPass2KHR(m_commandBuffer->handle(), &subpassEndInfo); m_errorMonitor->VerifyFound(); } // Clean up. 
vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, RenderPassDestroyWhileInUse) { TEST_DESCRIPTION("Delete in-use renderPass."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create simple renderpass VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {}; rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rpbi.framebuffer = m_framebuffer; rpbi.renderPass = rp; m_commandBuffer->BeginRenderPass(rpbi); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyRenderPass-renderPass-00873"); vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyFound(); // Wait for queue to complete so we can safely destroy rp vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If renderPass is not VK_NULL_HANDLE, renderPass must be a valid VkRenderPass handle"); m_errorMonitor->SetUnexpectedError("Was it created? Has it already been destroyed?"); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassCreateAttachmentUsedTwiceOK) { TEST_DESCRIPTION("Attachment is used simultaneously as color and input, with the same layout. This is OK."); ASSERT_NO_FATAL_FAILURE(Init()); VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_GENERAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr}; VkRenderPass rp; m_errorMonitor->ExpectSuccess(); vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); m_errorMonitor->VerifyNotFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassCreateInitialLayoutUndefined) { TEST_DESCRIPTION( "Ensure that CmdBeginRenderPass with an attachment's initialLayout of VK_IMAGE_LAYOUT_UNDEFINED works when the command " "buffer has prior knowledge of that attachment's layout."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with one color attachment. 
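    // Note: the attachment below uses initialLayout UNDEFINED with finalLayout COLOR_ATTACHMENT_OPTIMAL, so the render pass
    // itself performs the layout transition; the command buffer's previously recorded layout must not trigger a false error.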
VkAttachmentDescription attachment = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Record a single command buffer which uses this renderpass twice. The // bug is triggered at the beginning of the second renderpass, when the // command buffer already has a layout recorded for the attachment. VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(m_commandBuffer->handle()); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyNotFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyImageView(m_device->device(), view, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassCreateAttachmentLayoutWithLoadOpThenReadOnly) { TEST_DESCRIPTION( "Positive test where we create a renderpass with an attachment that uses LOAD_OP_CLEAR, the first subpass has a valid " "layout, and a second subpass then uses a valid *READ_ONLY* layout."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkAttachmentReference attach[2] = {}; attach[0].attachment = 0; attach[0].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attach[1].attachment = 0; attach[1].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; VkSubpassDescription subpasses[2] = {}; // First subpass clears DS attach on load subpasses[0].pDepthStencilAttachment = &attach[0]; // 2nd subpass reads in DS as input attachment subpasses[1].inputAttachmentCount = 1; subpasses[1].pInputAttachments = &attach[1]; VkAttachmentDescription attach_desc = {}; attach_desc.format = depth_format; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE; attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; VkRenderPassCreateInfo rpci = {}; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rpci.attachmentCount = 1; rpci.pAttachments = &attach_desc; rpci.subpassCount = 2; rpci.pSubpasses = subpasses; // Now create RenderPass and verify no errors VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyNotFound(); vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkPositiveLayerTest, RenderPassBeginSubpassZeroTransitionsApplied) { TEST_DESCRIPTION("Ensure that CmdBeginRenderPass applies the layout transitions for the first subpass"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with one color attachment. VkAttachmentDescription attachment = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 1, &dep}; VkResult err; VkRenderPass rp; err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Record a single command buffer which issues a pipeline barrier w/ // image memory barrier for the attachment. This detects the previously // missing tracking of the subpass layout by throwing a validation error // if it doesn't occur. 
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); VkImageMemoryBarrier imb = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, nullptr, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, image.handle(), {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}}; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &imb); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassBeginTransitionsAttachmentUnused) { TEST_DESCRIPTION( "Ensure that layout transitions work correctly without errors, when an attachment reference is VK_ATTACHMENT_UNUSED"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with no attachments VkAttachmentReference att_ref = {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 0, nullptr, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Record a command buffer which just begins and ends the renderpass. The // bug manifests in BeginRenderPass. VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassBeginStencilLoadOp) { TEST_DESCRIPTION("Create a stencil-only attachment with a LOAD_OP set to CLEAR. stencil[Load|Store]Op used to be ignored."); VkResult result = VK_SUCCESS; ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkImageFormatProperties formatProps; vkGetPhysicalDeviceImageFormatProperties(gpu(), depth_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0, &formatProps); if (formatProps.maxExtent.width < 100 || formatProps.maxExtent.height < 100) { printf("%s Image format max extent is too small.\n", kSkipPrefix); return; } VkFormat depth_stencil_fmt = depth_format; m_depthStencil->Init(m_device, 100, 100, depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT); VkAttachmentDescription att = {}; VkAttachmentReference ref = {}; att.format = depth_stencil_fmt; att.samples = VK_SAMPLE_COUNT_1_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; att.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE; att.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkClearValue clear; clear.depthStencil.depth = 1.0; clear.depthStencil.stencil = 0; ref.attachment = 0; ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.flags = 0; subpass.inputAttachmentCount = 0; subpass.pInputAttachments = NULL; subpass.colorAttachmentCount = 0; subpass.pColorAttachments = NULL; subpass.pResolveAttachments = NULL; subpass.pDepthStencilAttachment = &ref; subpass.preserveAttachmentCount = 0; subpass.pPreserveAttachments = NULL; VkRenderPass rp; VkRenderPassCreateInfo rp_info = {}; rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rp_info.attachmentCount = 1; rp_info.pAttachments = &att; rp_info.subpassCount = 1; rp_info.pSubpasses = &subpass; result = vkCreateRenderPass(device(), &rp_info, NULL, &rp); ASSERT_VK_SUCCESS(result); VkImageView *depthView = m_depthStencil->BindInfo(); VkFramebufferCreateInfo fb_info = {}; fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; fb_info.pNext = NULL; fb_info.renderPass = rp; fb_info.attachmentCount = 1; fb_info.pAttachments = depthView; fb_info.width = 100; fb_info.height = 100; fb_info.layers = 1; VkFramebuffer fb; result = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); ASSERT_VK_SUCCESS(result); VkRenderPassBeginInfo rpbinfo = {}; rpbinfo.clearValueCount = 1; rpbinfo.pClearValues = &clear; rpbinfo.pNext = NULL; rpbinfo.renderPass = rp; rpbinfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rpbinfo.renderArea.extent.width = 100; rpbinfo.renderArea.extent.height = 100; rpbinfo.renderArea.offset.x = 0; rpbinfo.renderArea.offset.y = 0; rpbinfo.framebuffer = fb; VkFenceObj fence; fence.init(*m_device, VkFenceObj::create_info()); ASSERT_TRUE(fence.initialized()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(rpbinfo); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(fence); VkImageObj destImage(m_device); destImage.Init(100, 100, 1, depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageMemoryBarrier barrier = {}; VkImageSubresourceRange range; barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; 
barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; barrier.image = m_depthStencil->handle(); range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; range.baseMipLevel = 0; range.levelCount = 1; range.baseArrayLayer = 0; range.layerCount = 1; barrier.subresourceRange = range; fence.wait(VK_TRUE, UINT64_MAX); VkCommandBufferObj cmdbuf(m_device, m_commandPool); cmdbuf.begin(); cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); barrier.srcAccessMask = 0; barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; barrier.image = destImage.handle(); barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); VkImageCopy cregion; cregion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; cregion.srcSubresource.mipLevel = 0; cregion.srcSubresource.baseArrayLayer = 0; cregion.srcSubresource.layerCount = 1; cregion.srcOffset.x = 0; cregion.srcOffset.y = 0; cregion.srcOffset.z = 0; cregion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; cregion.dstSubresource.mipLevel = 0; cregion.dstSubresource.baseArrayLayer = 0; cregion.dstSubresource.layerCount = 1; cregion.dstOffset.x = 0; cregion.dstOffset.y = 0; cregion.dstOffset.z = 0; cregion.extent.width = 100; cregion.extent.height = 100; cregion.extent.depth = 1; cmdbuf.CopyImage(m_depthStencil->handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, destImage.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cregion); cmdbuf.end(); VkSubmitInfo submit_info; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.pNext = NULL; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = NULL; submit_info.pWaitDstStageMask = NULL; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmdbuf.handle(); submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; m_errorMonitor->ExpectSuccess(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); vkQueueWaitIdle(m_device->m_queue); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyFramebuffer(m_device->device(), fb, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassBeginInlineAndSecondaryCommandBuffers) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyNotFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, RenderPassBeginDepthStencilLayoutTransitionFromUndefined) { TEST_DESCRIPTION( "Create a render pass with depth-stencil attachment where layout transition from UNDEFINED TO DS_READ_ONLY_OPTIMAL is set " "by render pass and verify that transition has correctly occurred at queue submit time with no validation errors."); 
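    // The attachment is declared with initialLayout UNDEFINED, so the transition is performed implicitly by the render pass;
    // the test submits the command buffer and only checks that no validation errors are raised.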
ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } VkImageFormatProperties format_props; vkGetPhysicalDeviceImageFormatProperties(gpu(), depth_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, 0, &format_props); if (format_props.maxExtent.width < 32 || format_props.maxExtent.height < 32) { printf("%s Depth extent too small, RenderPassDepthStencilLayoutTransition skipped.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // A renderpass with one depth/stencil attachment. VkAttachmentDescription attachment = {0, depth_format, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &att_ref, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible ds image. VkImageObj image(m_device); image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, depth_format, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_DEPTH_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyNotFound(); // Cleanup vkDestroyImageView(m_device->device(), view, NULL); vkDestroyRenderPass(m_device->device(), rp, NULL); vkDestroyFramebuffer(m_device->device(), fb, NULL); } TEST_F(VkLayerTest, DisabledIndependentBlend) { TEST_DESCRIPTION( "Generate INDEPENDENT_BLEND by disabling independent blend and then specifying different blend states for two " "attachments"); VkPhysicalDeviceFeatures features = {}; features.independentBlend = VK_FALSE; ASSERT_NO_FATAL_FAILURE(Init(&features)); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of pAttachments must be identical"); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkPipelineObj pipeline(m_device); // Create a renderPass with two 
color attachments VkAttachmentReference attachments[2] = {}; attachments[0].layout = VK_IMAGE_LAYOUT_GENERAL; attachments[1].attachment = 1; attachments[1].layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = attachments; subpass.colorAttachmentCount = 2; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 2; VkAttachmentDescription attach_desc[2] = {}; attach_desc[0].format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc[0].samples = VK_SAMPLE_COUNT_1_BIT; attach_desc[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc[1].format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc[1].samples = VK_SAMPLE_COUNT_1_BIT; attach_desc[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass renderpass; vkCreateRenderPass(m_device->device(), &rpci, NULL, &renderpass); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); pipeline.AddShader(&vs); VkPipelineColorBlendAttachmentState att_state1 = {}, att_state2 = {}; att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state1.blendEnable = VK_TRUE; att_state2.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state2.blendEnable = VK_FALSE; pipeline.AddColorAttachment(0, att_state1); pipeline.AddColorAttachment(1, att_state2); pipeline.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderpass); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), renderpass, NULL); } // Is the Pipeline compatible with the expectations of the Renderpass/subpasses? TEST_F(VkLayerTest, PipelineRenderpassCompatibility) { TEST_DESCRIPTION( "Create a graphics pipeline that is incompatible with the requirements of its contained Renderpass/subpasses."); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetObj ds_obj(m_device); ds_obj.AppendDummy(); ds_obj.CreateVKDescriptorSet(m_commandBuffer); VkShaderObj vs_obj(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkPipelineColorBlendAttachmentState att_state1 = {}; att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state1.blendEnable = VK_TRUE; VkRenderpassObj rp_obj(m_device); { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753"); VkPipelineObj pipeline(m_device); pipeline.AddShader(&vs_obj); pipeline.AddColorAttachment(0, att_state1); VkGraphicsPipelineCreateInfo info = {}; pipeline.InitGraphicsPipelineCreateInfo(&info); info.pColorBlendState = nullptr; pipeline.CreateVKPipeline(ds_obj.GetPipelineLayout(), rp_obj.handle(), &info); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, FramebufferCreateErrors) { TEST_DESCRIPTION( "Hit errors when attempting to create a framebuffer :\n" " 1. Mismatch between framebuffer & renderPass attachmentCount\n" " 2. Use a color image as depthStencil attachment\n" " 3. Mismatch framebuffer & renderPass attachment formats\n" " 4. Mismatch framebuffer & renderPass attachment #samples\n" " 5. Framebuffer attachment w/ non-1 mip-levels\n" " 6. Framebuffer attachment where dimensions don't match\n" " 7. Framebuffer attachment where dimensions don't match\n" " 8. Framebuffer attachment w/o identity swizzle\n" " 9. 
framebuffer dimensions exceed physical device limits\n"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-attachmentCount-00876"); // Create a renderPass with a single color attachment VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); VkImageView ivs[2]; ivs[0] = m_renderTargets[0]->targetView(VK_FORMAT_B8G8R8A8_UNORM); ivs[1] = m_renderTargets[0]->targetView(VK_FORMAT_B8G8R8A8_UNORM); VkFramebufferCreateInfo fb_info = {}; fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; fb_info.pNext = NULL; fb_info.renderPass = rp; // Set mis-matching attachmentCount fb_info.attachmentCount = 2; fb_info.pAttachments = ivs; fb_info.width = 100; fb_info.height = 100; fb_info.layers = 1; VkFramebuffer fb; err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyRenderPass(m_device->device(), rp, NULL); // Create a renderPass with a depth-stencil attachment created with // IMAGE_USAGE_COLOR_ATTACHMENT // Add our color attachment to pDepthStencilAttachment subpass.pDepthStencilAttachment = &attach; subpass.pColorAttachments = NULL; VkRenderPass rp_ds; err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_ds); ASSERT_VK_SUCCESS(err); // Set correct attachment count, but attachment has COLOR usage bit set fb_info.attachmentCount = 1; fb_info.renderPass = rp_ds; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-02603"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyRenderPass(m_device->device(), rp_ds, NULL); // Create new renderpass with alternate attachment format from fb attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; subpass.pDepthStencilAttachment = NULL; subpass.pColorAttachments = &attach; err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); // Cause error due to mis-matched formats between rp & fb // rp attachment 0 now has RGBA8 but corresponding fb attach is BGRA8 fb_info.renderPass = rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00880"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyRenderPass(m_device->device(), rp, NULL); // Create new renderpass with alternate sample count from fb attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_4_BIT; err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); // Cause error due to mis-matched sample count between rp & fb 
fb_info.renderPass = rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00881"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyRenderPass(m_device->device(), rp, NULL); { // Create an image with 2 mip levels. VkImageObj image(m_device); image.Init(128, 128, 2, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); // Create a image view with two mip levels. VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_B8G8R8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; // Set level count to 2 (only 1 is allowed for FB attachment) ivci.subresourceRange.levelCount = 2; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; err = vkCreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); // Re-create renderpass to have matching sample count attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); fb_info.renderPass = rp; fb_info.pAttachments = &view; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00883"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyImageView(m_device->device(), view, NULL); } // Update view to original color buffer and grow FB dimensions too big fb_info.pAttachments = ivs; fb_info.height = 1024; fb_info.width = 1024; fb_info.layers = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } { // Create an image with one mip level. 
VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); // Create view attachment with non-identity swizzle VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_B8G8R8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; ivci.components.r = VK_COMPONENT_SWIZZLE_G; ivci.components.g = VK_COMPONENT_SWIZZLE_R; ivci.components.b = VK_COMPONENT_SWIZZLE_A; ivci.components.a = VK_COMPONENT_SWIZZLE_B; err = vkCreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); fb_info.pAttachments = &view; fb_info.height = 100; fb_info.width = 100; fb_info.layers = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00884"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyImageView(m_device->device(), view, NULL); } // reset attachment to color attachment fb_info.pAttachments = ivs; // Request fb that exceeds max width fb_info.width = m_device->props.limits.maxFramebufferWidth + 1; fb_info.height = 100; fb_info.layers = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-width-00886"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } // and width=0 fb_info.width = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-width-00885"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } // Request fb that exceeds max height fb_info.width = 100; fb_info.height = m_device->props.limits.maxFramebufferHeight + 1; fb_info.layers = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-height-00888"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } // and height=0 fb_info.height = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-height-00887"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } // Request fb that exceeds max layers fb_info.width = 100; fb_info.height = 100; fb_info.layers = m_device->props.limits.maxFramebufferLayers + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-layers-00890"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); 
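// VerifyFound() below asserts that the message(s) armed above were emitted by the
// validation layers; the driver itself may still return VK_SUCCESS for these
// out-of-range creates, which is why every negative case in this test guards the
// cleanup with `if (err == VK_SUCCESS) vkDestroyFramebuffer(...)`.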
m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } // and layers=0 fb_info.layers = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-layers-00889"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, PointSizeFailure) { TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST but do not set PointSize in vertex shader."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Pipeline topology is set to POINT_LIST"); ASSERT_NO_FATAL_FAILURE(InitViewport()); // Create VS declaring PointSize but not writing to it static const char NoPointSizeVertShader[] = "#version 450\n" "vec2 vertices[3];\n" "out gl_PerVertex\n" "{\n" " vec4 gl_Position;\n" " float gl_PointSize;\n" "};\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" "}\n"; VkShaderObj vs(m_device, NoPointSizeVertShader, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&ps); // Set Input Assembly to TOPOLOGY POINT LIST VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; pipelineobj.SetInputAssembly(&ia_state); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, PointSizeGeomShaderFailure) { TEST_DESCRIPTION( "Create a pipeline using TOPOLOGY_POINT_LIST, set PointSize vertex shader, but not in the final geometry stage."); ASSERT_NO_FATAL_FAILURE(Init()); if ((!m_device->phy().features().geometryShader) || (!m_device->phy().features().shaderTessellationAndGeometryPointSize)) { printf("%s Device does not support the required geometry shader features; skipped.\n", kSkipPrefix); return; } m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Pipeline topology is set to POINT_LIST"); ASSERT_NO_FATAL_FAILURE(InitViewport()); // Create VS declaring PointSize and writing to it static const char PointSizeVertShader[] = "#version 450\n" "vec2 vertices[3];\n" "out gl_PerVertex\n" "{\n" " vec4 gl_Position;\n" " float gl_PointSize;\n" "};\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" " gl_PointSize = 5.0;\n" "}\n"; static char const *gsSource = "#version 450\n" "layout (points) in;\n" "layout (points) out;\n" "layout (max_vertices = 1) out;\n" "void main() {\n" " gl_Position = vec4(1.0, 0.5, 0.5, 0.0);\n" " EmitVertex();\n" "}\n"; 
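// With POINT_LIST topology the last pre-rasterization stage (here the geometry
// shader) is the one expected to write gl_PointSize; the vertex shader's write is
// not enough. Roughly, the line that would silence the error is deliberately
// omitted from gsSource above:
//
//   gl_PointSize = 1.0;   // would go just before EmitVertex()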
VkShaderObj vs(m_device, PointSizeVertShader, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj gs(m_device, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&gs); pipelineobj.AddShader(&ps); // Set Input Assembly to TOPOLOGY POINT LIST VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; pipelineobj.SetInputAssembly(&ia_state); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicDepthBiasNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Depth Bias dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic depth bias m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic depth bias state not set for this command buffer"); VKTriangleTest(BsoFailDepthBias); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicLineWidthNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Line Width dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic line width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic line width state not set for this command buffer"); VKTriangleTest(BsoFailLineWidth); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicViewportNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Viewport dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic viewport state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic viewport(s) 0 are used by pipeline state object, but were not provided"); VKTriangleTest(BsoFailViewport); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicScissorNotBound) { TEST_DESCRIPTION("Run a simple draw calls to validate failure when Scissor dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic scissor state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic scissor(s) 0 are used by pipeline state object, but were not provided"); VKTriangleTest(BsoFailScissor); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicBlendConstantsNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Blend Constants dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic blend constant state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic blend constants state not set for this command buffer"); VKTriangleTest(BsoFailBlend); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicDepthBoundsNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Depth Bounds dynamic state is required but not correctly bound."); 
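// The Dynamic*NotBound tests all follow the same shape: the BsoFail* flag passed to
// VKTriangleTest is expected to build a pipeline with the matching VK_DYNAMIC_STATE_*
// enabled while skipping the corresponding vkCmdSet* call. For this test the missing
// call would look roughly like:
//
//   vkCmdSetDepthBounds(m_commandBuffer->handle(), 0.0f, 1.0f);   // never recorded here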
ASSERT_NO_FATAL_FAILURE(Init()); if (!m_device->phy().features().depthBounds) { printf("%s Device does not support depthBounds test; skipped.\n", kSkipPrefix); return; } // Dynamic depth bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic depth bounds state not set for this command buffer"); VKTriangleTest(BsoFailDepthBounds); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicStencilReadNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Stencil Read dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic stencil read mask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil read mask state not set for this command buffer"); VKTriangleTest(BsoFailStencilReadMask); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicStencilWriteNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Stencil Write dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic stencil write mask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil write mask state not set for this command buffer"); VKTriangleTest(BsoFailStencilWriteMask); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicStencilRefNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Stencil Ref dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic stencil reference m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil reference state not set for this command buffer"); VKTriangleTest(BsoFailStencilReference); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IndexBufferNotBound) { TEST_DESCRIPTION("Run an indexed draw call without an index buffer bound."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Index buffer object not bound to this command buffer when Indexed "); VKTriangleTest(BsoFailIndexBuffer); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IndexBufferBadSize) { TEST_DESCRIPTION("Run indexed draw call with bad index buffer size."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size "); VKTriangleTest(BsoFailIndexBufferBadSize); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IndexBufferBadOffset) { TEST_DESCRIPTION("Run indexed draw call with bad index buffer offset."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size "); VKTriangleTest(BsoFailIndexBufferBadOffset); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IndexBufferBadBindSize) { TEST_DESCRIPTION("Run bind index buffer with a size greater than the index buffer."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size "); VKTriangleTest(BsoFailIndexBufferBadMapSize); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IndexBufferBadBindOffset) { TEST_DESCRIPTION("Run bind index buffer with an offset greater than the size of the index buffer."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, 
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size "); VKTriangleTest(BsoFailIndexBufferBadMapOffset); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CommandBufferTwoSubmits) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // We luck out b/c by default the framework creates CB w/ the // VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->end(); // Bypass framework since it does the waits automatically VkResult err = VK_SUCCESS; VkSubmitInfo submit_info; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.pNext = NULL; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = NULL; submit_info.pWaitDstStageMask = NULL; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); vkQueueWaitIdle(m_device->m_queue); // Cause validation error by re-submitting cmd buffer that should only be // submitted once err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, AllocDescriptorFromEmptyPool) { TEST_DESCRIPTION("Attempt to allocate more sets and descriptors than descriptor pool has available."); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // This test is valid for Vulkan 1.0 only -- skip if device has an API version greater than 1.0. 
if (m_device->props.apiVersion >= VK_API_VERSION_1_1) { printf("%s Device has apiVersion greater than 1.0 -- skipping Descriptor Set checks.\n", kSkipPrefix); return; } // Create Pool w/ 1 Sampler descriptor, but try to alloc Uniform Buffer // descriptor from it VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count.descriptorCount = 2; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = 0; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding_samp = {}; dsl_binding_samp.binding = 0; dsl_binding_samp.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dsl_binding_samp.descriptorCount = 1; dsl_binding_samp.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding_samp.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout_samp(m_device, {dsl_binding_samp}); // Try to allocate 2 sets when pool only has 1 set VkDescriptorSet descriptor_sets[2]; VkDescriptorSetLayout set_layouts[2] = {ds_layout_samp.handle(), ds_layout_samp.handle()}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 2; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = set_layouts; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-descriptorSetCount-00306"); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, descriptor_sets); m_errorMonitor->VerifyFound(); alloc_info.descriptorSetCount = 1; // Create layout w/ descriptor type not available in pool VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout_ub(m_device, {dsl_binding}); VkDescriptorSet descriptor_set; alloc_info.descriptorSetCount = 1; alloc_info.pSetLayouts = &ds_layout_ub.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-descriptorPool-00307"); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); m_errorMonitor->VerifyFound(); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, FreeDescriptorFromOneShotPool) { VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeDescriptorSets-descriptorPool-00312"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.flags = 0; // Not specifying VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT means // app can only call vkResetDescriptorPool on this pool.; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; 
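// The binding filled in below exists only so one set can be allocated; the point of
// this test is the vkFreeDescriptorSets call at the end, which must be rejected
// because ds_pool was created without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.
// The only ways to reclaim sets from such a pool are, roughly:
//
//   vkResetDescriptorPool(m_device->device(), ds_pool, 0);        // bulk reset
//   vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);   // or destruction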
dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); err = vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); m_errorMonitor->VerifyFound(); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidDescriptorPool) { // Attempt to clear Descriptor Pool with bad object. // ObjectTracker should catch this. ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetDescriptorPool-descriptorPool-parameter"); uint64_t fake_pool_handle = 0xbaad6001; VkDescriptorPool bad_pool = reinterpret_cast<VkDescriptorPool &>(fake_pool_handle); vkResetDescriptorPool(device(), bad_pool, 0); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidDescriptorSet) { // Attempt to bind an invalid Descriptor Set to a valid Command Buffer // ObjectTracker should catch this. // Create a valid cmd buffer // call vkCmdBindDescriptorSets w/ false Descriptor Set uint64_t fake_set_handle = 0xbaad6001; VkDescriptorSet bad_set = reinterpret_cast<VkDescriptorSet &>(fake_set_handle); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetLayoutBinding layout_binding = {}; layout_binding.binding = 0; layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; layout_binding.descriptorCount = 1; layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; layout_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj descriptor_set_layout(m_device, {layout_binding}); const VkPipelineLayoutObj pipeline_layout(DeviceObj(), {&descriptor_set_layout}); m_commandBuffer->begin(); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &bad_set, 0, NULL); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, InvalidDescriptorSetLayout) { // Attempt to create a Pipeline Layout with an invalid Descriptor Set Layout. // ObjectTracker should catch this. uint64_t fake_layout_handle = 0xbaad6001; VkDescriptorSetLayout bad_layout = reinterpret_cast<VkDescriptorSetLayout &>(fake_layout_handle); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); VkPipelineLayout pipeline_layout; VkPipelineLayoutCreateInfo plci = {}; plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; plci.pNext = NULL; plci.setLayoutCount = 1; plci.pSetLayouts = &bad_layout; vkCreatePipelineLayout(device(), &plci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, WriteDescriptorSetIntegrityCheck) { TEST_DESCRIPTION( "This test verifies some requirements of chapter 13.2.3 of the Vulkan Spec " "1) A uniform buffer update must have a valid buffer index. 
" "2) When using an array of descriptors in a single WriteDescriptor, the descriptor types and stageflags " "must all be the same. " "3) Immutable Sampler state must match across descriptors"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00324"); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorPoolSize ds_type_count[4] = {}; ds_type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count[0].descriptorCount = 1; ds_type_count[1].type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count[1].descriptorCount = 1; ds_type_count[2].type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count[2].descriptorCount = 1; ds_type_count[3].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; ds_type_count[3].descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = sizeof(ds_type_count) / sizeof(VkDescriptorPoolSize); ds_pool_ci.pPoolSizes = ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dslb1 = {}; dslb1.binding = 0; dslb1.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dslb1.descriptorCount = 1; dslb1.stageFlags = VK_SHADER_STAGE_ALL; dslb1.pImmutableSamplers = NULL; VkDescriptorSetLayoutBinding dslb2 = {}; dslb2.binding = 1; dslb2.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dslb2.descriptorCount = 1; dslb2.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb2.pImmutableSamplers = NULL; VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dslb3 = {}; dslb3.binding = 2; dslb3.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dslb3.descriptorCount = 1; dslb3.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb3.pImmutableSamplers = static_cast<VkSampler *>(&sampler); const std::vector<VkDescriptorSetLayoutBinding> layout_bindings = {dslb1, dslb2, dslb3}; const VkDescriptorSetLayoutObj ds_layout(m_device, layout_bindings); VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); VkDescriptorSet descriptorSet; err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; // 1) The uniform buffer is intentionally invalid here vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer dyub; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub); ASSERT_VK_SUCCESS(err); VkDeviceMemory mem; VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), dyub, &mem_reqs); VkMemoryAllocateInfo mem_alloc_info = {}; 
mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc_info.allocationSize = mem_reqs.size; m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); err = vkAllocateMemory(m_device->device(), &mem_alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub, mem, 0); ASSERT_VK_SUCCESS(err); VkDescriptorBufferInfo buffInfo[2] = {}; buffInfo[0].buffer = dyub; buffInfo[0].offset = 0; buffInfo[0].range = 1024; buffInfo[1].buffer = dyub; buffInfo[1].offset = 0; buffInfo[1].range = 1024; descriptor_write.pBufferInfo = buffInfo; descriptor_write.descriptorCount = 2; // 2) The stateFlags don't match between the first and second descriptor m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // 3) The second descriptor has a null_ptr pImmutableSamplers and // the third descriptor contains an immutable sampler descriptor_write.dstBinding = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; // Make pImageInfo index non-null to avoid complaints of it missing VkDescriptorImageInfo imageInfo = {}; imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; descriptor_write.pImageInfo = &imageInfo; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), dyub, NULL); vkFreeMemory(m_device->device(), mem, NULL); vkDestroySampler(m_device->device(), sampler, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, WriteDescriptorSetConsecutiveUpdates) { TEST_DESCRIPTION( "Verifies that updates rolling over to next descriptor work correctly by destroying buffer from consecutive update known " "to be used in descriptor set and verifying that error is flagged."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); uint32_t qfi = 0; VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; bci.size = 2048; bci.queueFamilyIndexCount = 1; bci.pQueueFamilyIndices = &qfi; VkBufferObj buffer0; buffer0.init(*m_device, bci); VkPipelineObj pipe(m_device); { // Scope 2nd buffer to cause early destruction VkBufferObj buffer1; bci.size = 1024; buffer1.init(*m_device, bci); VkDescriptorBufferInfo buffer_info[3] = {}; buffer_info[0].buffer = buffer0.handle(); buffer_info[0].offset = 0; buffer_info[0].range = 1024; buffer_info[1].buffer = buffer0.handle(); buffer_info[1].offset = 1024; buffer_info[1].range = 1024; buffer_info[2].buffer = buffer1.handle(); buffer_info[2].offset = 0; buffer_info[2].range = 1024; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; // descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 3; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; 
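// descriptorCount = 3 starting at dstBinding 0, which only holds 2 descriptors, so
// per the consecutive-binding update rule the write rolls over into binding 1:
//
//   buffer_info[0] -> binding 0, element 0   (buffer0)
//   buffer_info[1] -> binding 0, element 1   (buffer0)
//   buffer_info[2] -> binding 1, element 0   (buffer1, destroyed when this scope ends)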
descriptor_write.pBufferInfo = buffer_info; // Update descriptor vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO that uses the uniform buffers char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "layout(set=0) layout(binding=1) uniform blah { int x; } duh;\n" "void main(){\n" " x = vec4(duh.x, bar.y, bar.x, 1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkResult err = pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); } // buffer2 just went out of scope and was destroyed along with its memory m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer "); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound DeviceMemory "); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineLayoutExceedsSetLimit) { TEST_DESCRIPTION("Attempt to create a pipeline layout using more than the physical limit of SetLayouts."); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetLayoutBinding layout_binding = {}; layout_binding.binding = 0; layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; layout_binding.descriptorCount = 1; layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; layout_binding.pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &layout_binding; VkDescriptorSetLayout ds_layout = {}; VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); // Create an array of DSLs, one larger than the physical limit const auto excess_layouts = 1 + m_device->phy().properties().limits.maxBoundDescriptorSets; std::vector<VkDescriptorSetLayout> dsl_array(excess_layouts, ds_layout); VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = excess_layouts; pipeline_layout_ci.pSetLayouts = dsl_array.data(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286"); 
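// excess_layouts is maxBoundDescriptorSets + 1, so the create below must trip the
// -00286 VUID (setLayoutCount <= maxBoundDescriptorSets). Repeating the same
// VkDescriptorSetLayout handle is fine - e.g. a limit of 4 simply means the array
// holds 5 identical handles; only the count matters here.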
VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); // Clean up vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); } TEST_F(VkLayerTest, CreatePipelineLayoutExcessPerStageDescriptors) { TEST_DESCRIPTION("Attempt to create a pipeline layout where total descriptors exceed per-stage limits"); ASSERT_NO_FATAL_FAILURE(Init()); uint32_t max_uniform_buffers = m_device->phy().properties().limits.maxPerStageDescriptorUniformBuffers; uint32_t max_storage_buffers = m_device->phy().properties().limits.maxPerStageDescriptorStorageBuffers; uint32_t max_sampled_images = m_device->phy().properties().limits.maxPerStageDescriptorSampledImages; uint32_t max_storage_images = m_device->phy().properties().limits.maxPerStageDescriptorStorageImages; uint32_t max_samplers = m_device->phy().properties().limits.maxPerStageDescriptorSamplers; uint32_t max_combined = std::min(max_samplers, max_sampled_images); uint32_t max_input_attachments = m_device->phy().properties().limits.maxPerStageDescriptorInputAttachments; uint32_t sum_dyn_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffersDynamic; uint32_t sum_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffers; uint32_t sum_dyn_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffersDynamic; uint32_t sum_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffers; uint32_t sum_sampled_images = m_device->phy().properties().limits.maxDescriptorSetSampledImages; uint32_t sum_storage_images = m_device->phy().properties().limits.maxDescriptorSetStorageImages; uint32_t sum_samplers = m_device->phy().properties().limits.maxDescriptorSetSamplers; uint32_t sum_input_attachments = m_device->phy().properties().limits.maxDescriptorSetInputAttachments; // Devices that report UINT32_MAX for any of these limits can't run this test if (UINT32_MAX == std::max({max_uniform_buffers, max_storage_buffers, max_sampled_images, max_storage_images, max_samplers})) { printf("%s Physical device limits report as 2^32-1. 
Skipping test.\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding dslb = {}; std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {}; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.pNext = NULL; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // VU 0fe0023e - too many sampler type descriptors in fragment stage dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dslb.descriptorCount = max_samplers; dslb.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dslb.descriptorCount = max_combined; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287"); if ((max_samplers + max_combined) > sum_samplers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"); // expect all-stages sum too } if (max_combined > sum_sampled_images) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00240 - too many uniform buffer type descriptors in vertex stage dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dslb.descriptorCount = max_uniform_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"); if (dslb.descriptorCount > sum_uniform_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678"); // expect all-stages sum too } if (dslb.descriptorCount > sum_dyn_uniform_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); 
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00242 - too many storage buffer type descriptors in compute stage dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; dslb.descriptorCount = max_storage_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_ALL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; dslb_vec.push_back(dslb); dslb.binding = 2; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; dslb.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"); if (dslb.descriptorCount > sum_dyn_storage_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681"); // expect all-stages sum too } if (dslb_vec[0].descriptorCount + dslb_vec[2].descriptorCount > sum_storage_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00244 - too many sampled image type descriptors in multiple stages dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; dslb.descriptorCount = max_sampled_images; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; dslb.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS; dslb_vec.push_back(dslb); dslb.binding = 2; dslb.descriptorCount = max_combined; dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"); if (max_combined + 2 * max_sampled_images > sum_sampled_images) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); // expect all-stages sum too } if (max_combined > sum_samplers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00246 - 
too many storage image type descriptors in fragment stage dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; dslb.descriptorCount = 1 + (max_storage_images / 2); dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291"); if (2 * dslb.descriptorCount > sum_storage_images) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d18 - too many input attachments in fragment stage dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; dslb.descriptorCount = 1 + max_input_attachments; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676"); if (dslb.descriptorCount > sum_input_attachments) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); } TEST_F(VkLayerTest, CreatePipelineLayoutExcessDescriptorsOverall) { TEST_DESCRIPTION("Attempt to create a pipeline layout where total descriptors exceed limits"); ASSERT_NO_FATAL_FAILURE(Init()); uint32_t max_uniform_buffers = m_device->phy().properties().limits.maxPerStageDescriptorUniformBuffers; uint32_t max_storage_buffers = m_device->phy().properties().limits.maxPerStageDescriptorStorageBuffers; uint32_t max_sampled_images = m_device->phy().properties().limits.maxPerStageDescriptorSampledImages; uint32_t max_storage_images = m_device->phy().properties().limits.maxPerStageDescriptorStorageImages; uint32_t max_samplers = m_device->phy().properties().limits.maxPerStageDescriptorSamplers; uint32_t max_input_attachments = m_device->phy().properties().limits.maxPerStageDescriptorInputAttachments; uint32_t sum_dyn_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffersDynamic; uint32_t sum_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffers; uint32_t sum_dyn_storage_buffers = 
m_device->phy().properties().limits.maxDescriptorSetStorageBuffersDynamic; uint32_t sum_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffers; uint32_t sum_sampled_images = m_device->phy().properties().limits.maxDescriptorSetSampledImages; uint32_t sum_storage_images = m_device->phy().properties().limits.maxDescriptorSetStorageImages; uint32_t sum_samplers = m_device->phy().properties().limits.maxDescriptorSetSamplers; uint32_t sum_input_attachments = m_device->phy().properties().limits.maxDescriptorSetInputAttachments; // Devices that report UINT32_MAX for any of these limits can't run this test if (UINT32_MAX == std::max({sum_dyn_uniform_buffers, sum_uniform_buffers, sum_dyn_storage_buffers, sum_storage_buffers, sum_sampled_images, sum_storage_images, sum_samplers, sum_input_attachments})) { printf("%s Physical device limits report as 2^32-1. Skipping test.\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding dslb = {}; std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {}; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.pNext = NULL; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // VU 0fe00d1a - too many sampler type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dslb.descriptorCount = sum_samplers / 2; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dslb.descriptorCount = sum_samplers - dslb.descriptorCount + 1; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"); if (dslb.descriptorCount > max_samplers) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287"); // Expect max-per-stage samplers exceeds limits } if (dslb.descriptorCount > sum_sampled_images) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); // Expect max overall sampled image count exceeds limits } if (dslb.descriptorCount > max_sampled_images) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"); // Expect max per-stage sampled image count exceeds limits } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d1c - too many uniform buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; 
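// Unlike the per-stage cases above, these "overall" cases push descriptorCount past
// the maxDescriptorSet* (all-stages) limit; the count set just below is
// sum_uniform_buffers + 1, and the per-stage VUID (-00288) is armed in addition
// only when that value also exceeds maxPerStageDescriptorUniformBuffers.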
dslb.descriptorCount = sum_uniform_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678"); if (dslb.descriptorCount > max_uniform_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d1e - too many dynamic uniform buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dslb.descriptorCount = sum_dyn_uniform_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679"); if (dslb.descriptorCount > max_uniform_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d20 - too many storage buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; dslb.descriptorCount = sum_storage_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680"); if (dslb.descriptorCount > max_storage_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d22 - too many dynamic storage buffer type descriptors overall 
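// In rough terms, the check being provoked next sums the descriptorCount of every
// VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC binding across all of pSetLayouts and compares the total
// against maxDescriptorSetStorageBuffersDynamic. A minimal sketch of that arithmetic (illustrative
// only, not the layer's actual implementation):
//   uint32_t total = 0;
//   for (const VkDescriptorSetLayoutBinding &b : bindings)
//       if (b.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) total += b.descriptorCount;
//   bool violates_01681 = total > limits.maxDescriptorSetStorageBuffersDynamic;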
dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; dslb.descriptorCount = sum_dyn_storage_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681"); if (dslb.descriptorCount > max_storage_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d24 - too many sampled image type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dslb.descriptorCount = max_samplers; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; // revisit: not robust to odd limits. uint32_t remaining = (max_samplers > sum_sampled_images ? 0 : (sum_sampled_images - max_samplers) / 2); dslb.descriptorCount = 1 + remaining; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 2; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; dslb.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); if (std::max(dslb_vec[0].descriptorCount, dslb_vec[1].descriptorCount) > max_sampled_images) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"); // Expect max-per-stage sampled images to exceed limits } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d26 - too many storage image type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; dslb.descriptorCount = sum_storage_images / 2; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; dslb.descriptorCount = sum_storage_images - dslb.descriptorCount + 1; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), 
&ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683"); if (dslb.descriptorCount > max_storage_images) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d28 - too many input attachment type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; dslb.descriptorCount = sum_input_attachments + 1; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684"); if (dslb.descriptorCount > max_input_attachments) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferBufferDestroyed) { TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a buffer dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buf_info.size = 256; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = mem_reqs.size; bool pass = false; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); vkCmdFillBuffer(m_commandBuffer->handle(), buffer, 0, VK_WHOLE_SIZE, 0); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer "); // Destroy buffer dependency prior to submit to cause ERROR vkDestroyBuffer(m_device->device(), buffer, NULL); VkSubmitInfo submit_info = {}; submit_info.sType = 
VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); vkFreeMemory(m_device->handle(), mem, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferBufferViewDestroyed) { TEST_DESCRIPTION("Delete bufferView bound to cmd buffer, then attempt to submit cmd buffer."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count; ds_type_count.type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding layout_binding; layout_binding.binding = 0; layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; layout_binding.descriptorCount = 1; layout_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; layout_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {layout_binding}); VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); VkDescriptorSet descriptor_set; err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); VkBufferView view; VkBufferViewCreateInfo bvci = {}; bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; bvci.buffer = buffer; bvci.format = VK_FORMAT_R32_SFLOAT; bvci.range = VK_WHOLE_SIZE; err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view); ASSERT_VK_SUCCESS(err); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; descriptor_write.pTexelBufferView = &view; 
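// Write the storage texel buffer view into binding 0 of the set; that view is destroyed after the
// command buffer is recorded, which is what should invalidate the command buffer at submit time.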
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0, r32f) uniform readonly imageBuffer s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = imageLoad(s, 0);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound BufferView "); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // Bind pipeline to cmd buffer - This causes crash on Mali vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set, 0, nullptr); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Delete BufferView in order to invalidate cmd buffer vkDestroyBufferView(m_device->device(), view, NULL); // Now attempt submit of cmd buffer VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Clean-up vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), buffer_memory, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferImageDestroyed) { TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to an image dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); VkImage image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.flags = 0; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); // Have to bind memory to image before recording cmd in cmd buffer using it VkMemoryRequirements mem_reqs; VkDeviceMemory image_mem; bool pass; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; pass = 
m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_mem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkClearColorValue ccv; ccv.float32[0] = 1.0f; ccv.float32[1] = 1.0f; ccv.float32[2] = 1.0f; ccv.float32[3] = 1.0f; VkImageSubresourceRange isr = {}; isr.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; isr.baseArrayLayer = 0; isr.baseMipLevel = 0; isr.layerCount = 1; isr.levelCount = 1; vkCmdClearColorImage(m_commandBuffer->handle(), image, VK_IMAGE_LAYOUT_GENERAL, &ccv, 1, &isr); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image "); // Destroy image dependency prior to submit to cause ERROR vkDestroyImage(m_device->device(), image, NULL); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), image_mem, nullptr); } TEST_F(VkLayerTest, InvalidCmdBufferFramebufferImageDestroyed) { TEST_DESCRIPTION( "Attempt to draw with a command buffer that is invalid due to a framebuffer image dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); VkFormatProperties format_properties; VkResult err = VK_SUCCESS; vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties); if (!(format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) { printf("%s Image format doesn't support required features.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageCreateInfo image_ci = {}; image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_ci.pNext = NULL; image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.format = VK_FORMAT_B8G8R8A8_UNORM; image_ci.extent.width = 32; image_ci.extent.height = 32; image_ci.extent.depth = 1; image_ci.mipLevels = 1; image_ci.arrayLayers = 1; image_ci.samples = VK_SAMPLE_COUNT_1_BIT; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_ci.flags = 0; VkImage image; ASSERT_VK_SUCCESS(vkCreateImage(m_device->handle(), &image_ci, NULL, &image)); VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image, VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_B8G8R8A8_UNORM, {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, 
nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Just use default renderpass with our framebuffer m_renderPassBeginInfo.framebuffer = fb; m_renderPassBeginInfo.renderArea.extent.width = 32; m_renderPassBeginInfo.renderArea.extent.height = 32; // Create Null cmd buffer for submit m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Destroy image attached to framebuffer to invalidate cmd buffer vkDestroyImage(m_device->device(), image, NULL); // Now attempt to submit cmd buffer and verify error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image "); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyFound(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyImageView(m_device->device(), view, nullptr); vkFreeMemory(m_device->device(), image_memory, nullptr); } TEST_F(VkLayerTest, FramebufferInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use framebuffer."); ASSERT_NO_FATAL_FAILURE(Init()); VkFormatProperties format_properties; VkResult err = VK_SUCCESS; vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageObj image(m_device); image.Init(256, 256, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Just use default renderpass with our framebuffer m_renderPassBeginInfo.framebuffer = fb; // Create Null cmd buffer for submit m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put it in-flight VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Destroy framebuffer while in-flight m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyFramebuffer-framebuffer-00892"); vkDestroyFramebuffer(m_device->device(), fb, NULL); m_errorMonitor->VerifyFound(); // Wait for queue to complete so we can safely destroy everything vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If framebuffer is not VK_NULL_HANDLE, framebuffer must be a valid VkFramebuffer handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Framebuffer obj"); vkDestroyFramebuffer(m_device->device(), fb, nullptr); } TEST_F(VkLayerTest, FramebufferImageInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use image that's child of framebuffer."); ASSERT_NO_FATAL_FAILURE(Init()); VkFormatProperties format_properties; VkResult err = VK_SUCCESS; vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageCreateInfo image_ci = {}; image_ci.sType = 
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_ci.pNext = NULL; image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.format = VK_FORMAT_B8G8R8A8_UNORM; image_ci.extent.width = 256; image_ci.extent.height = 256; image_ci.extent.depth = 1; image_ci.mipLevels = 1; image_ci.arrayLayers = 1; image_ci.samples = VK_SAMPLE_COUNT_1_BIT; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_ci.flags = 0; VkImage image; ASSERT_VK_SUCCESS(vkCreateImage(m_device->handle(), &image_ci, NULL, &image)); VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image, VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_B8G8R8A8_UNORM, {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Just use default renderpass with our framebuffer m_renderPassBeginInfo.framebuffer = fb; // Create Null cmd buffer for submit m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put it (and attached imageView) in-flight VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer to put framebuffer and children in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Destroy image attached to framebuffer while in-flight m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyImage-image-01000"); vkDestroyImage(m_device->device(), image, NULL); m_errorMonitor->VerifyFound(); // Wait for queue to complete so we can safely destroy image and other objects vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If image is not VK_NULL_HANDLE, image must be a valid VkImage handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Image obj"); vkDestroyImage(m_device->device(), image, NULL); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyImageView(m_device->device(), view, nullptr); vkFreeMemory(m_device->device(), image_memory, nullptr); } TEST_F(VkLayerTest, ImageMemoryNotBound) { TEST_DESCRIPTION("Attempt to draw with an image which has not had memory bound to it."); ASSERT_NO_FATAL_FAILURE(Init()); VkImage image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; VkImageCreateInfo image_create_info = 
{}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.flags = 0; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); // Have to bind memory to image before recording cmd in cmd buffer using it VkMemoryRequirements mem_reqs; VkDeviceMemory image_mem; bool pass; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &image_mem); ASSERT_VK_SUCCESS(err); // Introduce error, do not call vkBindImageMemory(m_device->device(), image, image_mem, 0); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindImageMemory()."); m_commandBuffer->begin(); VkClearColorValue ccv; ccv.float32[0] = 1.0f; ccv.float32[1] = 1.0f; ccv.float32[2] = 1.0f; ccv.float32[3] = 1.0f; VkImageSubresourceRange isr = {}; isr.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; isr.baseArrayLayer = 0; isr.baseMipLevel = 0; isr.layerCount = 1; isr.levelCount = 1; vkCmdClearColorImage(m_commandBuffer->handle(), image, VK_IMAGE_LAYOUT_GENERAL, &ccv, 1, &isr); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), image_mem, nullptr); } TEST_F(VkLayerTest, BufferMemoryNotBound) { TEST_DESCRIPTION("Attempt to copy from a buffer which has not had memory bound to it."); ASSERT_NO_FATAL_FAILURE(Init()); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkBuffer buffer; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; buf_info.size = 1024; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = 1024; bool pass = false; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); // Introduce failure by not calling vkBindBufferMemory(m_device->device(), buffer, mem, 0); 
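// Because the source buffer never had memory bound, the validation error is expected while the copy
// below is being recorded (VerifyFound() runs right after end(), before any queue submission).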
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindBufferMemory()."); VkBufferImageCopy region = {}; region.bufferRowLength = 16; region.bufferImageHeight = 16; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.layerCount = 1; region.imageExtent.height = 4; region.imageExtent.width = 4; region.imageExtent.depth = 1; m_commandBuffer->begin(); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer, image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->handle(), mem, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferEventDestroyed) { TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to an event dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); VkEvent event; VkEventCreateInfo evci = {}; evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; VkResult result = vkCreateEvent(m_device->device(), &evci, NULL, &event); ASSERT_VK_SUCCESS(result); m_commandBuffer->begin(); vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Event "); // Destroy event dependency prior to submit to cause ERROR vkDestroyEvent(m_device->device(), event, NULL); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidCmdBufferQueryPoolDestroyed) { TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a query pool dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); VkQueryPool query_pool; VkQueryPoolCreateInfo qpci{}; qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; qpci.queryType = VK_QUERY_TYPE_TIMESTAMP; qpci.queryCount = 1; VkResult result = vkCreateQueryPool(m_device->device(), &qpci, nullptr, &query_pool); ASSERT_VK_SUCCESS(result); m_commandBuffer->begin(); vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound QueryPool "); // Destroy query pool dependency prior to submit to cause ERROR vkDestroyQueryPool(m_device->device(), query_pool, NULL); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidCmdBufferPipelineDestroyed) { TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a pipeline dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); { // Use helper to create graphics pipeline CreatePipelineHelper helper(*this); helper.InitInfo(); helper.InitState(); helper.CreateGraphicsPipeline(); // Bind helper pipeline to command buffer m_commandBuffer->begin(); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, helper.pipeline_); m_commandBuffer->end(); // pipeline will be destroyed when helper goes out of 
scope } // Cause error by submitting command buffer that references destroyed pipeline m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Pipeline "); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, DestroyPipelineRenderPass) { TEST_DESCRIPTION("Draw using a pipeline whose create renderPass has been destroyed."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkResult err; // Create a renderPass that's compatible with Draw-time renderPass VkAttachmentDescription att = {}; att.format = m_render_target_fmt; att.samples = VK_SAMPLE_COUNT_1_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference ref = {}; ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; ref.attachment = 0; m_renderPassClearValues.clear(); VkClearValue clear = {}; clear.color = m_clear_color; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.flags = 0; subpass.inputAttachmentCount = 0; subpass.pInputAttachments = NULL; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &ref; subpass.pResolveAttachments = NULL; subpass.pDepthStencilAttachment = NULL; subpass.preserveAttachmentCount = 0; subpass.pPreserveAttachments = NULL; VkRenderPassCreateInfo rp_info = {}; rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rp_info.attachmentCount = 1; rp_info.pAttachments = &att; rp_info.subpassCount = 1; rp_info.pSubpasses = &subpass; VkRenderPass rp; err = vkCreateRenderPass(device(), &rp_info, NULL, &rp); ASSERT_VK_SUCCESS(err); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {{0, 0}, {64, 64}}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); const VkPipelineLayoutObj pl(m_device); pipe.CreateVKPipeline(pl.handle(), rp); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // Destroy renderPass before pipeline is used in Draw // We delay until after CmdBindPipeline to verify that invalid binding isn't // created between CB & renderPass, which we used to do. 
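// Destroying the render pass at this point is deliberately legal for this positive test: the pipeline
// only needed a compatible render pass at creation time, and the draw that follows is recorded inside
// the still-live default render pass begun above, so no validation error should be reported.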
vkDestroyRenderPass(m_device->device(), rp, nullptr); vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); vkQueueWaitIdle(m_device->m_queue); } TEST_F(VkLayerTest, InvalidCmdBufferDescriptorSetBufferDestroyed) { TEST_DESCRIPTION( "Attempt to draw with a command buffer that is invalid due to a bound descriptor set with a buffer dependency being " "destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Allocate memory and bind to buffer so we can make it to the appropriate // error VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); // Correctly update descriptor to avoid "NOT_UPDATED" error VkDescriptorBufferInfo buffInfo = {}; buffInfo.buffer = buffer; buffInfo.offset = 0; buffInfo.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = 
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = &buffInfo; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &m_viewports[0]); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &m_scissors[0]); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer "); // Destroy buffer should invalidate the cmd buffer, causing error on submit vkDestroyBuffer(m_device->device(), buffer, NULL); // Attempt to submit cmd buffer VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Cleanup vkFreeMemory(m_device->device(), mem, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferDescriptorSetImageSamplerDestroyed) { TEST_DESCRIPTION( "Attempt to draw with a command buffer that is invalid due to a bound descriptor sets with a combined image sampler having " "their image, sampler, and descriptor set each respectively destroyed and then attempting to submit associated cmd " "buffers. 
Attempt to destroy a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create images to update the descriptor with VkImage image; VkImage image2; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image2); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Allocate enough memory for both images memory_info.allocationSize = memory_reqs.size * 2; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); // Bind second image to memory right after first image err = vkBindImageMemory(m_device->device(), image2, image_memory, memory_reqs.size); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = 
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView tmp_view; // First test deletes this view VkImageView view; VkImageView view2; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &tmp_view); ASSERT_VK_SUCCESS(err); err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); image_view_create_info.image = image2; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view2); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkSampler sampler2; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler2); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = tmp_view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); // First error case is destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // This first submit should be successful vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); // Now destroy imageview and 
reset cmdBuffer vkDestroyImageView(m_device->device(), tmp_view, NULL); m_commandBuffer->reset(0); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that has been destroyed."); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Re-update descriptor with new view img_info.imageView = view; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Now test destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Destroy sampler invalidates the cmd buffer, causing error on submit vkDestroySampler(m_device->device(), sampler, NULL); // Attempt to submit cmd buffer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is invalid because bound Sampler"); submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Now re-update descriptor with valid sampler and delete image img_info.sampler = sampler2; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); VkCommandBufferBeginInfo info = {}; info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image "); m_commandBuffer->begin(&info); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Destroy image invalidates the cmd buffer, causing error on submit vkDestroyImage(m_device->device(), image, NULL); // Attempt to submit cmd buffer submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Now update descriptor to be valid, but then free descriptor img_info.imageView = view2; vkUpdateDescriptorSets(m_device->device(), 1, 
&descriptor_write, 0, NULL); m_commandBuffer->begin(&info); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Immediately try to destroy the descriptor set in the active command buffer - failure expected m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Cannot call vkFreeDescriptorSets() on descriptor set 0x"); vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); m_errorMonitor->VerifyFound(); // Try again once the queue is idle - should succeed w/o error // TODO - though the particular error above doesn't re-occur, there are other 'unexpecteds' still to clean up vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError( "pDescriptorSets must be a valid pointer to an array of descriptorSetCount VkDescriptorSet handles, each element of which " "must either be a valid handle or VK_NULL_HANDLE"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorSet obj"); vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); // Attempt to submit cmd buffer containing the freed descriptor set submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound DescriptorSet "); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Cleanup vkFreeMemory(m_device->device(), image_memory, NULL); vkDestroySampler(m_device->device(), sampler2, NULL); vkDestroyImage(m_device->device(), image2, NULL); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyImageView(m_device->device(), view2, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidDescriptorSetSamplerDestroyed) { TEST_DESCRIPTION("Attempt to draw with a bound descriptor sets with a combined image sampler where sampler has been deleted."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); // Create images to update the descriptor with VkImageObj image(m_device); const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image.handle(); image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; 
image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; VkResult err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Destroy the sampler before it's bound to the cmd buffer vkDestroySampler(m_device->device(), sampler, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); // First error case is destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " Descriptor in binding #0 at global descriptor index 0 is using sampler "); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyImageView(m_device->device(), view, NULL); } TEST_F(VkLayerTest, ImageDescriptorLayoutMismatchInternal) { TEST_DESCRIPTION("Create an image sampler layout->image layout mismatch within a command buffer"); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; 
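// (Descriptive note, added) The pool is sized for exactly one combined-image-sampler descriptor and one set;
// FREE_DESCRIPTOR_SET_BIT above would allow sets to be freed individually, though this test only destroys the
// whole pool during cleanup.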
ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create images to update the descriptor with const VkFormat format = VK_FORMAT_B8G8R8A8_UNORM; VkImageObj image(m_device); image.Init(32, 32, 1, format, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image.handle(); image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Sampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; // This should cause a mis-match. 
Actual layout at use time is SHADER_RO img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); VkCommandBufferObj cmd_buf(m_device, m_commandPool); cmd_buf.begin(); // record layout different than actual descriptor layout of SHADER_RO image.SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); cmd_buf.BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(cmd_buf.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(cmd_buf.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(cmd_buf.handle(), 0, 1, &viewport); vkCmdSetScissor(cmd_buf.handle(), 0, 1, &scissor); // At draw time the update layout will mis-match the actual layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorImageInfo-imageLayout-00344"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-DescriptorSetNotUpdated"); cmd_buf.Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); cmd_buf.EndRenderPass(); cmd_buf.end(); // Submit cmd buffer VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmd_buf.handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); // Cleanup vkDestroySampler(m_device->device(), sampler, NULL); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, ImageDescriptorLayoutMismatchExternal) { TEST_DESCRIPTION("Create an image sampler layout->image layout mismatch external to a command buffer"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkImageObj image(m_device); // Transition image to be used in shader to SHADER_READ_ONLY_OPTIMAL image.Init(128, 128, 1, 
VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; err = vkCreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo image_info{}; // Set error condition -- anything but Shader_Read_Only_Optimal which is the current image layout image_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL; image_info.imageView = view; image_info.sampler = sampler; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &image_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); vkDestroyImageView(m_device->device(), view, NULL); vkDestroySampler(m_device->device(), sampler, nullptr); } TEST_F(VkLayerTest, DescriptorPoolInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete a DescriptorPool with a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = 
VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptor_set; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create image to update the descriptor with VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); // Create Sampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put pool in-flight VkSubmitInfo submit_info = {}; 
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Destroy pool while in-flight, causing error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyDescriptorPool-descriptorPool-00303"); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Cleanup vkDestroySampler(m_device->device(), sampler, NULL); m_errorMonitor->SetUnexpectedError( "If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj"); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); // TODO : It seems Validation layers think ds_pool was already destroyed, even though it wasn't? } TEST_F(VkLayerTest, DescriptorPoolInUseResetSignaled) { TEST_DESCRIPTION("Reset a DescriptorPool with a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = nullptr; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, nullptr, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = nullptr; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptor_set; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create image to update the descriptor with VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); // Create Sampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, nullptr, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; 
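// (Descriptive note, added) The remaining VkWriteDescriptorSet fields (pNext, dstArrayElement, pBufferInfo,
// pTexelBufferView) stay zero/null from the memset above; the write fills binding 0 of the set with the combined
// image sampler before the set is bound, drawn with, and put in flight by the submit below.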
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, nullptr); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put pool in-flight VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Reset pool while in-flight, causing error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetDescriptorPool-descriptorPool-00313"); vkResetDescriptorPool(m_device->device(), ds_pool, 0); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Cleanup vkDestroySampler(m_device->device(), sampler, nullptr); m_errorMonitor->SetUnexpectedError( "If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj"); vkDestroyDescriptorPool(m_device->device(), ds_pool, nullptr); } TEST_F(VkLayerTest, DescriptorImageUpdateNoMemoryBound) { TEST_DESCRIPTION("Attempt an image descriptor set update where image's bound memory has been freed."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; 
alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create images to update the descriptor with VkImage image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); // Initially bind memory to avoid error at bind view time. We'll break binding before update. VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Allocate enough memory for image memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; // Break memory binding and attempt update vkFreeMemory(m_device->device(), image_memory, nullptr); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " previously 
bound memory was freed. Memory must not be freed prior to this operation."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkUpdateDescriptorSets() failed write update validation for Descriptor Set 0x"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Cleanup vkDestroyImage(m_device->device(), image, NULL); vkDestroySampler(m_device->device(), sampler, NULL); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidPipeline) { uint64_t fake_pipeline_handle = 0xbaad6001; VkPipeline bad_pipeline = reinterpret_cast<VkPipeline &>(fake_pipeline_handle); // Enable VK_KHR_draw_indirect_count for KHR variants ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitState()); bool has_khr_indirect = DeviceExtensionEnabled(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Attempt to bind an invalid Pipeline to a valid Command Buffer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindPipeline-pipeline-parameter"); m_commandBuffer->begin(); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, bad_pipeline); m_errorMonitor->VerifyFound(); // Try each of the 6 flavors of Draw() m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Draw*() calls must be submitted within a renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDraw-None-00442"); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexed-None-00461"); m_commandBuffer->DrawIndexed(1, 1, 0, 0, 0); m_errorMonitor->VerifyFound(); VkBufferObj buffer; VkBufferCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; ci.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; ci.size = 1024; buffer.init(*m_device, ci); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirect-None-00485"); vkCmdDrawIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 1, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirect-None-00537"); vkCmdDrawIndexedIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 1, 0); m_errorMonitor->VerifyFound(); if (has_khr_indirect) { auto fpCmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdDrawIndirectCountKHR"); ASSERT_NE(fpCmdDrawIndirectCountKHR, nullptr); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-None-03119"); fpCmdDrawIndirectCountKHR(m_commandBuffer->handle(), buffer.handle(), 0, buffer.handle(), 512, 1, 0); m_errorMonitor->VerifyFound(); auto fpCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdDrawIndexedIndirectCountKHR"); ASSERT_NE(fpCmdDrawIndexedIndirectCountKHR, nullptr); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-None-03151"); fpCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), buffer.handle(), 0, buffer.handle(), 512, 1, 0); 
m_errorMonitor->VerifyFound(); } // Also try the Dispatch variants vkCmdEndRenderPass(m_commandBuffer->handle()); // Compute submissions must be outside a renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-None-00391"); vkCmdDispatch(m_commandBuffer->handle(), 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchIndirect-None-00404"); vkCmdDispatchIndirect(m_commandBuffer->handle(), buffer.handle(), 0); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CmdDispatchExceedLimits) { TEST_DESCRIPTION("Compute dispatch with dimensions that exceed device limits"); // Enable KHX device group extensions, if available if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool khx_dg_ext_available = false; if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME); khx_dg_ext_available = true; } ASSERT_NO_FATAL_FAILURE(InitState()); uint32_t x_limit = m_device->props.limits.maxComputeWorkGroupCount[0]; uint32_t y_limit = m_device->props.limits.maxComputeWorkGroupCount[1]; uint32_t z_limit = m_device->props.limits.maxComputeWorkGroupCount[2]; if (std::max({x_limit, y_limit, z_limit}) == UINT32_MAX) { printf("%s device maxComputeWorkGroupCount limit reports UINT32_MAX, test not possible, skipping.\n", kSkipPrefix); return; } // Create a minimal compute pipeline std::string cs_text = "#version 450\nvoid main() {}\n"; // minimal no-op shader VkShaderObj cs_obj(m_device, cs_text.c_str(), VK_SHADER_STAGE_COMPUTE_BIT, this); VkPipelineLayoutCreateInfo info = {}; info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; info.pNext = nullptr; VkPipelineLayout pipe_layout; vkCreatePipelineLayout(device(), &info, nullptr, &pipe_layout); VkComputePipelineCreateInfo pipeline_info = {}; pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; pipeline_info.pNext = nullptr; pipeline_info.flags = khx_dg_ext_available ? 
VK_PIPELINE_CREATE_DISPATCH_BASE_KHR : 0; pipeline_info.layout = pipe_layout; pipeline_info.basePipelineHandle = VK_NULL_HANDLE; pipeline_info.basePipelineIndex = -1; pipeline_info.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; pipeline_info.stage.pNext = nullptr; pipeline_info.stage.flags = 0; pipeline_info.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT; pipeline_info.stage.module = cs_obj.handle(); pipeline_info.stage.pName = "main"; pipeline_info.stage.pSpecializationInfo = nullptr; VkPipeline cs_pipeline; vkCreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &cs_pipeline); // Bind pipeline to command buffer m_commandBuffer->begin(); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline); // Dispatch counts that exceed device limits m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountX-00386"); vkCmdDispatch(m_commandBuffer->handle(), x_limit + 1, y_limit, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountY-00387"); vkCmdDispatch(m_commandBuffer->handle(), x_limit, y_limit + 1, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountZ-00388"); vkCmdDispatch(m_commandBuffer->handle(), x_limit, y_limit, z_limit + 1); m_errorMonitor->VerifyFound(); if (khx_dg_ext_available) { PFN_vkCmdDispatchBaseKHR fp_vkCmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR)vkGetInstanceProcAddr(instance(), "vkCmdDispatchBaseKHR"); // Base equals or exceeds limit m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupX-00421"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit, y_limit - 1, z_limit - 1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupX-00422"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit - 1, y_limit, z_limit - 1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupZ-00423"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit - 1, y_limit - 1, z_limit, 0, 0, 0); m_errorMonitor->VerifyFound(); // (Base + count) exceeds limit uint32_t x_base = x_limit / 2; uint32_t y_base = y_limit / 2; uint32_t z_base = z_limit / 2; x_limit -= x_base; y_limit -= y_base; z_limit -= z_base; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountX-00424"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit + 1, y_limit, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountY-00425"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit, y_limit + 1, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountZ-00426"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit, y_limit, z_limit + 1); m_errorMonitor->VerifyFound(); } else { printf("%s KHX_DEVICE_GROUP_* extensions not supported, skipping CmdDispatchBaseKHR() tests.\n", kSkipPrefix); } // Clean up vkDestroyPipeline(device(), cs_pipeline, nullptr); vkDestroyPipelineLayout(device(), pipe_layout, nullptr); } 
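// The negative checks above (and throughout this file) repeat the same arm-and-verify sequence on m_errorMonitor:
// SetDesiredFailureMsg() with the expected message substring, the invalid call, then VerifyFound(). The sketch below
// captures that sequence as a standalone helper. The name ExpectLayerError is hypothetical -- it is not used by these
// tests and only assumes the monitor methods already exercised above.
template <typename Monitor, typename InvalidCall>
static void ExpectLayerError(Monitor *monitor, const char *expected_msg, InvalidCall &&invalid_call) {
    monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, expected_msg);  // arm the monitor with the expected substring
    invalid_call();                                                              // issue the call expected to fail validation
    monitor->VerifyFound();                                                      // assert the message was actually emitted
}
// Example usage (equivalent to the explicit vkCmdDispatch checks above):
//   ExpectLayerError(m_errorMonitor, "VUID-vkCmdDispatch-groupCountX-00386",
//                    [&]() { vkCmdDispatch(m_commandBuffer->handle(), x_limit + 1, y_limit, z_limit); });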
TEST_F(VkLayerTest, MultiplaneImageLayoutBadAspectFlags) { TEST_DESCRIPTION("Query layout of a multiplane image using illegal aspect flag masks"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_LINEAR; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify formats bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT); ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR; supported = supported && ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT); if (!supported) { printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } VkImage image_2plane, image_3plane; ci.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR; VkResult err = vkCreateImage(device(), &ci, NULL, &image_2plane); ASSERT_VK_SUCCESS(err); ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR; err = vkCreateImage(device(), &ci, NULL, &image_3plane); ASSERT_VK_SUCCESS(err); // Query layout of 3rd plane, for a 2-plane image VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; subres.mipLevel = 0; subres.arrayLayer = 0; VkSubresourceLayout layout = {}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-format-01581"); vkGetImageSubresourceLayout(device(), image_2plane, &subres, &layout); m_errorMonitor->VerifyFound(); // Query layout using color aspect, for a 3-plane image subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-format-01582"); vkGetImageSubresourceLayout(device(), image_3plane, &subres, &layout); m_errorMonitor->VerifyFound(); // Clean up vkDestroyImage(device(), image_2plane, NULL); vkDestroyImage(device(), image_3plane, NULL); } TEST_F(VkPositiveLayerTest, MultiplaneGetImageSubresourceLayout) { TEST_DESCRIPTION("Positive test, query layout of a single plane of a multiplane image. (repro Github #2530)"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_LINEAR; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify format bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT); if (!supported) { printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } VkImage image; VkResult err = vkCreateImage(device(), &ci, NULL, &image); ASSERT_VK_SUCCESS(err); // Query layout of 3rd plane VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; subres.mipLevel = 0; subres.arrayLayer = 0; VkSubresourceLayout layout = {}; m_errorMonitor->ExpectSuccess(); vkGetImageSubresourceLayout(device(), image, &subres, &layout); m_errorMonitor->VerifyNotFound(); vkDestroyImage(device(), image, NULL); } TEST_F(VkLayerTest, InvalidBufferViewObject) { // Create a single TEXEL_BUFFER descriptor and send it an invalid bufferView // First, cause the bufferView to be invalid due to underlying buffer being destroyed // Then destroy view itself and verify that same error is hit VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00323"); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); // Create a valid bufferView to start with VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); VkBufferView view; VkBufferViewCreateInfo bvci = {}; bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; bvci.buffer = buffer; bvci.format = VK_FORMAT_R32_SFLOAT; bvci.range = 
VK_WHOLE_SIZE; err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view); ASSERT_VK_SUCCESS(err); // First Destroy buffer underlying view which should hit error in CV vkDestroyBuffer(m_device->device(), buffer, NULL); VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; descriptor_write.pTexelBufferView = &view; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Now destroy view itself and verify same error, which is hit in PV this time vkDestroyBufferView(m_device->device(), view, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00323"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), buffer_memory, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, CreateBufferViewNoMemoryBoundToBuffer) { TEST_DESCRIPTION("Attempt to create a buffer view with a buffer that has no memory bound to it."); VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindBufferMemory()."); ASSERT_NO_FATAL_FAILURE(Init()); // Create a buffer with no bound memory and then attempt to create // a buffer view. VkBufferCreateInfo buff_ci = {}; buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; buff_ci.size = 256; buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkBufferViewCreateInfo buff_view_ci = {}; buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; buff_view_ci.buffer = buffer; buff_view_ci.format = VK_FORMAT_R8_UNORM; buff_view_ci.range = VK_WHOLE_SIZE; VkBufferView buff_view; err = vkCreateBufferView(m_device->device(), &buff_view_ci, NULL, &buff_view); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), buffer, NULL); // If last error is success, it still created the view, so delete it. if (err == VK_SUCCESS) { vkDestroyBufferView(m_device->device(), buff_view, NULL); } } TEST_F(VkLayerTest, InvalidBufferViewCreateInfoEntries) { TEST_DESCRIPTION("Attempt to create a buffer view with invalid create info."); ASSERT_NO_FATAL_FAILURE(Init()); const VkPhysicalDeviceLimits &dev_limits = m_device->props.limits; const VkDeviceSize minTexelBufferOffsetAlignment = dev_limits.minTexelBufferOffsetAlignment; if (minTexelBufferOffsetAlignment == 1) { printf("%s Test requires minTexelOffsetAlignment to not be equal to 1. 
\n", kSkipPrefix); return; } const VkFormat format_with_uniform_texel_support = VK_FORMAT_R8G8B8A8_UNORM; const char *format_with_uniform_texel_support_string = "VK_FORMAT_R8G8B8A8_UNORM"; const VkFormat format_without_texel_support = VK_FORMAT_R8G8B8_UNORM; const char *format_without_texel_support_string = "VK_FORMAT_R8G8B8_UNORM"; VkFormatProperties format_properties; vkGetPhysicalDeviceFormatProperties(gpu(), format_with_uniform_texel_support, &format_properties); if (!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) { printf("%s Test requires %s to support VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT\n", kSkipPrefix, format_with_uniform_texel_support_string); return; } vkGetPhysicalDeviceFormatProperties(gpu(), format_without_texel_support, &format_properties); if ((format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) || (format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) { printf( "%s Test requires %s to not support VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT nor " "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT\n", kSkipPrefix, format_without_texel_support_string); return; } // Create a test buffer--buffer must have been created using VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT or // VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, so use a different usage value instead to cause an error const VkDeviceSize resource_size = 1024; const VkBufferCreateInfo bad_buffer_info = VkBufferObj::create_info(resource_size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT); VkBufferObj bad_buffer; bad_buffer.init(*m_device, bad_buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); // Create a test buffer view VkBufferViewCreateInfo buff_view_ci = {}; buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; buff_view_ci.buffer = bad_buffer.handle(); buff_view_ci.format = format_with_uniform_texel_support; buff_view_ci.range = VK_WHOLE_SIZE; auto CatchError = [this, &buff_view_ci](const string &desired_error_string) { VkBufferView buff_view; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_error_string); VkResult err = vkCreateBufferView(m_device->device(), &buff_view_ci, NULL, &buff_view); m_errorMonitor->VerifyFound(); // If previous error is success, it still created the view, so delete it if (err == VK_SUCCESS) { vkDestroyBufferView(m_device->device(), buff_view, NULL); } }; CatchError("VUID-VkBufferViewCreateInfo-buffer-00932"); // Create a better test buffer const VkBufferCreateInfo buffer_info = VkBufferObj::create_info(resource_size, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT); VkBufferObj buffer; buffer.init(*m_device, buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); // Offset must be less than the size of the buffer, so set it equal to the buffer size to cause an error buff_view_ci.buffer = buffer.handle(); buff_view_ci.offset = buffer.create_info().size; CatchError("VUID-VkBufferViewCreateInfo-offset-00925"); // Offset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment so add 1 to ensure it is not buff_view_ci.offset = minTexelBufferOffsetAlignment + 1; CatchError("VUID-VkBufferViewCreateInfo-offset-00926"); // Set offset to acceptable value for range tests buff_view_ci.offset = minTexelBufferOffsetAlignment; // Setting range equal to 0 will cause an error to occur buff_view_ci.range = 0; CatchError("VUID-VkBufferViewCreateInfo-range-00928"); size_t format_size = FormatSize(buff_view_ci.format); // Range must be a multiple of the element size 
of format, so add one to ensure it is not
    buff_view_ci.range = format_size + 1;
    CatchError("VUID-VkBufferViewCreateInfo-range-00929");

    // Twice the element size of format multiplied by VkPhysicalDeviceLimits::maxTexelBufferElements guarantees range divided by the
    // element size is greater than maxTexelBufferElements, causing failure
    buff_view_ci.range = 2 * format_size * dev_limits.maxTexelBufferElements;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferViewCreateInfo-range-00930");
    CatchError("VUID-VkBufferViewCreateInfo-offset-00931");

    // Set range to acceptable value for buffer tests
    buff_view_ci.format = format_without_texel_support;
    buff_view_ci.range = VK_WHOLE_SIZE;
    // `buffer` was created using VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT so we can use that for the first buffer test
    CatchError("VUID-VkBufferViewCreateInfo-buffer-00933");

    // Create a new buffer using VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT
    const VkBufferCreateInfo storage_buffer_info = VkBufferObj::create_info(resource_size, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT);
    VkBufferObj storage_buffer;
    storage_buffer.init(*m_device, storage_buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    buff_view_ci.buffer = storage_buffer.handle();
    CatchError("VUID-VkBufferViewCreateInfo-buffer-00934");
}

TEST_F(VkLayerTest, InvalidDynamicOffsetCases) {
    // Create a descriptorSet w/ dynamic descriptor and then hit 3 offset error
    // cases:
    // 1. No dynamicOffset supplied
    // 2. Too many dynamicOffsets supplied
    // 3. Dynamic offset oversteps buffer being updated
    VkResult err;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         " requires 1 dynamicOffsets, but only 0 dynamicOffsets are left in pDynamicOffsets ");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorPoolSize ds_type_count = {};
    ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    ds_type_count.descriptorCount = 1;

    VkDescriptorPoolCreateInfo ds_pool_ci = {};
    ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    ds_pool_ci.pNext = NULL;
    ds_pool_ci.maxSets = 1;
    ds_pool_ci.poolSizeCount = 1;
    ds_pool_ci.pPoolSizes = &ds_type_count;

    VkDescriptorPool ds_pool;
    err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorSetLayoutBinding dsl_binding = {};
    dsl_binding.binding = 0;
    dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    dsl_binding.descriptorCount = 1;
    dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
    dsl_binding.pImmutableSamplers = NULL;

    const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});

    VkDescriptorSet descriptorSet;
    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = 1;
    alloc_info.descriptorPool = ds_pool;
    alloc_info.pSetLayouts = &ds_layout.handle();
    err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
    ASSERT_VK_SUCCESS(err);

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});

    // Create a buffer to update the descriptor with
    uint32_t qfi = 0;
    VkBufferCreateInfo buffCI = {};
    buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffCI.size = 1024;
    buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buffCI.queueFamilyIndexCount = 1;
    buffCI.pQueueFamilyIndices = &qfi;

    VkBuffer dyub;
    err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub);
    ASSERT_VK_SUCCESS(err);
    // Allocate memory
and bind to buffer so we can make it to the appropriate error VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), dyub, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), dyub, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub, mem, 0); ASSERT_VK_SUCCESS(err); // Correctly update descriptor to avoid "NOT_UPDATED" error VkDescriptorBufferInfo buffInfo = {}; buffInfo.buffer = dyub; buffInfo.offset = 0; buffInfo.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; descriptor_write.pBufferInfo = &buffInfo; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); m_errorMonitor->VerifyFound(); uint32_t pDynOff[2] = {512, 756}; // Now cause error b/c too many dynOffsets in array for # of dyn descriptors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Attempting to bind 1 descriptorSets with 1 dynamic descriptors, but "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 2, pDynOff); m_errorMonitor->VerifyFound(); // Finally cause error due to dynamicOffset being too big m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, " dynamic offset 512 combined with offset 0 and range 1024 that oversteps the buffer size of 1024"); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // This update should succeed, but offset size of 512 will overstep buffer // /w range 1024 & size 1024 vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 1, pDynOff); m_commandBuffer->Draw(1, 0, 0, 0); 
m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyBuffer(m_device->device(), dyub, NULL); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, DescriptorBufferUpdateNoMemoryBound) { TEST_DESCRIPTION("Attempt to update a descriptor with a non-sparse buffer that doesn't have memory bound"); VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindBufferMemory()."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkUpdateDescriptorSets() failed write update validation for Descriptor Set 0x"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer dyub; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub); ASSERT_VK_SUCCESS(err); // Attempt to update descriptor without binding memory to it VkDescriptorBufferInfo buffInfo = {}; buffInfo.buffer = dyub; buffInfo.offset = 0; buffInfo.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; descriptor_write.pBufferInfo = &buffInfo; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), dyub, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidPushConstants) { ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineLayout pipeline_layout; VkPushConstantRange pc_range = {}; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = 
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pushConstantRangeCount = 1; pipeline_layout_ci.pPushConstantRanges = &pc_range; // // Check for invalid push constant ranges in pipeline layouts. // struct PipelineLayoutTestCase { VkPushConstantRange const range; char const *msg; }; const uint32_t too_big = m_device->props.limits.maxPushConstantsSize + 0x4; const std::array<PipelineLayoutTestCase, 10> range_tests = {{ {{VK_SHADER_STAGE_VERTEX_BIT, 0, 0}, "vkCreatePipelineLayout() call has push constants index 0 with size 0."}, {{VK_SHADER_STAGE_VERTEX_BIT, 0, 1}, "vkCreatePipelineLayout() call has push constants index 0 with size 1."}, {{VK_SHADER_STAGE_VERTEX_BIT, 4, 1}, "vkCreatePipelineLayout() call has push constants index 0 with size 1."}, {{VK_SHADER_STAGE_VERTEX_BIT, 4, 0}, "vkCreatePipelineLayout() call has push constants index 0 with size 0."}, {{VK_SHADER_STAGE_VERTEX_BIT, 1, 4}, "vkCreatePipelineLayout() call has push constants index 0 with offset 1. Offset must"}, {{VK_SHADER_STAGE_VERTEX_BIT, 0, too_big}, "vkCreatePipelineLayout() call has push constants index 0 with offset "}, {{VK_SHADER_STAGE_VERTEX_BIT, too_big, too_big}, "vkCreatePipelineLayout() call has push constants index 0 with offset "}, {{VK_SHADER_STAGE_VERTEX_BIT, too_big, 4}, "vkCreatePipelineLayout() call has push constants index 0 with offset "}, {{VK_SHADER_STAGE_VERTEX_BIT, 0xFFFFFFF0, 0x00000020}, "vkCreatePipelineLayout() call has push constants index 0 with offset "}, {{VK_SHADER_STAGE_VERTEX_BIT, 0x00000020, 0xFFFFFFF0}, "vkCreatePipelineLayout() call has push constants index 0 with offset "}, }}; // Check for invalid offset and size for (const auto &iter : range_tests) { pc_range = iter.range; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, iter.msg); vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } // Check for invalid stage flag pc_range.offset = 0; pc_range.size = 16; pc_range.stageFlags = 0; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreatePipelineLayout: value of pCreateInfo->pPushConstantRanges[0].stageFlags must not be 0"); vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); // Check for duplicate stage flags in a list of push constant ranges. // A shader can only have one push constant block and that block is mapped // to the push constant range that has that shader's stage flag set. // The shader's stage flag can only appear once in all the ranges, so the // implementation can find the one and only range to map it to. const uint32_t ranges_per_test = 5; struct DuplicateStageFlagsTestCase { VkPushConstantRange const ranges[ranges_per_test]; std::vector<char const *> const msg; }; // Overlapping ranges are OK, but a stage flag can appear only once. 
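    // Illustration (not one of the cases below): {VK_SHADER_STAGE_VERTEX_BIT, 0, 4} together with
    // {VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4} is legal even though the byte ranges overlap, whereas
    // {VK_SHADER_STAGE_VERTEX_BIT, 0, 4} together with {VK_SHADER_STAGE_VERTEX_BIT, 8, 4} is rejected
    // because the vertex stage appears in two ranges.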
const std::array<DuplicateStageFlagsTestCase, 3> duplicate_stageFlags_tests = { { {{{VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}}, { "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 1.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 2.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 3.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 4.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 2.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 3.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 4.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 3.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 4.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 3 and 4.", }}, {{{VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4}, {VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4}}, { "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 3.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 4.", }}, {{{VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4}, {VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4}}, { "vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 3.", }}, }, }; for (const auto &iter : duplicate_stageFlags_tests) { pipeline_layout_ci.pPushConstantRanges = iter.ranges; pipeline_layout_ci.pushConstantRangeCount = ranges_per_test; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, iter.msg.begin(), iter.msg.end()); vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } // // CmdPushConstants tests // // Setup a pipeline layout with ranges: [0,32) [16,80) const std::vector<VkPushConstantRange> pc_range2 = {{VK_SHADER_STAGE_VERTEX_BIT, 16, 64}, {VK_SHADER_STAGE_FRAGMENT_BIT, 0, 32}}; const VkPipelineLayoutObj pipeline_layout_obj(m_device, {}, pc_range2); const uint8_t dummy_values[100] = {}; m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Check for invalid stage flag // Note that VU 00996 isn't reached due to parameter validation m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdPushConstants: value of stageFlags must not be 0"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), 0, 0, 16, dummy_values); m_errorMonitor->VerifyFound(); // Positive tests for the overlapping ranges m_errorMonitor->ExpectSuccess(); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16, dummy_values); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT, 32, 48, dummy_values); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 16, 16, dummy_values); m_errorMonitor->VerifyNotFound(); // Wrong cmd stages for extant range // No range for all cmd stages -- 
"VUID-vkCmdPushConstants-offset-01795" VUID-vkCmdPushConstants-offset-01795 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795"); // Missing cmd stages for found overlapping range -- "VUID-vkCmdPushConstants-offset-01796" VUID-vkCmdPushConstants-offset-01796 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01796"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_GEOMETRY_BIT, 0, 16, dummy_values); m_errorMonitor->VerifyFound(); // Wrong no extant range m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_FRAGMENT_BIT, 80, 4, dummy_values); m_errorMonitor->VerifyFound(); // Wrong overlapping extent m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, 20, dummy_values); m_errorMonitor->VerifyFound(); // Wrong stage flags for valid overlapping range m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01796"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT, 16, 16, dummy_values); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, DescriptorSetCompatibility) { // Test various desriptorSet errors with bad binding combinations using std::vector; VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); static const uint32_t NUM_DESCRIPTOR_TYPES = 5; VkDescriptorPoolSize ds_type_count[NUM_DESCRIPTOR_TYPES] = {}; ds_type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count[0].descriptorCount = 10; ds_type_count[1].type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; ds_type_count[1].descriptorCount = 2; ds_type_count[2].type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; ds_type_count[2].descriptorCount = 2; ds_type_count[3].type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count[3].descriptorCount = 5; // TODO : LunarG ILO driver currently asserts in desc.c w/ INPUT_ATTACHMENT // type // ds_type_count[4].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; ds_type_count[4].type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; ds_type_count[4].descriptorCount = 2; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 5; ds_pool_ci.poolSizeCount = NUM_DESCRIPTOR_TYPES; ds_pool_ci.pPoolSizes = ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); static const uint32_t MAX_DS_TYPES_IN_LAYOUT = 2; VkDescriptorSetLayoutBinding dsl_binding[MAX_DS_TYPES_IN_LAYOUT] = {}; dsl_binding[0].binding = 0; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[0].descriptorCount = 5; dsl_binding[0].stageFlags = VK_SHADER_STAGE_ALL; dsl_binding[0].pImmutableSamplers = NULL; // Create layout identical to set0 layout but w/ different stageFlags VkDescriptorSetLayoutBinding dsl_fs_stage_only = {}; dsl_fs_stage_only.binding = 0; dsl_fs_stage_only.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; 
dsl_fs_stage_only.descriptorCount = 5; dsl_fs_stage_only.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; // Different stageFlags to cause error at // bind time dsl_fs_stage_only.pImmutableSamplers = NULL; vector<VkDescriptorSetLayoutObj> ds_layouts; // Create 4 unique layouts for full pipelineLayout, and 1 special fs-only // layout for error case ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0])); const VkDescriptorSetLayoutObj ds_layout_fs_only(m_device, {dsl_fs_stage_only}); dsl_binding[0].binding = 0; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; dsl_binding[0].descriptorCount = 2; dsl_binding[1].binding = 1; dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; dsl_binding[1].descriptorCount = 2; dsl_binding[1].stageFlags = VK_SHADER_STAGE_ALL; dsl_binding[1].pImmutableSamplers = NULL; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>({dsl_binding[0], dsl_binding[1]})); dsl_binding[0].binding = 0; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dsl_binding[0].descriptorCount = 5; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0])); dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; dsl_binding[0].descriptorCount = 2; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0])); const auto &ds_vk_layouts = MakeVkHandles<VkDescriptorSetLayout>(ds_layouts); static const uint32_t NUM_SETS = 4; VkDescriptorSet descriptorSet[NUM_SETS] = {}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorPool = ds_pool; alloc_info.descriptorSetCount = ds_vk_layouts.size(); alloc_info.pSetLayouts = ds_vk_layouts.data(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, descriptorSet); ASSERT_VK_SUCCESS(err); VkDescriptorSet ds0_fs_only = {}; alloc_info.descriptorSetCount = 1; alloc_info.pSetLayouts = &ds_layout_fs_only.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &ds0_fs_only); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layouts[0], &ds_layouts[1]}); // Create pipelineLayout with only one setLayout const VkPipelineLayoutObj single_pipe_layout(m_device, {&ds_layouts[0]}); // Create pipelineLayout with 2 descriptor setLayout at index 0 const VkPipelineLayoutObj pipe_layout_one_desc(m_device, {&ds_layouts[3]}); // Create pipelineLayout with 5 SAMPLER descriptor setLayout at index 0 const VkPipelineLayoutObj pipe_layout_five_samp(m_device, {&ds_layouts[2]}); // Create pipelineLayout with UB type, but stageFlags for FS only VkPipelineLayoutObj pipe_layout_fs_only(m_device, {&ds_layout_fs_only}); // Create pipelineLayout w/ incompatible set0 layout, but set1 is fine const VkPipelineLayoutObj pipe_layout_bad_set0(m_device, {&ds_layout_fs_only, &ds_layouts[1]}); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); 
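    // Note: the pipeline below is created against pipe_layout_fs_only, whose binding 0 is visible to the
    // fragment stage only; the draw-time compatibility checks later in this test compare the sets bound
    // through other layouts against that creation layout.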
pipe.CreateVKPipeline(pipe_layout_fs_only.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // TODO : Want to cause various binding incompatibility issues here to test // DrawState // First cause various verify_layout_compatibility() fails // Second disturb early and late sets and verify INFO msgs // VerifySetLayoutCompatibility fail cases: // 1. invalid VkPipelineLayout (layout) passed into vkCmdBindDescriptorSets m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-layout-parameter"); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, (VkPipelineLayout)((size_t)0xbaadb1be), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 2. layoutIndex exceeds # of layouts in layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " attempting to bind set to index 1"); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, single_pipe_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 3. Pipeline setLayout[0] has 2 descriptors, but set being bound has 5 // descriptors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " has 2 descriptors, but DescriptorSetLayout "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_one_desc.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 4. same # of descriptors but mismatch in type m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is type 'VK_DESCRIPTOR_TYPE_SAMPLER' but binding "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_five_samp.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 5. same # of descriptors but mismatch in stageFlags m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " has stageFlags 16 but binding 0 for DescriptorSetLayout "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_fs_only.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // Now that we're done actively using the pipelineLayout that gfx pipeline // was created with, we should be able to delete it. Do that now to verify // that validation obeys pipelineLayout lifetime pipe_layout_fs_only.Reset(); // Cause draw-time errors due to PSO incompatibilities // 1. Error due to not binding required set (we actually use same code as // above to disturb set0) vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_bad_set0.handle(), 1, 1, &descriptorSet[1], 0, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " uses set #0 but that set is not bound."); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // 2. 
Error due to bound set not being compatible with PSO's // VkPipelineLayout (diff stageFlags in this case) vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " bound as set #0 is not compatible with "); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // Remaining clean-up m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, NoBeginCommandBuffer) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "You must call vkBeginCommandBuffer() before this call to "); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBufferObj commandBuffer(m_device, m_commandPool); // Call EndCommandBuffer() w/o calling BeginCommandBuffer() vkEndCommandBuffer(commandBuffer.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SecondaryCommandBufferNullRenderpass) { ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBufferObj cb(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); // Force the failure by not setting the Renderpass and Framebuffer fields VkCommandBufferInheritanceInfo cmd_buf_hinfo = {}; cmd_buf_hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; cmd_buf_info.pInheritanceInfo = &cmd_buf_hinfo; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCommandBufferBeginInfo-flags-00053"); vkBeginCommandBuffer(cb.handle(), &cmd_buf_info); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SecondaryCommandBufferRerecordedExplicitReset) { ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "was destroyed or rerecorded"); // A pool we can reset in. VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); secondary.end(); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); // rerecording of secondary secondary.reset(); // explicit reset here. secondary.begin(); secondary.end(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SecondaryCommandBufferRerecordedNoReset) { ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "was destroyed or rerecorded"); // A pool we can reset in. 
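    // VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT permits the implicit reset performed by the second
    // secondary.begin() below; it is that re-recording which invalidates the primary that already executed
    // the secondary.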
VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); secondary.end(); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); // rerecording of secondary secondary.begin(); // implicit reset in begin secondary.end(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CascadedInvalidation) { ASSERT_NO_FATAL_FAILURE(Init()); VkEventCreateInfo eci = {VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, nullptr, 0}; VkEvent event; vkCreateEvent(m_device->device(), &eci, nullptr, &event); VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); vkCmdSetEvent(secondary.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT); secondary.end(); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_commandBuffer->end(); // destroying the event should invalidate both primary and secondary CB vkDestroyEvent(m_device->device(), event, nullptr); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "invalid because bound Event"); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CommandBufferResetErrors) { // Cause error due to Begin while recording CB // Then cause 2 errors for attempting to reset CB w/o having // VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT set for the pool from // which CBs were allocated. Note that this bit is off by default. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Cannot call Begin on command buffer"); ASSERT_NO_FATAL_FAILURE(Init()); // Calls AllocateCommandBuffers VkCommandBufferObj commandBuffer(m_device, m_commandPool); // Force the failure by setting the Renderpass and Framebuffer fields with (fake) data VkCommandBufferInheritanceInfo cmd_buf_hinfo = {}; cmd_buf_hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; cmd_buf_info.pInheritanceInfo = &cmd_buf_hinfo; // Begin CB to transition to recording state vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info); // Can't re-begin. 
This should trigger error vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetCommandBuffer-commandBuffer-00046"); VkCommandBufferResetFlags flags = 0; // Don't care about flags for this test // Reset attempt will trigger error due to incorrect CommandPool state vkResetCommandBuffer(commandBuffer.handle(), flags); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBeginCommandBuffer-commandBuffer-00050"); // Transition CB to RECORDED state vkEndCommandBuffer(commandBuffer.handle()); // Now attempting to Begin will implicitly reset, which triggers error vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidPipelineCreateState) { // Attempt to Create Gfx Pipeline w/o a VS VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid Pipeline CreateInfo State: Vertex Shader required"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); VkPipelineRasterizationStateCreateInfo rs_state_ci = {}; rs_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state_ci.polygonMode = VK_POLYGON_MODE_FILL; rs_state_ci.cullMode = VK_CULL_MODE_BACK_BIT; rs_state_ci.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rs_state_ci.depthClampEnable = VK_FALSE; rs_state_ci.rasterizerDiscardEnable = VK_TRUE; rs_state_ci.depthBiasEnable = VK_FALSE; rs_state_ci.lineWidth = 1.0f; VkPipelineVertexInputStateCreateInfo vi_ci = {}; vi_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vi_ci.pNext = nullptr; vi_ci.vertexBindingDescriptionCount = 0; vi_ci.pVertexBindingDescriptions = nullptr; vi_ci.vertexAttributeDescriptionCount = 0; vi_ci.pVertexAttributeDescriptions = nullptr; VkPipelineInputAssemblyStateCreateInfo ia_ci = {}; ia_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkPipelineShaderStageCreateInfo shaderStages[2]; memset(&shaderStages, 0, 2 * sizeof(VkPipelineShaderStageCreateInfo)); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj 
fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); shaderStages[0] = fs.GetStageCreateInfo(); // should be: vs.GetStageCreateInfo(); shaderStages[1] = fs.GetStageCreateInfo(); VkGraphicsPipelineCreateInfo gp_ci = {}; gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; gp_ci.pViewportState = nullptr; // no viewport b/c rasterizer is disabled gp_ci.pRasterizationState = &rs_state_ci; gp_ci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; gp_ci.layout = pipeline_layout.handle(); gp_ci.renderPass = renderPass(); gp_ci.pVertexInputState = &vi_ci; gp_ci.pInputAssemblyState = &ia_ci; gp_ci.stageCount = 1; gp_ci.pStages = shaderStages; VkPipelineCacheCreateInfo pc_ci = {}; pc_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; pc_ci.initialDataSize = 0; pc_ci.pInitialData = 0; VkPipeline pipeline; VkPipelineCache pipelineCache; err = vkCreatePipelineCache(m_device->device(), &pc_ci, NULL, &pipelineCache); ASSERT_VK_SUCCESS(err); err = vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &gp_ci, NULL, &pipeline); m_errorMonitor->VerifyFound(); // Finally, check the string validation for the shader stage pName variable. Correct the shader stage data, and bork the // string before calling again m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains invalid characters or is badly formed"); shaderStages[0] = vs.GetStageCreateInfo(); const uint8_t cont_char = 0xf8; char bad_string[] = {static_cast<char>(cont_char), static_cast<char>(cont_char), static_cast<char>(cont_char), static_cast<char>(cont_char)}; shaderStages[0].pName = bad_string; err = vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &gp_ci, NULL, &pipeline); m_errorMonitor->VerifyFound(); vkDestroyPipelineCache(m_device->device(), pipelineCache, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureDisable) { // Enable sample shading in pipeline when the feature is disabled. ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Disable sampleRateShading here VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); device_features.sampleRateShading = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(&device_features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Cause the error by enabling sample shading... auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.sampleShadingEnable = VK_TRUE; }; CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineMultisampleStateCreateInfo-sampleShadingEnable-00784"); } TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureEnable) { // Enable sample shading in pipeline when the feature is disabled. 
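    // This variant requires the sampleRateShading feature (it skips if unavailable) and exercises the valid
    // range of minSampleShading rather than the feature-disabled case.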
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Require sampleRateShading here VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); if (device_features.sampleRateShading == VK_FALSE) { printf("%s SampleRateShading feature is disabled -- skipping related checks.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(&device_features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); auto range_test = [this](float value, bool positive_test) { auto info_override = [value](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.sampleShadingEnable = VK_TRUE; helper.pipe_ms_state_ci_.minSampleShading = value; }; CreatePipelineHelper::OneshotTest(*this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineMultisampleStateCreateInfo-minSampleShading-00786", positive_test); }; range_test(NearestSmaller(0.0F), false); range_test(NearestGreater(1.0F), false); range_test(0.0F, /* positive_test= */ true); range_test(1.0F, /* positive_test= */ true); } TEST_F(VkLayerTest, InvalidPipelineSamplePNext) { // Enable sample shading in pipeline when the feature is disabled. ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Set up the extension structs auto sampleLocations = chain_util::Init<VkPipelineSampleLocationsStateCreateInfoEXT>(); auto coverageToColor = chain_util::Init<VkPipelineCoverageToColorStateCreateInfoNV>(); auto coverageModulation = chain_util::Init<VkPipelineCoverageModulationStateCreateInfoNV>(); auto discriminatrix = [this](const char *name) { return DeviceExtensionSupported(gpu(), nullptr, name); }; chain_util::ExtensionChain chain(discriminatrix, &m_device_extension_names); chain.Add(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME, sampleLocations); chain.Add(VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME, coverageToColor); chain.Add(VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME, coverageModulation); const void *extension_head = chain.Head(); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (extension_head) { auto good_chain = [extension_head](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.pNext = extension_head; }; CreatePipelineHelper::OneshotTest(*this, good_chain, (VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT), "No error", true); } else { printf("%s Required extension not present -- skipping positive checks.\n", kSkipPrefix); } auto instance_ci = chain_util::Init<VkInstanceCreateInfo>(); auto bad_chain = [&instance_ci](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.pNext = &instance_ci; }; CreatePipelineHelper::OneshotTest(*this, bad_chain, VK_DEBUG_REPORT_WARNING_BIT_EXT, "VUID-VkPipelineMultisampleStateCreateInfo-pNext-pNext"); } /*// TODO : This test should be good, but needs Tess support in compiler to run TEST_F(VkLayerTest, InvalidPatchControlPoints) { // Attempt to Create Gfx Pipeline w/o a VS VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH primitive "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = 
vkCreateDescriptorPool(m_device->device(), VK_DESCRIPTOR_POOL_USAGE_NON_FREE, 1, &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.pNext = NULL; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &dsl_binding; VkDescriptorSetLayout ds_layout; err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorSet descriptorSet; err = vkAllocateDescriptorSets(m_device->device(), ds_pool, VK_DESCRIPTOR_SET_USAGE_NON_FREE, 1, &ds_layout, &descriptorSet); ASSERT_VK_SUCCESS(err); VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout; err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); ASSERT_VK_SUCCESS(err); VkPipelineShaderStageCreateInfo shaderStages[3]; memset(&shaderStages, 0, 3 * sizeof(VkPipelineShaderStageCreateInfo)); VkShaderObj vs(m_device,bindStateVertShaderText,VK_SHADER_STAGE_VERTEX_BIT, this); // Just using VS txt for Tess shaders as we don't care about functionality VkShaderObj tc(m_device,bindStateVertShaderText,VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj te(m_device,bindStateVertShaderText,VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT; shaderStages[0].shader = vs.handle(); shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; shaderStages[1].stage = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT; shaderStages[1].shader = tc.handle(); shaderStages[2].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; shaderStages[2].stage = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT; shaderStages[2].shader = te.handle(); VkPipelineInputAssemblyStateCreateInfo iaCI = {}; iaCI.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; iaCI.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; VkPipelineTessellationStateCreateInfo tsCI = {}; tsCI.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; tsCI.patchControlPoints = 0; // This will cause an error VkGraphicsPipelineCreateInfo gp_ci = {}; gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; gp_ci.pNext = NULL; gp_ci.stageCount = 3; gp_ci.pStages = shaderStages; gp_ci.pVertexInputState = NULL; gp_ci.pInputAssemblyState = &iaCI; gp_ci.pTessellationState = &tsCI; gp_ci.pViewportState = NULL; gp_ci.pRasterizationState = NULL; gp_ci.pMultisampleState = NULL; gp_ci.pDepthStencilState = NULL; gp_ci.pColorBlendState = NULL; gp_ci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; gp_ci.layout = pipeline_layout; gp_ci.renderPass = renderPass(); VkPipelineCacheCreateInfo pc_ci = {}; pc_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; pc_ci.pNext = NULL; pc_ci.initialSize = 0; pc_ci.initialData = 0; pc_ci.maxSize = 0; VkPipeline pipeline; VkPipelineCache pipelineCache; err = vkCreatePipelineCache(m_device->device(), &pc_ci, NULL, 
&pipelineCache); ASSERT_VK_SUCCESS(err); err = vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &gp_ci, NULL, &pipeline); m_errorMonitor->VerifyFound(); vkDestroyPipelineCache(m_device->device(), pipelineCache, NULL); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } */ TEST_F(VkLayerTest, PSOViewportStateTests) { TEST_DESCRIPTION("Test VkPipelineViewportStateCreateInfo viewport and scissor count validation for non-multiViewport"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto break_vp_state = [](CreatePipelineHelper &helper) { helper.rs_state_ci_.rasterizerDiscardEnable = VK_FALSE; helper.gp_ci_.pViewportState = nullptr; }; CreatePipelineHelper::OneshotTest(*this, break_vp_state, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750"); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[] = {scissor, scissor}; // test viewport and scissor arrays using std::vector; struct TestCase { uint32_t viewport_count; VkViewport *viewports; uint32_t scissor_count; VkRect2D *scissors; vector<std::string> vuids; }; vector<TestCase> test_cases = { {0, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {2, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, nullptr, 1, scissors, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}, {1, viewports, 1, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {1, nullptr, 1, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {2, nullptr, 3, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", 
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, }; for (const auto &test_case : test_cases) { const auto break_vp = [&test_case](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } vector<TestCase> dyn_test_cases = { {0, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {2, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, nullptr, 3, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, }; const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; for (const auto &test_case : dyn_test_cases) { const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = {}; dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dyn_state_ci.dynamicStateCount = size(dyn_states); dyn_state_ci.pDynamicStates = dyn_states; helper.dyn_state_ci_ = dyn_state_ci; helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } } // Set Extension dynamic states without enabling the required Extensions. 
TEST_F(VkLayerTest, ExtensionDynamicStatesSetWOExtensionEnabled) { TEST_DESCRIPTION("Create a graphics pipeline with Extension dynamic states without enabling the required Extensions."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); using std::vector; struct TestCase { uint32_t dynamic_state_count; VkDynamicState dynamic_state; char const *errmsg; }; vector<TestCase> dyn_test_cases = { {1, VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, "contains VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, but VK_NV_clip_space_w_scaling"}, {1, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, "contains VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, but VK_EXT_discard_rectangles"}, {1, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, "contains VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, but VK_EXT_sample_locations"}, }; for (const auto &test_case : dyn_test_cases) { VkDynamicState state[1]; state[0] = test_case.dynamic_state; const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = {}; dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dyn_state_ci.dynamicStateCount = test_case.dynamic_state_count; dyn_state_ci.pDynamicStates = state; helper.dyn_state_ci_ = dyn_state_ci; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.errmsg); } } TEST_F(VkLayerTest, PSOViewportStateMultiViewportTests) { TEST_DESCRIPTION("Test VkPipelineViewportStateCreateInfo viewport and scissor count validation for multiViewport feature"); ASSERT_NO_FATAL_FAILURE(Init()); // enables all supported features if (!m_device->phy().features().multiViewport) { printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix); return; } // at least 16 viewports supported from here on ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[] = {scissor, scissor}; using std::vector; struct TestCase { uint32_t viewport_count; VkViewport *viewports; uint32_t scissor_count; VkRect2D *scissors; vector<std::string> vuids; }; vector<TestCase> test_cases = { {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, {2, nullptr, 2, scissors, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}, {2, viewports, 2, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {2, nullptr, 2, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, }; const auto max_viewports = m_device->phy().properties().limits.maxViewports; const bool max_viewports_maxxed = max_viewports == std::numeric_limits<decltype(max_viewports)>::max(); if (max_viewports_maxxed) { printf("%s VkPhysicalDeviceLimits::maxViewports is UINT32_MAX -- skipping part of test requiring to exceed maxViewports.\n", kSkipPrefix); } else { 
const auto too_much_viewports = max_viewports + 1; // avoid potentially big allocations by using only nullptr test_cases.push_back({too_much_viewports, nullptr, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}); test_cases.push_back({2, viewports, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}); test_cases.push_back( {too_much_viewports, nullptr, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}); } for (const auto &test_case : test_cases) { const auto break_vp = [&test_case](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } vector<TestCase> dyn_test_cases = { {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, }; if (!max_viewports_maxxed) { const auto too_much_viewports = max_viewports + 1; // avoid potentially big allocations by using only nullptr dyn_test_cases.push_back({too_much_viewports, nullptr, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}); dyn_test_cases.push_back({2, viewports, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}); dyn_test_cases.push_back({too_much_viewports, nullptr, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219"}}); } const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; for (const auto &test_case : dyn_test_cases) { const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = {}; dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dyn_state_ci.dynamicStateCount = size(dyn_states); dyn_state_ci.pDynamicStates = dyn_states; helper.dyn_state_ci_ = dyn_state_ci; helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } } TEST_F(VkLayerTest, DynViewportAndScissorUndefinedDrawState) { TEST_DESCRIPTION("Test viewport and scissor dynamic state that is not set before draw"); ASSERT_NO_FATAL_FAILURE(Init()); // TODO: should also test on !multiViewport if (!m_device->phy().features().multiViewport) { printf("%s Device does not support multiple viewports/scissors; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device); VkPipelineObj pipeline_dyn_vp(m_device); pipeline_dyn_vp.AddShader(&vs); pipeline_dyn_vp.AddShader(&fs); pipeline_dyn_vp.AddDefaultColorAttachment(); pipeline_dyn_vp.MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT); pipeline_dyn_vp.SetScissor(m_scissors); ASSERT_VK_SUCCESS(pipeline_dyn_vp.CreateVKPipeline(pipeline_layout.handle(), m_renderPass)); VkPipelineObj pipeline_dyn_sc(m_device); pipeline_dyn_sc.AddShader(&vs); pipeline_dyn_sc.AddShader(&fs); pipeline_dyn_sc.AddDefaultColorAttachment(); pipeline_dyn_sc.SetViewport(m_viewports); pipeline_dyn_sc.MakeDynamic(VK_DYNAMIC_STATE_SCISSOR); ASSERT_VK_SUCCESS(pipeline_dyn_sc.CreateVKPipeline(pipeline_layout.handle(), m_renderPass)); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic viewport(s) 0 are used by pipeline state object, "); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_dyn_vp.handle()); vkCmdSetViewport(m_commandBuffer->handle(), 1, 1, &m_viewports[0]); // Forgetting to set needed 0th viewport (PSO viewportCount == 1) m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic scissor(s) 0 are used by pipeline state object, "); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_dyn_sc.handle()); vkCmdSetScissor(m_commandBuffer->handle(), 1, 1, &m_scissors[0]); // Forgetting to set needed 0th scissor (PSO scissorCount == 1) m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, PSOLineWidthInvalid) { TEST_DESCRIPTION("Test non-1.0 lineWidth errors when pipeline is created and in vkCmdSetLineWidth"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo shader_state_cis[] = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; VkPipelineVertexInputStateCreateInfo vi_state_ci = {}; vi_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; VkPipelineInputAssemblyStateCreateInfo ia_state_ci = {}; ia_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo vp_state_ci = {}; vp_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; 
vp_state_ci.viewportCount = 1; vp_state_ci.pViewports = &viewport; vp_state_ci.scissorCount = 1; vp_state_ci.pScissors = &scissor; VkPipelineRasterizationStateCreateInfo rs_state_ci = {}; rs_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state_ci.rasterizerDiscardEnable = VK_FALSE; // lineWidth to be set by checks VkPipelineMultisampleStateCreateInfo ms_state_ci = {}; ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; // must match subpass att. VkPipelineColorBlendAttachmentState cba_state = {}; VkPipelineColorBlendStateCreateInfo cb_state_ci = {}; cb_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; cb_state_ci.attachmentCount = 1; // must match count in subpass cb_state_ci.pAttachments = &cba_state; const VkPipelineLayoutObj pipeline_layout(m_device); VkGraphicsPipelineCreateInfo gp_ci = {}; gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; gp_ci.stageCount = sizeof(shader_state_cis) / sizeof(VkPipelineShaderStageCreateInfo); gp_ci.pStages = shader_state_cis; gp_ci.pVertexInputState = &vi_state_ci; gp_ci.pInputAssemblyState = &ia_state_ci; gp_ci.pViewportState = &vp_state_ci; gp_ci.pRasterizationState = &rs_state_ci; gp_ci.pMultisampleState = &ms_state_ci; gp_ci.pColorBlendState = &cb_state_ci; gp_ci.layout = pipeline_layout.handle(); gp_ci.renderPass = renderPass(); gp_ci.subpass = 0; const std::vector<float> test_cases = {-1.0f, 0.0f, NearestSmaller(1.0f), NearestGreater(1.0f), NAN}; // test VkPipelineRasterizationStateCreateInfo::lineWidth for (const auto test_case : test_cases) { rs_state_ci.lineWidth = test_case; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00749"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), VK_NULL_HANDLE, 1, &gp_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // test vkCmdSetLineWidth m_commandBuffer->begin(); for (const auto test_case : test_cases) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetLineWidth-lineWidth-00788"); vkCmdSetLineWidth(m_commandBuffer->handle(), test_case); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, VUID_VkVertexInputBindingDescription_binding_00618) { TEST_DESCRIPTION( "Test VUID-VkVertexInputBindingDescription-binding-00618: binding must be less than " "VkPhysicalDeviceLimits::maxVertexInputBindings"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when binding is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings. 
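    // Valid binding numbers are 0 .. maxVertexInputBindings - 1, so using the limit itself is the smallest
    // out-of-range value.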
VkVertexInputBindingDescription vertex_input_binding_description{}; vertex_input_binding_description.binding = m_device->props.limits.maxVertexInputBindings; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 1; vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description; vertex_input_state.vertexAttributeDescriptionCount = 0; vertex_input_state.pVertexAttributeDescriptions = nullptr; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputBindingDescription-binding-00618"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputBindingDescription_stride_00619) { TEST_DESCRIPTION( "Test VUID-VkVertexInputBindingDescription-stride-00619: stride must be less than or equal to " "VkPhysicalDeviceLimits::maxVertexInputBindingStride"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = 
vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when stride is greater than VkPhysicalDeviceLimits::maxVertexInputBindingStride. VkVertexInputBindingDescription vertex_input_binding_description{}; vertex_input_binding_description.stride = m_device->props.limits.maxVertexInputBindingStride + 1; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 1; vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description; vertex_input_state.vertexAttributeDescriptionCount = 0; vertex_input_state.pVertexAttributeDescriptions = nullptr; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputBindingDescription-stride-00619"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, 
nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_location_00620) { TEST_DESCRIPTION( "Test VUID-VkVertexInputAttributeDescription-location-00620: location must be less than " "VkPhysicalDeviceLimits::maxVertexInputAttributes"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when location is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputAttributes. VkVertexInputAttributeDescription vertex_input_attribute_description{}; vertex_input_attribute_description.location = m_device->props.limits.maxVertexInputAttributes; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 0; vertex_input_state.pVertexBindingDescriptions = nullptr; vertex_input_state.vertexAttributeDescriptionCount = 1; vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = 
&rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-location-00620"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-binding-00615"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_binding_00621) { TEST_DESCRIPTION( "Test VUID-VkVertexInputAttributeDescription-binding-00621: binding must be less than " "VkPhysicalDeviceLimits::maxVertexInputBindings"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when binding is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings. VkVertexInputAttributeDescription vertex_input_attribute_description{}; vertex_input_attribute_description.binding = m_device->props.limits.maxVertexInputBindings; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 0; vertex_input_state.pVertexBindingDescriptions = nullptr; vertex_input_state.vertexAttributeDescriptionCount = 1; vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; 
rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-binding-00621"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-binding-00615"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_offset_00622) { TEST_DESCRIPTION( "Test VUID-VkVertexInputAttributeDescription-offset-00622: offset must be less than or equal to " "VkPhysicalDeviceLimits::maxVertexInputAttributeOffset"); EnableDeviceProfileLayer(); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); uint32_t maxVertexInputAttributeOffset = 0; { VkPhysicalDeviceProperties device_props = {}; vkGetPhysicalDeviceProperties(gpu(), &device_props); maxVertexInputAttributeOffset = device_props.limits.maxVertexInputAttributeOffset; if (maxVertexInputAttributeOffset == 0xFFFFFFFF) { // Attempt to artificially lower maximum offset PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT = (PFN_vkSetPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT"); if (!fpvkSetPhysicalDeviceLimitsEXT) { printf("%s All offsets are valid & device_profile_api not found; skipped.\n", kSkipPrefix); return; } device_props.limits.maxVertexInputAttributeOffset = device_props.limits.maxVertexInputBindingStride - 2; fpvkSetPhysicalDeviceLimitsEXT(gpu(), &device_props.limits); maxVertexInputAttributeOffset = device_props.limits.maxVertexInputAttributeOffset; } } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); VkVertexInputBindingDescription vertex_input_binding_description{}; vertex_input_binding_description.binding = 0; vertex_input_binding_description.stride = m_device->props.limits.maxVertexInputBindingStride; vertex_input_binding_description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; // Test when offset is greater than maximum. 
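// (maxVertexInputAttributeOffset is the largest byte offset allowed, so limit + 1 must be rejected.)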
VkVertexInputAttributeDescription vertex_input_attribute_description{}; vertex_input_attribute_description.format = VK_FORMAT_R8_UNORM; vertex_input_attribute_description.offset = maxVertexInputAttributeOffset + 1; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 1; vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description; vertex_input_state.vertexAttributeDescriptionCount = 1; vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_TRUE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = nullptr; // no viewport b/c rasterizer is disabled create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-offset-00622"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, NullRenderPass) { // Bind a NULL RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdBeginRenderPass: required parameter pRenderPassBegin specified as NULL"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); // Don't care about RenderPass handle b/c error should be flagged before // that vkCmdBeginRenderPass(m_commandBuffer->handle(), NULL, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, EndCommandBufferWithinRenderPass) { TEST_DESCRIPTION("End a command buffer with an active render pass"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to 
issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkEndCommandBuffer(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); // End command buffer properly to avoid driver issues. This is safe -- the // previous vkEndCommandBuffer should not have reached the driver. m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // TODO: Add test for VK_COMMAND_BUFFER_LEVEL_SECONDARY // TODO: Add test for VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT } TEST_F(VkLayerTest, FillBufferWithinRenderPass) { // Call CmdFillBuffer within an active renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; VkBufferObj dstBuffer; dstBuffer.init_as_dst(*m_device, (VkDeviceSize)1024, reqs); m_commandBuffer->FillBuffer(dstBuffer.handle(), 0, 4, 0x11111111); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, UpdateBufferWithinRenderPass) { // Call CmdUpdateBuffer within an active renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; VkBufferObj dstBuffer; dstBuffer.init_as_dst(*m_device, (VkDeviceSize)1024, reqs); VkDeviceSize dstOffset = 0; uint32_t Data[] = {1, 2, 3, 4, 5, 6, 7, 8}; VkDeviceSize dataSize = sizeof(Data) / sizeof(uint32_t); vkCmdUpdateBuffer(m_commandBuffer->handle(), dstBuffer.handle(), dstOffset, dataSize, &Data); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ClearColorImageWithBadRange) { TEST_DESCRIPTION("Record clear color with an invalid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearColorValue clear_color = {{0.0f, 0.0f, 0.0f, 1.0f}}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle(); // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseMipLevel-01470"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseMipLevel-01470"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692"); const 
VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseArrayLayer-01472"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseArrayLayer-01472"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, ClearDepthStencilWithBadRange) { TEST_DESCRIPTION("Record clear depth with an invalid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkImageObj image(m_device); image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); const VkImageAspectFlags ds_aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; image.SetLayout(ds_aspect, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearDepthStencilValue clear_value = {}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle(); // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474"); const VkImageSubresourceRange range = {ds_aspect, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694"); const VkImageSubresourceRange range = {ds_aspect, 1, 1, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694"); const VkImageSubresourceRange range = {ds_aspect, 0, 0, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694"); const VkImageSubresourceRange range = {ds_aspect, 0, 2, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01695"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 1, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01695"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 0}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-vkCmdClearDepthStencilImage-pRanges-01695"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 2}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, ClearColorImageWithinRenderPass) { // Call CmdClearColorImage within an active RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkClearColorValue clear_color; memset(clear_color.uint32, 0, sizeof(uint32_t) * 4); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image dstImage; dstImage.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); const VkImageSubresourceRange range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_COLOR_BIT); vkCmdClearColorImage(m_commandBuffer->handle(), dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &range); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ClearDepthStencilImageErrors) { // Hit errors related to vkCmdClearDepthStencilImage() // 1. Use an image that doesn't have VK_IMAGE_USAGE_TRANSFER_DST_BIT set // 2. Call CmdClearDepthStencilImage within an active RenderPass ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkClearDepthStencilValue clear_value = {0}; VkMemoryPropertyFlags reqs = 0; VkImageCreateInfo image_create_info = vk_testing::Image::create_info(); image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = depth_format; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Error here is that VK_IMAGE_USAGE_TRANSFER_DST_BIT is excluded for DS image that we'll call Clear on below image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; vk_testing::Image dst_image_bad_usage; dst_image_bad_usage.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); const VkImageSubresourceRange range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_DEPTH_BIT); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-image-00009"); vkCmdClearDepthStencilImage(m_commandBuffer->handle(), dst_image_bad_usage.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &range); m_errorMonitor->VerifyFound(); // Fix usage for next test case image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image dst_image; dst_image.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-renderpass"); vkCmdClearDepthStencilImage(m_commandBuffer->handle(), dst_image.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &range); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ClearColorAttachmentsOutsideRenderPass) { // Call CmdClearAttachmentss outside of an active RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearAttachments(): This call must be issued inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Start no RenderPass m_commandBuffer->begin(); VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 0; color_attachment.clearValue.color.float32[1] = 0; color_attachment.clearValue.color.float32[2] = 0; color_attachment.clearValue.color.float32[3] = 0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {32, 32}}}; vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, BufferMemoryBarrierNoBuffer) { // Try to add a buffer memory barrier with no buffer. 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pBufferMemoryBarriers[0].buffer specified as VK_NULL_HANDLE"); ASSERT_NO_FATAL_FAILURE(Init()); m_commandBuffer->begin(); VkBufferMemoryBarrier buf_barrier = {}; buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; buf_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; buf_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.buffer = VK_NULL_HANDLE; buf_barrier.offset = 0; buf_barrier.size = VK_WHOLE_SIZE; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidBarriers) { TEST_DESCRIPTION("A variety of ways to get VK_INVALID_BARRIER "); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } // Add a token self-dependency for this test to avoid unexpected errors m_addRenderPassSelfDependency = true; ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); // Use image unbound to memory in barrier m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindImageMemory()"); vk_testing::Image unbound_image; auto unbound_image_info = vk_testing::Image::create_info(); unbound_image_info.format = VK_FORMAT_B8G8R8A8_UNORM; unbound_image_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; unbound_image.init_no_mem(*m_device, unbound_image_info); auto unbound_subresource = vk_testing::Image::subresource_range(unbound_image_info, VK_IMAGE_ASPECT_COLOR_BIT); auto unbound_image_barrier = unbound_image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, unbound_subresource); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &unbound_image_barrier); m_errorMonitor->VerifyFound(); // Use buffer unbound to memory in barrier m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. 
Memory should be bound by calling vkBindBufferMemory()"); VkBufferObj unbound_buffer; auto unbound_buffer_info = VkBufferObj::create_info(16, VK_BUFFER_USAGE_TRANSFER_DST_BIT); unbound_buffer.init_no_mem(*m_device, unbound_buffer_info); auto unbound_buffer_barrier = unbound_buffer.buffer_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, 0, 16); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 1, &unbound_buffer_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-newLayout-01198"); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = NULL; img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; // New layout can't be UNDEFINED img_barrier.newLayout = VK_IMAGE_LAYOUT_UNDEFINED; img_barrier.image = m_renderTargets[0]->handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Transition image to color attachment optimal img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); // TODO: this looks vestigial or incomplete... 
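// The render pass below is begun and ended without recording a barrier; the duplicate-transition checks that follow first confirm that two identical transitions from UNDEFINED are accepted, then that repeating a transition from a defined layout is flagged.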
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // Can't send buffer memory barrier during a render pass vkCmdEndRenderPass(m_commandBuffer->handle()); // Duplicate barriers that change layout img_barrier.image = image.handle(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; VkImageMemoryBarrier img_barriers[2] = {img_barrier, img_barrier}; // Transitions from UNDEFINED are valid, even if duplicated m_errorMonitor->ExpectSuccess(); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 2, img_barriers); m_errorMonitor->VerifyNotFound(); // Duplication of layout transitions (not from undefined) are not valid img_barriers[0].oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barriers[0].newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barriers[1].oldLayout = img_barriers[0].oldLayout; img_barriers[1].newLayout = img_barriers[0].newLayout; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01197"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 2, img_barriers); m_errorMonitor->VerifyFound(); VkBufferObj buffer; VkMemoryPropertyFlags mem_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; buffer.init_as_src_and_dst(*m_device, 256, mem_reqs); VkBufferMemoryBarrier buf_barrier = {}; buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; buf_barrier.pNext = NULL; buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; buf_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; buf_barrier.buffer = buffer.handle(); buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.offset = 0; buf_barrier.size = VK_WHOLE_SIZE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferMemoryBarrier-offset-01187"); // Exceed the buffer size buf_barrier.offset = buffer.create_info().size + 1; // Offset greater than total size vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); buf_barrier.offset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferMemoryBarrier-size-01189"); buf_barrier.size = buffer.create_info().size + 1; // Size greater than total size vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); // Now exercise barrier aspect bit errors, first DS m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-image-01207"); VkDepthStencilObj ds_image(m_device); ds_image.Init(m_device, 128, 128, depth_format); ASSERT_TRUE(ds_image.initialized()); img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = ds_image.handle(); // Not having 
DEPTH or STENCIL set is an error img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Having only one of depth or stencil set for DS image is an error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-image-01207"); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Having anything other than DEPTH and STENCIL is an error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter"); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_COLOR_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Now test depth-only VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D16_UNORM, &format_props); if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) { VkDepthStencilObj d_image(m_device); d_image.Init(m_device, 128, 128, VK_FORMAT_D16_UNORM); ASSERT_TRUE(d_image.initialized()); img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = d_image.handle(); // DEPTH bit must be set m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // No bits other than DEPTH may be set m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_COLOR_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Now test stencil-only vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_S8_UINT, &format_props); if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) { VkDepthStencilObj s_image(m_device); s_image.Init(m_device, 128, 128, VK_FORMAT_S8_UINT); ASSERT_TRUE(s_image.initialized()); img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = s_image.handle(); // Use of the COLOR aspect on a stencil-only image is an error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Stencil-only image formats must have the 
VK_IMAGE_ASPECT_STENCIL_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Finally test color VkImageObj c_image(m_device); c_image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(c_image.initialized()); img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = c_image.handle(); // COLOR bit must be set m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // No bits other than COLOR may be set m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // A barrier's new and old VkImageLayout must be compatible with an image's VkImageUsageFlags. 
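// The table-driven block below creates one image per usage bit and pairs it with layouts that require a usage the image lacks, checking both the oldLayout and newLayout paths against the matching VUID.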
{ VkImageObj img_color(m_device); img_color.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_color.initialized()); VkImageObj img_ds(m_device); img_ds.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_ds.initialized()); VkImageObj img_xfer_src(m_device); img_xfer_src.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_src.initialized()); VkImageObj img_xfer_dst(m_device); img_xfer_dst.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_dst.initialized()); VkImageObj img_sampled(m_device); img_sampled.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_sampled.initialized()); VkImageObj img_input(m_device); img_input.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_input.initialized()); const struct { VkImageObj &image_obj; VkImageLayout bad_layout; std::string msg_code; } bad_buffer_layouts[] = { // clang-format off // images _without_ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT {img_ds, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_xfer_src, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_xfer_dst, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_sampled, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_input, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, // images _without_ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT {img_color, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_xfer_src, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_xfer_dst, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_sampled, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_input, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_color, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_xfer_src, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_xfer_dst, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_sampled, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_input, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, // images _without_ VK_IMAGE_USAGE_SAMPLED_BIT or VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT {img_color, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, {img_ds, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, {img_xfer_src, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, {img_xfer_dst, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, // images _without_ VK_IMAGE_USAGE_TRANSFER_SRC_BIT {img_color, 
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_ds, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_xfer_dst, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_sampled, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_input, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, // images _without_ VK_IMAGE_USAGE_TRANSFER_DST_BIT {img_color, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_ds, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_xfer_src, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_sampled, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_input, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, // clang-format on }; const uint32_t layout_count = sizeof(bad_buffer_layouts) / sizeof(bad_buffer_layouts[0]); for (uint32_t i = 0; i < layout_count; ++i) { img_barrier.image = bad_buffer_layouts[i].image_obj.handle(); const VkImageUsageFlags usage = bad_buffer_layouts[i].image_obj.usage(); img_barrier.subresourceRange.aspectMask = (usage == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ? (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) : VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.oldLayout = bad_buffer_layouts[i].bad_layout; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_buffer_layouts[i].msg_code); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = bad_buffer_layouts[i].bad_layout; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_buffer_layouts[i].msg_code); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; } // Attempt barrier where srcAccessMask is not supported by srcStageMask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184"); // Have lower-order bit that's supported (shader write), but higher-order bit not supported to verify multi-bit validation buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT; buf_barrier.offset = 0; buf_barrier.size = VK_WHOLE_SIZE; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); // Attempt barrier where dsAccessMask is not supported by dstStageMask buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr);
    m_errorMonitor->VerifyFound();

    // Attempt to mismatch barriers/waitEvents calls with incompatible queues
    // Create command pool with incompatible queue flags
    const std::vector<VkQueueFamilyProperties> queue_props = m_device->queue_props;
    uint32_t queue_family_index = m_device->QueueFamilyMatching(VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT);
    if (queue_family_index == UINT32_MAX) {
        printf("%s No non-compute queue supporting graphics found; skipped.\n", kSkipPrefix);
        return;  // NOTE: this exits the test function!
    }
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-01183");
    VkCommandPoolObj command_pool(m_device, queue_family_index, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
    VkCommandBufferObj bad_command_buffer(m_device, &command_pool);
    bad_command_buffer.begin();
    buf_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
    // Set two bits that should both be supported as a bonus positive check
    buf_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
    vkCmdPipelineBarrier(bad_command_buffer.handle(), VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr);
    m_errorMonitor->VerifyFound();

    // Check for error when trying to wait on a pipeline stage not supported by this queue. Specifically, since our queue is not a
    // compute queue, vkCmdWaitEvents cannot have its source stage mask be VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-srcStageMask-01164");
    VkEvent event;
    VkEventCreateInfo event_create_info{};
    event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);
    vkCmdWaitEvents(bad_command_buffer.handle(), 1, &event, /*source stage mask*/ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
    m_errorMonitor->VerifyFound();

    bad_command_buffer.end();
    vkDestroyEvent(m_device->device(), event, nullptr);
}

// Helpers for the tests below
static void ValidOwnershipTransferOp(ErrorMonitor *monitor, VkCommandBufferObj *cb, VkPipelineStageFlags src_stages, VkPipelineStageFlags dst_stages, const VkBufferMemoryBarrier *buf_barrier, const VkImageMemoryBarrier *img_barrier) {
    monitor->ExpectSuccess();
    cb->begin();
    uint32_t num_buf_barrier = (buf_barrier) ? 1 : 0;
    uint32_t num_img_barrier = (img_barrier) ?
1 : 0; cb->PipelineBarrier(src_stages, dst_stages, 0, 0, nullptr, num_buf_barrier, buf_barrier, num_img_barrier, img_barrier); cb->end(); cb->QueueCommandBuffer(); // Implicitly waits monitor->VerifyNotFound(); } static void ValidOwnershipTransfer(ErrorMonitor *monitor, VkCommandBufferObj *cb_from, VkCommandBufferObj *cb_to, VkPipelineStageFlags src_stages, VkPipelineStageFlags dst_stages, const VkBufferMemoryBarrier *buf_barrier, const VkImageMemoryBarrier *img_barrier) { ValidOwnershipTransferOp(monitor, cb_from, src_stages, dst_stages, buf_barrier, img_barrier); ValidOwnershipTransferOp(monitor, cb_to, src_stages, dst_stages, buf_barrier, img_barrier); } TEST_F(VkPositiveLayerTest, OwnershipTranfersImage) { TEST_DESCRIPTION("Valid image ownership transfers that shouldn't create errors"); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT); if (no_gfx == UINT32_MAX) { printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix); return; } VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get(); VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, no_gfx_queue); // Create an "exclusive" image owned by the graphics queue. VkImageObj image(m_device); VkFlags image_use = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, image_use, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); auto image_subres = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1); auto image_barrier = image.image_memory_barrier(0, 0, image.Layout(), image.Layout(), image_subres); image_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_; image_barrier.dstQueueFamilyIndex = no_gfx; ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, nullptr, &image_barrier); // Change layouts while changing ownership image_barrier.srcQueueFamilyIndex = no_gfx; image_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_; image_barrier.oldLayout = image.Layout(); // Make sure the new layout is different from the old if (image_barrier.oldLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { image_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; } else { image_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; } ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, nullptr, &image_barrier); } TEST_F(VkPositiveLayerTest, OwnershipTranfersBuffer) { TEST_DESCRIPTION("Valid buffer ownership transfers that shouldn't create errors"); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT); if (no_gfx == UINT32_MAX) { printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix); return; } VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get(); VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, 
no_gfx_queue); // Create a buffer const VkDeviceSize buffer_size = 256; uint8_t data[buffer_size] = {0xFF}; VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT); ASSERT_TRUE(buffer.initialized()); auto buffer_barrier = buffer.buffer_memory_barrier(0, 0, 0, VK_WHOLE_SIZE); // Let gfx own it. buffer_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_; buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_; ValidOwnershipTransferOp(m_errorMonitor, m_commandBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, &buffer_barrier, nullptr); // Transfer it to non-gfx buffer_barrier.dstQueueFamilyIndex = no_gfx; ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, &buffer_barrier, nullptr); // Transfer it to gfx buffer_barrier.srcQueueFamilyIndex = no_gfx; buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_; ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, &buffer_barrier, nullptr); } class BarrierQueueFamilyTestHelper { public: struct QueueFamilyObjs { uint32_t index; // We would use std::unique_ptr, but this triggers a compiler error on older compilers VkQueueObj *queue = nullptr; VkCommandPoolObj *command_pool = nullptr; VkCommandBufferObj *command_buffer = nullptr; VkCommandBufferObj *command_buffer2 = nullptr; ~QueueFamilyObjs() { delete command_buffer2; delete command_buffer; delete command_pool; delete queue; } void Init(VkDeviceObj *device, uint32_t qf_index, VkQueue qf_queue, VkCommandPoolCreateFlags cp_flags) { index = qf_index; queue = new VkQueueObj(qf_queue, qf_index); command_pool = new VkCommandPoolObj(device, qf_index, cp_flags); command_buffer = new VkCommandBufferObj(device, command_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, queue); command_buffer2 = new VkCommandBufferObj(device, command_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, queue); }; }; struct Context { VkLayerTest *layer_test; uint32_t default_index; std::unordered_map<uint32_t, QueueFamilyObjs> queue_families; Context(VkLayerTest *test, const std::vector<uint32_t> &queue_family_indices) : layer_test(test) { if (0 == queue_family_indices.size()) { return; // This is invalid } VkDeviceObj *device_obj = layer_test->DeviceObj(); queue_families.reserve(queue_family_indices.size()); default_index = queue_family_indices[0]; for (auto qfi : queue_family_indices) { VkQueue queue = device_obj->queue_family_queues(qfi)[0]->handle(); queue_families.emplace(std::make_pair(qfi, QueueFamilyObjs())); queue_families[qfi].Init(device_obj, qfi, queue, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); } Reset(); } void Reset() { layer_test->DeviceObj()->wait(); for (auto &qf : queue_families) { vkResetCommandPool(layer_test->device(), qf.second.command_pool->handle(), 0); } } }; BarrierQueueFamilyTestHelper(Context *context) : context_(context), image_(context->layer_test->DeviceObj()) {} // Init with queue families non-null for CONCURRENT sharing mode (which requires them) void Init(std::vector<uint32_t> *families) { VkDeviceObj *device_obj = context_->layer_test->DeviceObj(); image_.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0, families); ASSERT_TRUE(image_.initialized()); image_barrier_ = image_.image_memory_barrier(VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_TRANSFER_READ_BIT, image_.Layout(), 
image_.Layout(), image_.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1));

        VkMemoryPropertyFlags mem_prop = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        buffer_.init_as_src_and_dst(*device_obj, 256, mem_prop, families);
        ASSERT_TRUE(buffer_.initialized());
        buffer_barrier_ = buffer_.buffer_memory_barrier(VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_TRANSFER_READ_BIT, 0, VK_WHOLE_SIZE);
    }

    QueueFamilyObjs *GetQueueFamilyInfo(Context *context, uint32_t qfi) {
        QueueFamilyObjs *qf;
        auto qf_it = context->queue_families.find(qfi);
        if (qf_it != context->queue_families.end()) {
            qf = &(qf_it->second);
        } else {
            qf = &(context->queue_families[context->default_index]);
        }
        return qf;
    }

    enum Modifier {
        NONE,
        DOUBLE_RECORD,
        DOUBLE_COMMAND_BUFFER,
    };

    void operator()(std::string img_err, std::string buf_err, uint32_t src, uint32_t dst, bool positive = false, uint32_t queue_family_index = kInvalidQueueFamily, Modifier mod = Modifier::NONE) {
        auto monitor = context_->layer_test->Monitor();
        monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT, img_err);
        monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT, buf_err);

        image_barrier_.srcQueueFamilyIndex = src;
        image_barrier_.dstQueueFamilyIndex = dst;
        buffer_barrier_.srcQueueFamilyIndex = src;
        buffer_barrier_.dstQueueFamilyIndex = dst;

        QueueFamilyObjs *qf = GetQueueFamilyInfo(context_, queue_family_index);
        VkCommandBufferObj *command_buffer = qf->command_buffer;
        for (int cb_repeat = 0; cb_repeat < (mod == Modifier::DOUBLE_COMMAND_BUFFER ? 2 : 1); cb_repeat++) {
            command_buffer->begin();
            for (int repeat = 0; repeat < (mod == Modifier::DOUBLE_RECORD ? 2 : 1); repeat++) {
                vkCmdPipelineBarrier(command_buffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buffer_barrier_, 1, &image_barrier_);
            }
            command_buffer->end();
            command_buffer = qf->command_buffer2;  // Second pass (if any) goes to the second command buffer (command_buffer2).
        }

        if (queue_family_index != kInvalidQueueFamily) {
            if (mod == Modifier::DOUBLE_COMMAND_BUFFER) {
                // the Fence resolves to VK_NULL_HANDLE... i.e. no fence
                qf->queue->submit({{qf->command_buffer, qf->command_buffer2}}, vk_testing::Fence(), positive);
            } else {
                qf->command_buffer->QueueCommandBuffer(positive);  // Check for success on positive tests only
            }
        }

        if (positive) {
            monitor->VerifyNotFound();
        } else {
            monitor->VerifyFound();
        }
        context_->Reset();
    };

   protected:
    static const uint32_t kInvalidQueueFamily = UINT32_MAX;
    Context *context_;
    VkImageObj image_;
    VkImageMemoryBarrier image_barrier_;
    VkBufferObj buffer_;
    VkBufferMemoryBarrier buffer_barrier_;
};

TEST_F(VkLayerTest, InvalidBarrierQueueFamily) {
    TEST_DESCRIPTION("Create and submit barriers with invalid queue families");
    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // Find queues of two families
    const uint32_t submit_family = m_device->graphics_queue_node_index_;
    const uint32_t invalid = static_cast<uint32_t>(m_device->queue_props.size());
    const uint32_t other_family = submit_family != 0 ?
0 : 1;
    const bool only_one_family = (invalid == 1) || (m_device->queue_props[other_family].queueCount == 0);

    std::vector<uint32_t> qf_indices{{submit_family, other_family}};
    if (only_one_family) {
        qf_indices.resize(1);
    }
    BarrierQueueFamilyTestHelper::Context test_context(this, qf_indices);

    if (m_device->props.apiVersion >= VK_API_VERSION_1_1) {
        printf("%s Device has apiVersion greater than 1.0 -- skipping test cases that require external memory to be disabled.\n", kSkipPrefix);
    } else {
        if (only_one_family) {
            printf("%s Single queue family found -- VK_SHARING_MODE_CONCURRENT testcases skipped.\n", kSkipPrefix);
        } else {
            std::vector<uint32_t> families = {submit_family, other_family};
            BarrierQueueFamilyTestHelper conc_test(&test_context);
            conc_test.Init(&families);
            // core_validation::barrier_queue_families::kSrcAndDestMustBeIgnore
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", VK_QUEUE_FAMILY_IGNORED, submit_family);
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", submit_family, VK_QUEUE_FAMILY_IGNORED);
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", submit_family, submit_family);
            // true -> positive test
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, true);
        }

        BarrierQueueFamilyTestHelper excl_test(&test_context);
        excl_test.Init(nullptr);  // no queue families means *exclusive* sharing mode.
        // core_validation::barrier_queue_families::kBothIgnoreOrBothValid
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", VK_QUEUE_FAMILY_IGNORED, submit_family);
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", submit_family, VK_QUEUE_FAMILY_IGNORED);
        // true -> positive test
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", submit_family, submit_family, true);
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, true);
    }

    if (only_one_family) {
        printf("%s Single queue family found -- VK_SHARING_MODE_EXCLUSIVE submit testcases skipped.\n", kSkipPrefix);
    } else {
        BarrierQueueFamilyTestHelper excl_test(&test_context);
        excl_test.Init(nullptr);
        // core_validation::barrier_queue_families::kSubmitQueueMustMatchSrcOrDst
        excl_test("VUID-VkImageMemoryBarrier-image-01205", "VUID-VkBufferMemoryBarrier-buffer-01196", other_family, other_family, false, submit_family);

        // true -> positive test (testing both the index logic and the QFO transfer tracking).
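        // Each positive pair below first records/submits the barrier on the source family (a release) and then on the
        // destination family (the matching acquire); the negative cases that follow exercise the QFO transfer tracking
        // for duplicate or missing release/acquire operations.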
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, submit_family);
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, other_family);
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", other_family, submit_family, true, other_family);
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", other_family, submit_family, true, submit_family);

        // negative testing for QFO transfer tracking
        // Duplicate release in one CB
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00001", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00001", submit_family, other_family, false, submit_family, BarrierQueueFamilyTestHelper::DOUBLE_RECORD);
        // Duplicate pending release
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00003", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00003", submit_family, other_family, false, submit_family);
        // Duplicate acquire in one CB
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00001", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00001", submit_family, other_family, false, other_family, BarrierQueueFamilyTestHelper::DOUBLE_RECORD);
        // No pending release
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00004", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00004", submit_family, other_family, false, other_family);
        // Duplicate release in two CB
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00002", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00002", submit_family, other_family, false, submit_family, BarrierQueueFamilyTestHelper::DOUBLE_COMMAND_BUFFER);
        // Duplicate acquire in two CB
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, submit_family);  // need a successful release
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00002", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00002", submit_family, other_family, false, other_family, BarrierQueueFamilyTestHelper::DOUBLE_COMMAND_BUFFER);
    }
}

TEST_F(VkLayerTest, InvalidBarrierQueueFamilyWithMemExt) {
    TEST_DESCRIPTION("Create and submit barriers with invalid queue families when memory extension is enabled");
    std::vector<const char *> reqd_instance_extensions = {
        {VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME}};
    for (auto extension_name : reqd_instance_extensions) {
        if (InstanceExtensionSupported(extension_name)) {
            m_instance_extension_names.push_back(extension_name);
        } else {
            printf("%s Required instance extension %s not supported, skipping test\n", kSkipPrefix, extension_name);
            return;
        }
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Check for external memory device extensions
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
    } else {
        printf("%s External memory extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // Find queues of two families
    const uint32_t submit_family = m_device->graphics_queue_node_index_;
    const uint32_t invalid = static_cast<uint32_t>(m_device->queue_props.size());
    const uint32_t other_family = submit_family != 0 ?
0 : 1; const bool only_one_family = (invalid == 1) || (m_device->queue_props[other_family].queueCount == 0); std::vector<uint32_t> qf_indices{{submit_family, other_family}}; if (only_one_family) { qf_indices.resize(1); } BarrierQueueFamilyTestHelper::Context test_context(this, qf_indices); if (only_one_family) { printf("%s Single queue family found -- VK_SHARING_MODE_CONCURRENT testcases skipped.\n", kSkipPrefix); } else { std::vector<uint32_t> families = {submit_family, other_family}; BarrierQueueFamilyTestHelper conc_test(&test_context); // core_validation::barrier_queue_families::kSrcOrDstMustBeIgnore conc_test.Init(&families); conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", submit_family, submit_family); // true -> positive test conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, true); conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_EXTERNAL_KHR, true); conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_EXTERNAL_KHR, VK_QUEUE_FAMILY_IGNORED, true); // core_validation::barrier_queue_families::kSpecialOrIgnoreOnly conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", submit_family, VK_QUEUE_FAMILY_IGNORED); conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_IGNORED, submit_family); // This is to flag the errors that would be considered only "unexpected" in the parallel case above // true -> positive test conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_EXTERNAL_KHR, true); conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_EXTERNAL_KHR, VK_QUEUE_FAMILY_IGNORED, true); } BarrierQueueFamilyTestHelper excl_test(&test_context); excl_test.Init(nullptr); // no queue families means *exclusive* sharing mode. 
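    // With the external memory extension enabled, VK_QUEUE_FAMILY_EXTERNAL_KHR becomes an accepted "special" queue
    // family index, so the exclusive-mode checks below differ from the non-extension variants in
    // InvalidBarrierQueueFamily above.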
    // core_validation::barrier_queue_families::kSrcIgnoreRequiresDstIgnore
    excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED, submit_family);
    excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_EXTERNAL_KHR);
    // true -> positive test
    excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, true);

    // core_validation::barrier_queue_families::kDstValidOrSpecialIfNotIgnore
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family, invalid);
    // true -> positive test
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family, submit_family, true);
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family, VK_QUEUE_FAMILY_IGNORED, true);
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family, VK_QUEUE_FAMILY_EXTERNAL_KHR, true);

    // core_validation::barrier_queue_families::kSrcValidOrSpecialIfNotIgnore
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", invalid, submit_family);
    // true -> positive test
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", submit_family, submit_family, true);
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, true);
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", VK_QUEUE_FAMILY_EXTERNAL_KHR, submit_family, true);
}

TEST_F(VkLayerTest, ImageBarrierWithBadRange) {
    TEST_DESCRIPTION("VkImageMemoryBarrier with an invalid subresourceRange");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkImageObj image(m_device);
    image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(image.create_info().arrayLayers == 1);
    ASSERT_TRUE(image.initialized());

    VkImageMemoryBarrier img_barrier_template = {};
    img_barrier_template.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier_template.pNext = NULL;
    img_barrier_template.srcAccessMask = 0;
    img_barrier_template.dstAccessMask = 0;
    img_barrier_template.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    img_barrier_template.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier_template.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier_template.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier_template.image = image.handle();
    // subresourceRange to be set later for the purposes of this test
    img_barrier_template.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    img_barrier_template.subresourceRange.baseArrayLayer = 0;
    img_barrier_template.subresourceRange.baseMipLevel = 0;
    img_barrier_template.subresourceRange.layerCount = 0;
    img_barrier_template.subresourceRange.levelCount = 0;

    m_commandBuffer->begin();

    // Nested scope here confuses clang-format, somehow
    // clang-format off

    // try for vkCmdPipelineBarrier
    {
    // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS
    {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486");
        const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT,
1, VK_REMAINING_MIP_LEVELS, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = 
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } } // try for vkCmdWaitEvents { VkEvent event; VkEventCreateInfo eci{VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, NULL, 0}; VkResult err = vkCreateEvent(m_device->handle(), &eci, nullptr, &event); ASSERT_VK_SUCCESS(err); // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } vkDestroyEvent(m_device->handle(), event, nullptr); } // clang-format on } TEST_F(VkLayerTest, ValidationCacheTestBadMerge) { ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), "VK_LAYER_LUNARG_core_validation", VK_EXT_VALIDATION_CACHE_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_VALIDATION_CACHE_EXTENSION_NAME); } else { printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Load extension functions auto fpCreateValidationCache = (PFN_vkCreateValidationCacheEXT)vkGetDeviceProcAddr(m_device->device(), "vkCreateValidationCacheEXT"); auto fpDestroyValidationCache = (PFN_vkDestroyValidationCacheEXT)vkGetDeviceProcAddr(m_device->device(), "vkDestroyValidationCacheEXT"); auto fpMergeValidationCaches = (PFN_vkMergeValidationCachesEXT)vkGetDeviceProcAddr(m_device->device(), "vkMergeValidationCachesEXT"); if (!fpCreateValidationCache || !fpDestroyValidationCache || !fpMergeValidationCaches) { printf("%s Failed to load function pointers for %s\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME); return; } VkValidationCacheCreateInfoEXT validationCacheCreateInfo; 
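    // Create a single validation cache and then attempt to merge it into itself below; the self-merge is the invalid
    // call that should trigger VUID-vkMergeValidationCachesEXT-dstCache-01536.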
validationCacheCreateInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
    validationCacheCreateInfo.pNext = NULL;
    validationCacheCreateInfo.initialDataSize = 0;
    validationCacheCreateInfo.pInitialData = NULL;
    validationCacheCreateInfo.flags = 0;
    VkValidationCacheEXT validationCache = VK_NULL_HANDLE;
    VkResult res = fpCreateValidationCache(m_device->device(), &validationCacheCreateInfo, nullptr, &validationCache);
    ASSERT_VK_SUCCESS(res);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkMergeValidationCachesEXT-dstCache-01536");
    res = fpMergeValidationCaches(m_device->device(), validationCache, 1, &validationCache);
    m_errorMonitor->VerifyFound();

    fpDestroyValidationCache(m_device->device(), validationCache, nullptr);
}

TEST_F(VkPositiveLayerTest, LayoutFromPresentWithoutAccessMemoryRead) {
    // Transition an image away from PRESENT_SRC_KHR without ACCESS_MEMORY_READ
    // in srcAccessMask.
    // The required behavior here was a bit unclear in earlier versions of the
    // spec, but there is no memory dependency required here, so this should
    // work without warnings.
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT), VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageMemoryBarrier barrier = {};
    VkImageSubresourceRange range;
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    barrier.dstAccessMask = 0;
    barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    barrier.image = image.handle();
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseMipLevel = 0;
    range.levelCount = 1;
    range.baseArrayLayer = 0;
    range.layerCount = 1;
    barrier.subresourceRange = range;
    VkCommandBufferObj cmdbuf(m_device, m_commandPool);
    cmdbuf.begin();
    cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
    barrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.srcAccessMask = 0;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkLayerTest, IdxBufferAlignmentError) {
    // Attempt to bind an index buffer at an offset that is not a multiple of the index type size
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    uint32_t const indices[] = {0};
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.size = 1024;
    buf_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
    buf_info.queueFamilyIndexCount = 1;
    buf_info.pQueueFamilyIndices = indices;

    VkBuffer buffer;
    VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    VkMemoryRequirements requirements;
    vkGetBufferMemoryRequirements(m_device->device(), buffer, &requirements);

    VkMemoryAllocateInfo alloc_info{};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    alloc_info.allocationSize = requirements.size;
    bool pass = m_device->phy().set_memory_type(requirements.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    ASSERT_TRUE(pass);

    VkDeviceMemory memory;
    err =
vkAllocateMemory(m_device->device(), &alloc_info, NULL, &memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, memory, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); ASSERT_VK_SUCCESS(err); // vkCmdBindPipeline(m_commandBuffer->handle(), // VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // Should error before calling to driver so don't care about actual data m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdBindIndexBuffer() offset (0x7) does not fall on "); vkCmdBindIndexBuffer(m_commandBuffer->handle(), buffer, 7, VK_INDEX_TYPE_UINT16); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), memory, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } TEST_F(VkLayerTest, InvalidQueueFamilyIndex) { // Create an out-of-range queueFamilyIndex ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buffCI.queueFamilyIndexCount = 2; // Introduce failure by specifying invalid queue_family_index uint32_t qfi[2]; qfi[0] = 777; qfi[1] = 0; buffCI.pQueueFamilyIndices = qfi; buffCI.sharingMode = VK_SHARING_MODE_CONCURRENT; // qfi only matters in CONCURRENT mode VkBuffer ib; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreateBuffer: pCreateInfo->pQueueFamilyIndices[0] (= 777) is not one of the queue " "families given via VkDeviceQueueCreateInfo structures when the device was created."); vkCreateBuffer(m_device->device(), &buffCI, NULL, &ib); m_errorMonitor->VerifyFound(); if (m_device->queue_props.size() > 2) { VkBuffer ib2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "which was not created allowing concurrent"); // Create buffer shared to queue families 1 and 2, but submitted on queue family 0 buffCI.queueFamilyIndexCount = 2; qfi[0] = 1; qfi[1] = 2; vkCreateBuffer(m_device->device(), &buffCI, NULL, &ib2); VkDeviceMemory mem; VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), ib2, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = mem_reqs.size; bool pass = false; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to allocate required memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), ib2, NULL); return; } vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); vkBindBufferMemory(m_device->device(), ib2, mem, 0); m_commandBuffer->begin(); vkCmdFillBuffer(m_commandBuffer->handle(), ib2, 0, 16, 5); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), ib2, NULL); vkFreeMemory(m_device->device(), mem, NULL); } } TEST_F(VkLayerTest, ExecuteCommandsPrimaryCB) { TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a primary command buffer (should only be secondary)"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // An empty primary command buffer VkCommandBufferObj cb(m_device, m_commandPool); cb.begin(); cb.end(); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &renderPassBeginInfo(), VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); VkCommandBuffer handle = cb.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"vkCmdExecuteCommands() called w/ Primary Cmd Buffer "); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &handle); m_errorMonitor->VerifyFound(); m_errorMonitor->SetUnexpectedError("All elements of pCommandBuffers must not be in the pending state"); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, DSUsageBitsErrors) { TEST_DESCRIPTION("Attempt to update descriptor sets for images and buffers that do not have correct usage bits sets."); ASSERT_NO_FATAL_FAILURE(Init()); std::array<VkDescriptorPoolSize, VK_DESCRIPTOR_TYPE_RANGE_SIZE> ds_type_count; for (uint32_t i = 0; i < ds_type_count.size(); ++i) { ds_type_count[i].type = VkDescriptorType(i); ds_type_count[i].descriptorCount = 1; } vk_testing::DescriptorPool ds_pool; ds_pool.init(*m_device, vk_testing::DescriptorPool::create_info(0, VK_DESCRIPTOR_TYPE_RANGE_SIZE, ds_type_count)); ASSERT_TRUE(ds_pool.initialized()); std::vector<VkDescriptorSetLayoutBinding> dsl_bindings(1); dsl_bindings[0].binding = 0; dsl_bindings[0].descriptorType = VkDescriptorType(0); dsl_bindings[0].descriptorCount = 1; dsl_bindings[0].stageFlags = VK_SHADER_STAGE_ALL; dsl_bindings[0].pImmutableSamplers = NULL; // Create arrays of layout and descriptor objects using UpDescriptorSet = std::unique_ptr<vk_testing::DescriptorSet>; std::vector<UpDescriptorSet> descriptor_sets; using UpDescriptorSetLayout = std::unique_ptr<VkDescriptorSetLayoutObj>; std::vector<UpDescriptorSetLayout> ds_layouts; descriptor_sets.reserve(VK_DESCRIPTOR_TYPE_RANGE_SIZE); ds_layouts.reserve(VK_DESCRIPTOR_TYPE_RANGE_SIZE); for (uint32_t i = 0; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++i) { dsl_bindings[0].descriptorType = VkDescriptorType(i); ds_layouts.push_back(UpDescriptorSetLayout(new VkDescriptorSetLayoutObj(m_device, dsl_bindings))); descriptor_sets.push_back(UpDescriptorSet(ds_pool.alloc_sets(*m_device, *ds_layouts.back()))); ASSERT_TRUE(descriptor_sets.back()->initialized()); } // Create a buffer & bufferView to be used for invalid updates const VkDeviceSize buffer_size = 256; uint8_t data[buffer_size]; VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT); VkConstantBufferObj storage_texel_buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT); ASSERT_TRUE(buffer.initialized() && storage_texel_buffer.initialized()); auto buff_view_ci = vk_testing::BufferView::createInfo(buffer.handle(), VK_FORMAT_R8_UNORM); vk_testing::BufferView buffer_view_obj, storage_texel_buffer_view_obj; buffer_view_obj.init(*m_device, buff_view_ci); buff_view_ci.buffer = storage_texel_buffer.handle(); storage_texel_buffer_view_obj.init(*m_device, buff_view_ci); ASSERT_TRUE(buffer_view_obj.initialized() && storage_texel_buffer_view_obj.initialized()); VkBufferView buffer_view = buffer_view_obj.handle(); VkBufferView storage_texel_buffer_view = storage_texel_buffer_view_obj.handle(); // Create an image to be used for invalid updates VkImageObj image_obj(m_device); image_obj.InitNoLayout(64, 64, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image_obj.initialized()); VkImageView image_view = image_obj.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer.handle(); VkDescriptorImageInfo img_info = {}; img_info.imageView = image_view; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; 
descriptor_write.pTexelBufferView = &buffer_view; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = &img_info; // These error messages align with VkDescriptorType struct std::string error_codes[] = { "VUID-VkWriteDescriptorSet-descriptorType-00326", // placeholder, no error for SAMPLER descriptor "VUID-VkWriteDescriptorSet-descriptorType-00326", // COMBINED_IMAGE_SAMPLER "VUID-VkWriteDescriptorSet-descriptorType-00326", // SAMPLED_IMAGE "VUID-VkWriteDescriptorSet-descriptorType-00326", // STORAGE_IMAGE "VUID-VkWriteDescriptorSet-descriptorType-00334", // UNIFORM_TEXEL_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00335", // STORAGE_TEXEL_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00330", // UNIFORM_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00331", // STORAGE_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00330", // UNIFORM_BUFFER_DYNAMIC "VUID-VkWriteDescriptorSet-descriptorType-00331", // STORAGE_BUFFER_DYNAMIC "VUID-VkWriteDescriptorSet-descriptorType-00326" // INPUT_ATTACHMENT }; // Start loop at 1 as SAMPLER desc type has no usage bit error for (uint32_t i = 1; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++i) { if (VkDescriptorType(i) == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) { // Now check for UNIFORM_TEXEL_BUFFER using storage_texel_buffer_view descriptor_write.pTexelBufferView = &storage_texel_buffer_view; } descriptor_write.descriptorType = VkDescriptorType(i); descriptor_write.dstSet = descriptor_sets[i]->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error_codes[i]); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); if (VkDescriptorType(i) == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) { descriptor_write.pTexelBufferView = &buffer_view; } } } TEST_F(VkLayerTest, DSBufferInfoErrors) { TEST_DESCRIPTION( "Attempt to update buffer descriptor set that has incorrect parameters in VkDescriptorBufferInfo struct. This includes:\n" "1. offset value greater than or equal to buffer size\n" "2. range value of 0\n" "3. 
range value greater than buffer (size - offset)");
    VkResult err;

    // GPDP2 needed for push descriptors support below
    bool gpdp2_support = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION);
    if (gpdp2_support) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    bool update_template_support = DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME);
    if (update_template_support) {
        m_device_extension_names.push_back(VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME);
    } else {
        printf("%s Descriptor Update Template Extensions not supported, template cases skipped.\n", kSkipPrefix);
    }
    bool push_descriptor_support = gpdp2_support && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
    if (push_descriptor_support) {
        m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
    } else {
        printf("%s Push Descriptor Extension not supported, push descriptor cases skipped.\n", kSkipPrefix);
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    std::vector<VkDescriptorSetLayoutBinding> ds_bindings = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
    OneOffDescriptorSet ds(m_device, ds_bindings);

    // Create a buffer to be used for invalid updates
    VkBufferCreateInfo buff_ci = {};
    buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buff_ci.size = m_device->props.limits.minUniformBufferOffsetAlignment;
    buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VkBuffer buffer;
    err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    // Have to bind memory to buffer before descriptor update
    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.allocationSize = mem_reqs.size;
    mem_alloc.memoryTypeIndex = 0;
    bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
    if (!pass) {
        printf("%s Failed to allocate memory.\n", kSkipPrefix);
        vkDestroyBuffer(m_device->device(), buffer, NULL);
        return;
    }
    VkDeviceMemory mem;
    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
    ASSERT_VK_SUCCESS(err);
    err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorBufferInfo buff_info = {};
    buff_info.buffer = buffer;
    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.pTexelBufferView = nullptr;
    descriptor_write.pBufferInfo = &buff_info;
    descriptor_write.pImageInfo = nullptr;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptor_write.dstSet = ds.set_;

    // Relying on the "return nullptr for non-enabled extensions" behavior of vkGetDeviceProcAddr
    auto vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateDescriptorUpdateTemplateKHR");
    auto vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkDestroyDescriptorUpdateTemplateKHR");
    auto vkUpdateDescriptorSetWithTemplateKHR =
(PFN_vkUpdateDescriptorSetWithTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkUpdateDescriptorSetWithTemplateKHR"); if (update_template_support) { ASSERT_NE(vkCreateDescriptorUpdateTemplateKHR, nullptr); ASSERT_NE(vkDestroyDescriptorUpdateTemplateKHR, nullptr); ASSERT_NE(vkUpdateDescriptorSetWithTemplateKHR, nullptr); } // Setup for update w/ template tests // Create a template of descriptor set updates struct SimpleTemplateData { uint8_t padding[7]; VkDescriptorBufferInfo buff_info; uint32_t other_padding[4]; }; SimpleTemplateData update_template_data = {}; VkDescriptorUpdateTemplateEntry update_template_entry = {}; update_template_entry.dstBinding = 0; update_template_entry.dstArrayElement = 0; update_template_entry.descriptorCount = 1; update_template_entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; update_template_entry.offset = offsetof(SimpleTemplateData, buff_info); update_template_entry.stride = sizeof(SimpleTemplateData); auto update_template_ci = lvl_init_struct<VkDescriptorUpdateTemplateCreateInfoKHR>(); update_template_ci.descriptorUpdateEntryCount = 1; update_template_ci.pDescriptorUpdateEntries = &update_template_entry; update_template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET; update_template_ci.descriptorSetLayout = ds.layout_.handle(); VkDescriptorUpdateTemplate update_template = VK_NULL_HANDLE; if (update_template_support) { auto result = vkCreateDescriptorUpdateTemplateKHR(m_device->device(), &update_template_ci, nullptr, &update_template); ASSERT_VK_SUCCESS(result); } // VK_KHR_push_descriptor support auto vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR"); auto vkCmdPushDescriptorSetWithTemplateKHR = (PFN_vkCmdPushDescriptorSetWithTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetWithTemplateKHR"); std::unique_ptr<VkDescriptorSetLayoutObj> push_dsl = nullptr; std::unique_ptr<VkPipelineLayoutObj> pipeline_layout = nullptr; VkDescriptorUpdateTemplate push_template = VK_NULL_HANDLE; if (push_descriptor_support) { ASSERT_NE(vkCmdPushDescriptorSetKHR, nullptr); push_dsl.reset( new VkDescriptorSetLayoutObj(m_device, ds_bindings, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR)); pipeline_layout.reset(new VkPipelineLayoutObj(m_device, {push_dsl.get()})); ASSERT_TRUE(push_dsl->initialized()); if (update_template_support) { ASSERT_NE(vkCmdPushDescriptorSetWithTemplateKHR, nullptr); auto push_template_ci = lvl_init_struct<VkDescriptorUpdateTemplateCreateInfoKHR>(); push_template_ci.descriptorUpdateEntryCount = 1; push_template_ci.pDescriptorUpdateEntries = &update_template_entry; push_template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR; push_template_ci.descriptorSetLayout = VK_NULL_HANDLE; push_template_ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; push_template_ci.pipelineLayout = pipeline_layout->handle(); push_template_ci.set = 0; auto result = vkCreateDescriptorUpdateTemplateKHR(m_device->device(), &push_template_ci, nullptr, &push_template); ASSERT_VK_SUCCESS(result); } } auto do_test = [&](const char *desired_failure) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); if (push_descriptor_support) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure); m_commandBuffer->begin(); 
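            // The same invalid VkDescriptorBufferInfo should also be flagged when pushed via vkCmdPushDescriptorSetKHR.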
vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout->handle(), 0, 1, &descriptor_write); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } if (update_template_support) { update_template_data.buff_info = buff_info; // copy the test case information into our "pData" m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure); vkUpdateDescriptorSetWithTemplateKHR(m_device->device(), ds.set_, update_template, &update_template_data); m_errorMonitor->VerifyFound(); if (push_descriptor_support) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure); m_commandBuffer->begin(); vkCmdPushDescriptorSetWithTemplateKHR(m_commandBuffer->handle(), push_template, pipeline_layout->handle(), 0, &update_template_data); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } } }; // Cause error due to offset out of range buff_info.offset = buff_ci.size; buff_info.range = VK_WHOLE_SIZE; do_test("VUID-VkDescriptorBufferInfo-offset-00340"); // Now cause error due to range of 0 buff_info.offset = 0; buff_info.range = 0; do_test("VUID-VkDescriptorBufferInfo-range-00341"); // Now cause error due to range exceeding buffer size - offset buff_info.offset = 0; buff_info.range = buff_ci.size + 1; do_test("VUID-VkDescriptorBufferInfo-range-00342"); if (update_template_support) { vkDestroyDescriptorUpdateTemplateKHR(m_device->device(), update_template, nullptr); if (push_descriptor_support) { vkDestroyDescriptorUpdateTemplateKHR(m_device->device(), push_template, nullptr); } } vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } TEST_F(VkLayerTest, DSBufferLimitErrors) { TEST_DESCRIPTION( "Attempt to update buffer descriptor set that has VkDescriptorBufferInfo values that violate device limits.\n" "Test cases include:\n" "1. range of uniform buffer update exceeds maxUniformBufferRange\n" "2. offset of uniform buffer update is not multiple of minUniformBufferOffsetAlignment\n" "3. using VK_WHOLE_SIZE with uniform buffer size exceeding maxUniformBufferRange\n" "4. range of storage buffer update exceeds maxStorageBufferRange\n" "5. offset of storage buffer update is not multiple of minStorageBufferOffsetAlignment\n" "6. 
using VK_WHOLE_SIZE with storage buffer size exceeding maxStorageBufferRange"); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); struct TestCase { VkDescriptorType descriptor_type; VkBufferUsageFlagBits buffer_usage; VkDeviceSize max_range; std::string max_range_vu; VkDeviceSize min_align; std::string min_align_vu; }; for (const auto &test_case : { TestCase({VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, m_device->props.limits.maxUniformBufferRange, "VUID-VkWriteDescriptorSet-descriptorType-00332", m_device->props.limits.minUniformBufferOffsetAlignment, "VUID-VkWriteDescriptorSet-descriptorType-00327"}), TestCase({VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, m_device->props.limits.maxStorageBufferRange, "VUID-VkWriteDescriptorSet-descriptorType-00333", m_device->props.limits.minStorageBufferOffsetAlignment, "VUID-VkWriteDescriptorSet-descriptorType-00328"}), }) { // Create layout with single buffer OneOffDescriptorSet ds(m_device, { {0, test_case.descriptor_type, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create a buffer to be used for invalid updates VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = test_case.buffer_usage; bci.size = test_case.max_range + test_case.min_align; // Make buffer bigger than range limit bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &bci, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Have to bind memory to buffer before descriptor update VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = mem_reqs.size; bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory in DSBufferLimitErrors; skipped.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); continue; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); if (VK_SUCCESS != err) { printf("%s Failed to allocate memory in DSBufferLimitErrors; skipped.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); continue; } err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = test_case.descriptor_type; descriptor_write.dstSet = ds.set_; // Exceed range limit if (test_case.max_range != UINT32_MAX) { buff_info.range = test_case.max_range + 1; buff_info.offset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.max_range_vu); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } // Reduce size of range to acceptable limit and cause offset error if (test_case.min_align > 1) { buff_info.range = test_case.max_range; buff_info.offset = test_case.min_align - 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.min_align_vu); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); 
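            // The offset of (min_align - 1) set above is not a multiple of the required alignment, so the update must be flagged.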
m_errorMonitor->VerifyFound(); } // Exceed effective range limit by using VK_WHOLE_SIZE buff_info.range = VK_WHOLE_SIZE; buff_info.offset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.max_range_vu); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Cleanup vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } } TEST_F(VkLayerTest, DSAspectBitsErrors) { // TODO : Initially only catching case where DEPTH & STENCIL aspect bits // are set, but could expand this test to hit more cases. TEST_DESCRIPTION("Attempt to update descriptor sets for images that do not have correct aspect bits sets."); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create an image to be used for invalid updates VkImageObj image_obj(m_device); image_obj.Init(64, 64, 1, depth_format, VK_IMAGE_USAGE_SAMPLED_BIT); if (!image_obj.initialized()) { printf("%s Depth + Stencil format cannot be sampled. Skipped.\n", kSkipPrefix); return; } VkImage image = image_obj.image(); // Now create view for image VkImageViewCreateInfo image_view_ci = {}; image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_ci.image = image; image_view_ci.format = depth_format; image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_ci.subresourceRange.layerCount = 1; image_view_ci.subresourceRange.baseArrayLayer = 0; image_view_ci.subresourceRange.levelCount = 1; // Setting both depth & stencil aspect bits is illegal for an imageView used // to populate a descriptor set. image_view_ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; VkImageView image_view; err = vkCreateImageView(m_device->device(), &image_view_ci, NULL, &image_view); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo img_info = {}; img_info.imageView = image_view; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = NULL; descriptor_write.pBufferInfo = NULL; descriptor_write.pImageInfo = &img_info; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; descriptor_write.dstSet = ds.set_; // TODO(whenning42): Update this check to look for a VUID when this error is // assigned one. 
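    // Until then, the check below matches on the message text emitted by core validation.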
const char *error_msg = " please only set either VK_IMAGE_ASPECT_DEPTH_BIT or VK_IMAGE_ASPECT_STENCIL_BIT "; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error_msg); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroyImageView(m_device->device(), image_view, NULL); } TEST_F(VkLayerTest, DSTypeMismatch) { // Create DS w/ layout of one type and attempt Update w/ mis-matched type VkResult err; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding #0 with type VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER but update type is VK_DESCRIPTOR_TYPE_SAMPLER"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo info = {}; info.sampler = sampler; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.descriptorCount = 1; // This is a mismatched type for the layout which expects BUFFER descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_write.pImageInfo = &info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkLayerTest, DSUpdateOutOfBounds) { // For overlapping Update, have arrayIndex exceed that of layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); if (!buffer_test.GetBufferCurrent()) { // Something prevented creation of buffer so abort printf("%s Buffer creation failed, skipping test\n", kSkipPrefix); return; } // Correctly update descriptor to avoid "NOT_UPDATED" error VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer_test.GetBuffer(); buff_info.offset = 0; buff_info.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstArrayElement = 1; /* This index out of bounds for the update */ descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = &buff_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidDSUpdateIndex) { // Create layout w/ count of 1 and attempt update to that layout w/ binding index 2 VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstBinding-00315"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo info = {}; 
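    // The write below is otherwise valid; only dstBinding = 2 falls outside the single-binding layout.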
    info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write;
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 2;
    descriptor_write.descriptorCount = 1;
    // This is the wrong type, but out of bounds will be flagged first
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    descriptor_write.pImageInfo = &info;

    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyFound();

    vkDestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, DSUpdateEmptyBinding) {
    // Create layout w/ empty binding and attempt to update it
    VkResult err;
    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet ds(m_device, {
        {0, VK_DESCRIPTOR_TYPE_SAMPLER, 0 /* !! */, VK_SHADER_STAGE_ALL, nullptr},
    });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorImageInfo info = {};
    info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write;
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;  // Lie here to avoid parameter_validation error
    // This is the wrong type, but empty binding error will be flagged first
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    descriptor_write.pImageInfo = &info;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstBinding-00316");
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyFound();

    vkDestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, InvalidDSUpdateStruct) {
    // Call UpdateDS w/ struct type other than valid VK_STRUCTURE_TYPE_UPDATE_*
    // types
    VkResult err;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, ".sType must be VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET");

    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet ds(m_device, {
        {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
    });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorImageInfo info = {};
    info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write;
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; /* Intentionally broken struct type */
    descriptor_write.dstSet = ds.set_;
    descriptor_write.descriptorCount = 1;
    // This is the wrong type, but the invalid sType will be flagged first
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    descriptor_write.pImageInfo = &info;

    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyFound();

    vkDestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, SampleDescriptorUpdateError) {
    // Create a single Sampler descriptor and send it an invalid Sampler
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00325");

    ASSERT_NO_FATAL_FAILURE(Init());
    OneOffDescriptorSet ds(m_device, {
        {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
    });

    VkSampler sampler = (VkSampler)((size_t)0xbaadbeef);  // 
Sampler with invalid handle VkDescriptorImageInfo descriptor_info; memset(&descriptor_info, 0, sizeof(VkDescriptorImageInfo)); descriptor_info.sampler = sampler; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_write.pImageInfo = &descriptor_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ImageViewDescriptorUpdateError) { // Create a single combined Image/Sampler descriptor and send it an invalid // imageView VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00326"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkImageView view = (VkImageView)((size_t)0xbaadbeef); // invalid imageView object VkDescriptorImageInfo descriptor_info; memset(&descriptor_info, 0, sizeof(VkDescriptorImageInfo)); descriptor_info.sampler = sampler; descriptor_info.imageView = view; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &descriptor_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkLayerTest, CopyDescriptorUpdateErrors) { // Create DS w/ layout of 2 types, write update 1 and attempt to copy-update // into the other VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding #1 with type VK_DESCRIPTOR_TYPE_SAMPLER. 
Types do not match."); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo info = {}; info.sampler = sampler; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(VkWriteDescriptorSet)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 1; // SAMPLER binding from layout above descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_write.pImageInfo = &info; // This write update should succeed vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Now perform a copy update that fails due to type mismatch VkCopyDescriptorSet copy_ds_update; memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = ds.set_; copy_ds_update.srcBinding = 1; // Copy from SAMPLER binding copy_ds_update.dstSet = ds.set_; copy_ds_update.dstBinding = 0; // ERROR : copy to UNIFORM binding copy_ds_update.descriptorCount = 1; // copy 1 descriptor vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); // Now perform a copy update that fails due to binding out of bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " does not have copy update src binding of 3."); memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = ds.set_; copy_ds_update.srcBinding = 3; // ERROR : Invalid binding for matching layout copy_ds_update.dstSet = ds.set_; copy_ds_update.dstBinding = 0; copy_ds_update.descriptorCount = 1; // Copy 1 descriptor vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); // Now perform a copy update that fails due to binding out of bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding#1 with offset index of 1 plus update array offset of 0 and update of 5 " "descriptors oversteps total number of descriptors in set: 2."); memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = ds.set_; copy_ds_update.srcBinding = 1; copy_ds_update.dstSet = ds.set_; copy_ds_update.dstBinding = 0; copy_ds_update.descriptorCount = 5; // ERROR copy 5 descriptors (out of bounds for layout) vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); vkDestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkPositiveLayerTest, CopyNonupdatedDescriptors) { TEST_DESCRIPTION("Copy non-updated descriptors"); unsigned int i; ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet src_ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr}, {2, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); OneOffDescriptorSet dst_ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, 
VK_SHADER_STAGE_ALL, nullptr}, }); m_errorMonitor->ExpectSuccess(); const unsigned int copy_size = 2; VkCopyDescriptorSet copy_ds_update[copy_size]; memset(copy_ds_update, 0, sizeof(copy_ds_update)); for (i = 0; i < copy_size; i++) { copy_ds_update[i].sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update[i].srcSet = src_ds.set_; copy_ds_update[i].srcBinding = i; copy_ds_update[i].dstSet = dst_ds.set_; copy_ds_update[i].dstBinding = i; copy_ds_update[i].descriptorCount = 1; } vkUpdateDescriptorSets(m_device->device(), 0, NULL, copy_size, copy_ds_update); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, NumSamplesMismatch) { // Create CommandBuffer where MSAA samples doesn't match RenderPass // sampleCount m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Num samples mismatch! "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&pipe_ms_state_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // Render triangle (the error should trigger on the attempt to draw). m_commandBuffer->Draw(3, 1, 0, 0); // Finalize recording of the command buffer m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DrawWithPipelineIncompatibleWithRenderPass) { TEST_DESCRIPTION( "Hit RenderPass incompatible cases. 
Initial case is drawing with an active renderpass that's not compatible with the bound " "pipeline state object's creation renderpass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices // Create a renderpass that will be incompatible with default renderpass VkAttachmentReference color_att = {}; color_att.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_att; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; // Format incompatible with PSO RP color attach format B8G8R8A8_UNORM attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {{0, 0}, {64, 64}}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); pipe.CreateVKPipeline(pipeline_layout.handle(), rp); VkCommandBufferInheritanceInfo cbii = {}; cbii.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; cbii.renderPass = rp; cbii.subpass = 0; VkCommandBufferBeginInfo cbbi = {}; cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cbbi.pInheritanceInfo = &cbii; vkBeginCommandBuffer(m_commandBuffer->handle(), &cbbi); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDraw-renderPass-00435"); // Render triangle (the error should trigger on the attempt to draw). m_commandBuffer->Draw(3, 1, 0, 0); // Finalize recording of the command buffer m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, NumBlendAttachMismatch) { // Create Pipeline where the number of blend attachments doesn't match the // number of color attachments. In this case, we don't add any color // blend attachments even though we have a color attachment. 
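    // VkPipelineColorBlendStateCreateInfo::attachmentCount (0 here) must equal the subpass's colorAttachmentCount (1).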
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.SetMSAA(&pipe_ms_state_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, Bad2DArrayImageType) { TEST_DESCRIPTION("Create an image with a flag specifying 2D_ARRAY_COMPATIBLE but not of imageType 3D."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s %s is not supported; skipping\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Trigger check by setting imagecreateflags to 2d_array_compat and imageType to 2D VkImageCreateInfo ici = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR, VK_IMAGE_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {32, 32, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00950"); VkImage image; vkCreateImage(m_device->device(), &ici, NULL, &image); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, Maint1BindingSliceOf3DImage) { TEST_DESCRIPTION( "Attempt to bind a slice of a 3D texture in a descriptor set. 
This is explicitly disallowed by KHR_maintenance1 to keep " "things simple for drivers."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s %s is not supported; skipping\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkResult err; OneOffDescriptorSet set(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkImageCreateInfo ici = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR, VK_IMAGE_TYPE_3D, VK_FORMAT_R8G8B8A8_UNORM, {32, 32, 32}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&ici); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); // Meat of the test. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorImageInfo-imageView-00343"); VkDescriptorImageInfo dii = {VK_NULL_HANDLE, view, VK_IMAGE_LAYOUT_GENERAL}; VkWriteDescriptorSet write = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, nullptr, set.set_, 0, 0, 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, &dii, nullptr, nullptr}; vkUpdateDescriptorSets(m_device->device(), 1, &write, 0, nullptr); m_errorMonitor->VerifyFound(); vkDestroyImageView(m_device->device(), view, nullptr); } TEST_F(VkLayerTest, MissingClearAttachment) { TEST_DESCRIPTION("Points to a wrong colorAttachment index in a VkClearAttachment structure passed to vkCmdClearAttachments"); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-aspectMask-02501"); VKTriangleTest(BsoFailCmdClearAttachments); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, ConfirmNoVLErrorWhenVkCmdClearAttachmentsCalledInSecondaryCB) { TEST_DESCRIPTION( "This test is to verify that when vkCmdClearAttachments is called by a secondary commandbuffer, the validation layers do " "not throw an error if the primary commandbuffer begins a renderpass before executing the secondary commandbuffer."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferBeginInfo info = {}; VkCommandBufferInheritanceInfo hinfo = {}; info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; info.pInheritanceInfo = &hinfo; hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; hinfo.pNext = NULL; hinfo.renderPass = renderPass(); hinfo.subpass = 0; hinfo.framebuffer = m_framebuffer; hinfo.occlusionQueryEnable = VK_FALSE; hinfo.queryFlags = 0; hinfo.pipelineStatistics = 0; secondary.begin(&info); VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; 
color_attachment.clearValue.color.float32[0] = 0.0; color_attachment.clearValue.color.float32[1] = 0.0; color_attachment.clearValue.color.float32[2] = 0.0; color_attachment.clearValue.color.float32[3] = 0.0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1}; vkCmdClearAttachments(secondary.handle(), 1, &color_attachment, 1, &clear_rect); secondary.end(); // Modify clear rect here to verify that it doesn't cause validation error clear_rect = {{{0, 0}, {99999999, 99999999}}, 0, 0}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CmdClearAttachmentTests) { TEST_DESCRIPTION("Various tests for validating usage of vkCmdClearAttachments"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); // We shouldn't need a fragment shader but add it to be able to run // on more devices VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&pipe_ms_state_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Main thing we care about for this test is that the VkImage obj we're // clearing matches Color Attachment of FB // Also pass down other dummy params to keep driver and paramchecker happy VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 1.0; color_attachment.clearValue.color.float32[1] = 1.0; color_attachment.clearValue.color.float32[2] = 1.0; color_attachment.clearValue.color.float32[3] = 1.0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1}; // Call for full-sized FB Color attachment prior to issuing a Draw m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "vkCmdClearAttachments() issued on command buffer object "); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); clear_rect.rect.extent.width = renderPassBeginInfo().renderArea.extent.width + 4; clear_rect.rect.extent.height = clear_rect.rect.extent.height / 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00016"); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); // baseLayer >= view layers clear_rect.rect.extent.width = 
(uint32_t)m_width; clear_rect.baseArrayLayer = 1; clear_rect.layerCount = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00017"); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); // baseLayer + layerCount > view layers clear_rect.rect.extent.width = (uint32_t)m_width; clear_rect.baseArrayLayer = 0; clear_rect.layerCount = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00017"); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, VtxBufferBadIndex) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "but no vertex buffers are attached to this Pipeline State Object"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&pipe_ms_state_ci); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // Don't care about actual data, just need to get to draw to flag error static const float vbo_data[3] = {1.f, 0.f, 1.f}; VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); m_commandBuffer->BindVertexBuffer(&vbo, (VkDeviceSize)0, 1); // VBO idx 1, but no VBO in PSO m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, InvalidQueryPoolCreate) { TEST_DESCRIPTION("Attempt to create a query pool for PIPELINE_STATISTICS without enabling pipeline stats for the device."); ASSERT_NO_FATAL_FAILURE(Init()); vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props); VkDevice local_device; VkDeviceCreateInfo device_create_info = {}; auto features = m_device->phy().features(); // Intentionally disable pipeline stats features.pipelineStatisticsQuery = VK_FALSE; device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_create_info.pNext = NULL; device_create_info.queueCreateInfoCount = queue_info.size(); device_create_info.pQueueCreateInfos = queue_info.data(); device_create_info.enabledLayerCount = 0; device_create_info.ppEnabledLayerNames = NULL; 
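    // Hand the new device the feature set with pipelineStatisticsQuery forced off.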
device_create_info.pEnabledFeatures = &features; VkResult err = vkCreateDevice(gpu(), &device_create_info, nullptr, &local_device); ASSERT_VK_SUCCESS(err); VkQueryPoolCreateInfo qpci{}; qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS; qpci.queryCount = 1; VkQueryPool query_pool; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkQueryPoolCreateInfo-queryType-00791"); vkCreateQueryPool(local_device, &qpci, nullptr, &query_pool); m_errorMonitor->VerifyFound(); vkDestroyDevice(local_device, nullptr); } TEST_F(VkLayerTest, UnclosedQuery) { TEST_DESCRIPTION("End a command buffer with a query still in progress."); const char *invalid_query = "Ending command buffer with in progress query: queryPool 0x"; ASSERT_NO_FATAL_FAILURE(Init()); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_query); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info = {}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION; query_pool_create_info.queryCount = 1; vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool); vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0 /*startQuery*/, 1 /*queryCount*/); vkCmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0); vkEndCommandBuffer(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); vkDestroyQueryPool(m_device->device(), query_pool, nullptr); vkDestroyEvent(m_device->device(), event, nullptr); } TEST_F(VkLayerTest, QueryPreciseBit) { TEST_DESCRIPTION("Check for correct Query Precise Bit circumstances."); ASSERT_NO_FATAL_FAILURE(Init()); std::vector<const char *> device_extension_names; auto features = m_device->phy().features(); // Test for precise bit when query type is not OCCLUSION if (features.occlusionQueryPrecise) { VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->handle(), &event_create_info, nullptr, &event); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBeginQuery-queryType-00800"); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info = {}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS; query_pool_create_info.queryCount = 1; vkCreateQueryPool(m_device->handle(), &query_pool_create_info, nullptr, &query_pool); vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1); vkCmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, VK_QUERY_CONTROL_PRECISE_BIT); vkCmdEndQuery(m_commandBuffer->handle(), query_pool, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); vkDestroyQueryPool(m_device->handle(), query_pool, nullptr); vkDestroyEvent(m_device->handle(), event, nullptr); } // Test for precise bit when precise feature is not available features.occlusionQueryPrecise = false; VkDeviceObj test_device(0, gpu(), device_extension_names, &features); VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = 
VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = test_device.graphics_queue_node_index_; VkCommandPool command_pool; vkCreateCommandPool(test_device.handle(), &pool_create_info, nullptr, &command_pool); VkCommandBufferAllocateInfo cmd = {}; cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmd.pNext = NULL; cmd.commandPool = command_pool; cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; cmd.commandBufferCount = 1; VkCommandBuffer cmd_buffer; VkResult err = vkAllocateCommandBuffers(test_device.handle(), &cmd, &cmd_buffer); ASSERT_VK_SUCCESS(err); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(test_device.handle(), &event_create_info, nullptr, &event); VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr}; vkBeginCommandBuffer(cmd_buffer, &begin_info); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBeginQuery-queryType-00800"); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info = {}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION; query_pool_create_info.queryCount = 1; vkCreateQueryPool(test_device.handle(), &query_pool_create_info, nullptr, &query_pool); vkCmdResetQueryPool(cmd_buffer, query_pool, 0, 1); vkCmdBeginQuery(cmd_buffer, query_pool, 0, VK_QUERY_CONTROL_PRECISE_BIT); vkCmdEndQuery(cmd_buffer, query_pool, 0); m_errorMonitor->VerifyFound(); vkEndCommandBuffer(cmd_buffer); vkDestroyQueryPool(test_device.handle(), query_pool, nullptr); vkDestroyEvent(test_device.handle(), event, nullptr); vkDestroyCommandPool(test_device.handle(), command_pool, nullptr); } TEST_F(VkLayerTest, VertexBufferInvalid) { TEST_DESCRIPTION( "Submit a command buffer using deleted vertex buffer, delete a buffer twice, use an invalid offset for each buffer type, " "and attempt to bind a null buffer"); const char *deleted_buffer_in_command_buffer = "Cannot submit cmd buffer using deleted buffer "; const char *invalid_offset_message = "VUID-vkBindBufferMemory-memoryOffset-01036"; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = nullptr; const VkPipelineLayoutObj pipeline_layout(m_device); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&pipe_ms_state_ci); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); { // Create and bind a vertex buffer in a reduced scope, which will cause // it to be deleted upon leaving this scope const float vbo_data[3] = {1.f, 0.f, 1.f}; VkVerticesObj 
draw_verticies(m_device, 1, 1, sizeof(vbo_data[0]), sizeof(vbo_data) / sizeof(vbo_data[0]), vbo_data); draw_verticies.BindVertexBuffers(m_commandBuffer->handle()); draw_verticies.AddVertexInputToPipe(pipe); } m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, deleted_buffer_in_command_buffer); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyFound(); { // Create and bind a vertex buffer in a reduced scope, and delete it // twice, the second through the destructor VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VkBufferTest::eDoubleDelete); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyBuffer-buffer-parameter"); buffer_test.TestDoubleDestroy(); } m_errorMonitor->VerifyFound(); m_errorMonitor->SetUnexpectedError("value of pCreateInfo->usage must not be 0"); if (VkBufferTest::GetTestConditionValid(m_device, VkBufferTest::eInvalidMemoryOffset)) { // Create and bind a memory buffer with an invalid offset. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_offset_message); m_errorMonitor->SetUnexpectedError( "If buffer was created with the VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT or VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, " "memoryOffset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment"); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VkBufferTest::eInvalidMemoryOffset); (void)buffer_test; m_errorMonitor->VerifyFound(); } { // Attempt to bind a null buffer. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkBindBufferMemory: required parameter buffer specified as VK_NULL_HANDLE"); VkBufferTest buffer_test(m_device, 0, VkBufferTest::eBindNullBuffer); (void)buffer_test; m_errorMonitor->VerifyFound(); } { // Attempt to bind a fake buffer. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-parameter"); VkBufferTest buffer_test(m_device, 0, VkBufferTest::eBindFakeBuffer); (void)buffer_test; m_errorMonitor->VerifyFound(); } { // Attempt to use an invalid handle to delete a buffer. 
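        // eFreeInvalidHandle ends up calling vkFreeMemory with a bogus VkDeviceMemory handle, which handle validation should reject.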
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeMemory-memory-parameter"); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VkBufferTest::eFreeInvalidHandle); (void)buffer_test; } m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, BadVertexBufferOffset) { TEST_DESCRIPTION("Submit an offset past the end of a vertex buffer"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); static const float vbo_data[3] = {1.f, 0.f, 1.f}; VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindVertexBuffers-pOffsets-00626"); m_commandBuffer->BindVertexBuffer(&vbo, (VkDeviceSize)(3 * sizeof(float)), 1); // Offset at the end of the buffer m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, InvalidVertexAttributeAlignment) { TEST_DESCRIPTION("Check for proper aligment of attribAddress which depends on a bound pipeline and on a bound vertex buffer"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkPipelineLayoutObj pipeline_layout(m_device); struct VboEntry { uint16_t input0[2]; uint32_t input1; float input2[4]; }; const unsigned vbo_entry_count = 3; const VboEntry vbo_data[vbo_entry_count] = {}; VkConstantBufferObj vbo(m_device, static_cast<int>(sizeof(VboEntry) * vbo_entry_count), reinterpret_cast<const void *>(vbo_data), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); VkVertexInputBindingDescription input_binding; input_binding.binding = 0; input_binding.stride = sizeof(VboEntry); input_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; VkVertexInputAttributeDescription input_attribs[3]; input_attribs[0].binding = 0; // Location switch between attrib[0] and attrib[1] is intentional input_attribs[0].location = 1; input_attribs[0].format = VK_FORMAT_A8B8G8R8_UNORM_PACK32; input_attribs[0].offset = offsetof(VboEntry, input1); input_attribs[1].binding = 0; input_attribs[1].location = 0; input_attribs[1].format = VK_FORMAT_R16G16_UNORM; input_attribs[1].offset = offsetof(VboEntry, input0); input_attribs[2].binding = 0; input_attribs[2].location = 2; input_attribs[2].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[2].offset = offsetof(VboEntry, input2); char const *vsSource = "#version 450\n" "\n" "layout(location = 0) in vec2 input0;" "layout(location = 1) in vec4 input1;" "layout(location = 2) in vec4 input2;" "\n" "void main(){\n" " gl_Position = input1 + input2;\n" " gl_Position.xy += input0;\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe1(m_device); pipe1.AddDefaultColorAttachment(); pipe1.AddShader(&vs); pipe1.AddShader(&fs); pipe1.AddVertexInputBindings(&input_binding, 1); pipe1.AddVertexInputAttribs(&input_attribs[0], 3); pipe1.SetViewport(m_viewports); pipe1.SetScissor(m_scissors); pipe1.CreateVKPipeline(pipeline_layout.handle(), renderPass()); input_binding.stride = 6; VkPipelineObj pipe2(m_device); pipe2.AddDefaultColorAttachment(); pipe2.AddShader(&vs); pipe2.AddShader(&fs); pipe2.AddVertexInputBindings(&input_binding, 1); 
pipe2.AddVertexInputAttribs(&input_attribs[0], 3); pipe2.SetViewport(m_viewports); pipe2.SetScissor(m_scissors); pipe2.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Test with invalid buffer offset VkDeviceSize offset = 1; vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe1.handle()); vkCmdBindVertexBuffers(m_commandBuffer->handle(), 0, 1, &vbo.handle(), &offset); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid attribAddress alignment for vertex attribute 0"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid attribAddress alignment for vertex attribute 1"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid attribAddress alignment for vertex attribute 2"); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // Test with invalid buffer stride offset = 0; vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe2.handle()); vkCmdBindVertexBuffers(m_commandBuffer->handle(), 0, 1, &vbo.handle(), &offset); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid attribAddress alignment for vertex attribute 0"); // Attribute[1] is aligned properly even with a wrong stride m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid attribAddress alignment for vertex attribute 2"); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, InvalidVertexBindingDescriptions) { TEST_DESCRIPTION( "Attempt to create a graphics pipeline where:" "1) count of vertex bindings exceeds device's maxVertexInputBindings limit" "2) requested bindings include a duplicate binding value"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkPipelineLayoutObj pipeline_layout(m_device); const uint32_t binding_count = m_device->props.limits.maxVertexInputBindings + 1; std::vector<VkVertexInputBindingDescription> input_bindings(binding_count); for (uint32_t i = 0; i < binding_count; ++i) { input_bindings[i].binding = i; input_bindings[i].stride = 4; input_bindings[i].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; } // Let the last binding description use same binding as the first one input_bindings[binding_count - 1].binding = 0; VkVertexInputAttributeDescription input_attrib; input_attrib.binding = 0; input_attrib.location = 0; input_attrib.format = VK_FORMAT_R32G32B32_SFLOAT; input_attrib.offset = 0; VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(input_bindings.data(), binding_count); pipe.AddVertexInputAttribs(&input_attrib, 1); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-vertexBindingDescriptionCount-00613"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-pVertexBindingDescriptions-00616"); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidVertexAttributeDescriptions) { TEST_DESCRIPTION( "Attempt to create a graphics pipeline where:" "1) count of vertex attributes 
exceeds device's maxVertexInputAttributes limit" "2) requested location include a duplicate location value" "3) binding used by one attribute is not defined by a binding description"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkPipelineLayoutObj pipeline_layout(m_device); VkVertexInputBindingDescription input_binding; input_binding.binding = 0; input_binding.stride = 4; input_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; const uint32_t attribute_count = m_device->props.limits.maxVertexInputAttributes + 1; std::vector<VkVertexInputAttributeDescription> input_attribs(attribute_count); for (uint32_t i = 0; i < attribute_count; ++i) { input_attribs[i].binding = 0; input_attribs[i].location = i; input_attribs[i].format = VK_FORMAT_R32G32B32_SFLOAT; input_attribs[i].offset = 0; } // Let the last input_attribs description use same location as the first one input_attribs[attribute_count - 1].location = 0; // Let the last input_attribs description use binding which is not defined input_attribs[attribute_count - 1].binding = 1; VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs.data(), attribute_count); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-vertexAttributeDescriptionCount-00614"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-binding-00615"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-pVertexAttributeDescriptions-00617"); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } // INVALID_IMAGE_LAYOUT tests (one other case is hit by MapMemWithoutHostVisibleBit and not here) TEST_F(VkLayerTest, InvalidImageLayout) { TEST_DESCRIPTION( "Hit all possible validation checks associated with the UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout error. " "Generally these involve having images in the wrong layout when they're copied or transitioned."); // 3 in ValidateCmdBufImageLayouts // * -1 Attempt to submit cmd buf w/ deleted image // * -2 Cmd buf submit of image w/ layout not matching first use w/ subresource // * -3 Cmd buf submit of image w/ layout not matching first use w/o subresource ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } // Create src & dst images to use for copy operations VkImage src_image; VkImage dst_image; VkImage depth_image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 4; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_create_info.flags = 0; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &src_image); ASSERT_VK_SUCCESS(err); image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dst_image); ASSERT_VK_SUCCESS(err); image_create_info.format = VK_FORMAT_D16_UNORM; image_create_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &depth_image); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryRequirements img_mem_reqs = {}; VkMemoryAllocateInfo mem_alloc = {}; VkDeviceMemory src_image_mem, dst_image_mem, depth_image_mem; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), src_image, &img_mem_reqs); mem_alloc.allocationSize = img_mem_reqs.size; bool pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &src_image_mem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dst_image, &img_mem_reqs); mem_alloc.allocationSize = img_mem_reqs.size; pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &dst_image_mem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), depth_image, &img_mem_reqs); mem_alloc.allocationSize = img_mem_reqs.size; pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &depth_image_mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), src_image, src_image_mem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dst_image, dst_image_mem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), depth_image, depth_image_mem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.srcOffset.x = 0; copy_region.srcOffset.y = 0; copy_region.srcOffset.z = 0; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.mipLevel = 0; copy_region.dstSubresource.baseArrayLayer = 0; 
copy_region.dstSubresource.layerCount = 1; copy_region.dstOffset.x = 0; copy_region.dstOffset.y = 0; copy_region.dstOffset.z = 0; copy_region.extent.width = 1; copy_region.extent.height = 1; copy_region.extent.depth = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL."); m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // The first call hits the expected WARNING and skips the call down the chain, so call a second time to call down chain and // update layer state m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL."); m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); // Now cause error due to src image layout changing m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImageLayout-00128"); m_errorMonitor->SetUnexpectedError("is VK_IMAGE_LAYOUT_UNDEFINED but can only be VK_IMAGE_LAYOUT"); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_UNDEFINED, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Final src error is due to bad layout type m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImageLayout-00129"); m_errorMonitor->SetUnexpectedError( "with specific layout VK_IMAGE_LAYOUT_UNDEFINED that doesn't match the actual current layout VK_IMAGE_LAYOUT_GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_UNDEFINED, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Now verify same checks for dst m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL."); m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Now cause error due to src image layout changing m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstImageLayout-00133"); m_errorMonitor->SetUnexpectedError( "is VK_IMAGE_LAYOUT_UNDEFINED but can only be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_UNDEFINED, 1, &copy_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstImageLayout-00134"); m_errorMonitor->SetUnexpectedError( "with specific layout VK_IMAGE_LAYOUT_UNDEFINED that doesn't match the actual current layout VK_IMAGE_LAYOUT_GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_UNDEFINED, 1, &copy_region); m_errorMonitor->VerifyFound(); // Convert dst and depth images to TRANSFER_DST for subsequent tests VkImageMemoryBarrier transfer_dst_image_barrier[1] = {}; transfer_dst_image_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; 
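// The barrier fields filled in below transition the full mip/array range of dst_image (color aspect)
// and then depth_image (depth aspect) from UNDEFINED to TRANSFER_DST_OPTIMAL, so the clear-image
// error cases that follow start from a known, valid layout.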
transfer_dst_image_barrier[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; transfer_dst_image_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; transfer_dst_image_barrier[0].srcAccessMask = 0; transfer_dst_image_barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; transfer_dst_image_barrier[0].image = dst_image; transfer_dst_image_barrier[0].subresourceRange.layerCount = image_create_info.arrayLayers; transfer_dst_image_barrier[0].subresourceRange.levelCount = image_create_info.mipLevels; transfer_dst_image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, transfer_dst_image_barrier); transfer_dst_image_barrier[0].image = depth_image; transfer_dst_image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, transfer_dst_image_barrier); // Cause errors due to clearing with invalid image layouts VkClearColorValue color_clear_value = {}; VkImageSubresourceRange clear_range; clear_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; clear_range.baseMipLevel = 0; clear_range.baseArrayLayer = 0; clear_range.layerCount = 1; clear_range.levelCount = 1; // Fail due to explicitly prohibited layout for color clear (only GENERAL and TRANSFER_DST are permitted). // Since the image is currently not in UNDEFINED layout, this will emit two errors. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00005"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00004"); m_commandBuffer->ClearColorImage(dst_image, VK_IMAGE_LAYOUT_UNDEFINED, &color_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); // Fail due to provided layout not matching actual current layout for color clear. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00004"); m_commandBuffer->ClearColorImage(dst_image, VK_IMAGE_LAYOUT_GENERAL, &color_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); VkClearDepthStencilValue depth_clear_value = {}; clear_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // Fail due to explicitly prohibited layout for depth clear (only GENERAL and TRANSFER_DST are permitted). // Since the image is currently not in UNDEFINED layout, this will emit two errors. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00012"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00011"); m_commandBuffer->ClearDepthStencilImage(depth_image, VK_IMAGE_LAYOUT_UNDEFINED, &depth_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); // Fail due to provided layout not matching actual current layout for depth clear. 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00011"); m_commandBuffer->ClearDepthStencilImage(depth_image, VK_IMAGE_LAYOUT_GENERAL, &depth_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); // Now cause error due to bad image layout transition in PipelineBarrier VkImageMemoryBarrier image_barrier[1] = {}; image_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_barrier[0].oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; image_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; image_barrier[0].image = src_image; image_barrier[0].subresourceRange.layerCount = image_create_info.arrayLayers; image_barrier[0].subresourceRange.levelCount = image_create_info.mipLevels; image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01197"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01210"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, image_barrier); m_errorMonitor->VerifyFound(); // Finally some layout errors at RenderPass create time // Just hacking in specific state to get to the errors we want so don't copy this unless you know what you're doing. VkAttachmentReference attach = {}; // perf warning for GENERAL layout w/ non-DS input attachment attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.inputAttachmentCount = 1; subpass.pInputAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_UNDEFINED; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); // error w/ non-general layout attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Layout for input attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be READ_ONLY_OPTIMAL or GENERAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); subpass.inputAttachmentCount = 0; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach; attach.layout = VK_IMAGE_LAYOUT_GENERAL; // perf warning for GENERAL layout on color attachment m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); // error w/ non-color opt or GENERAL layout for color attachment attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Layout for color attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); 
m_errorMonitor->VerifyFound(); subpass.colorAttachmentCount = 0; subpass.pDepthStencilAttachment = &attach; attach.layout = VK_IMAGE_LAYOUT_GENERAL; // perf warning for GENERAL layout on DS attachment m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "GENERAL layout for depth attachment may not give optimal performance."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); // error w/ non-ds opt or GENERAL layout for color attachment attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Layout for depth attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be " "DEPTH_STENCIL_ATTACHMENT_OPTIMAL, DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); // For this error we need a valid renderpass so create default one attach.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; attach.attachment = 0; attach_desc.format = depth_format; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE; attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; // Can't do a CLEAR load on READ_ONLY initialLayout attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; attach_desc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "with invalid first layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL"); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), src_image_mem, NULL); vkFreeMemory(m_device->device(), dst_image_mem, NULL); vkFreeMemory(m_device->device(), depth_image_mem, NULL); vkDestroyImage(m_device->device(), src_image, NULL); vkDestroyImage(m_device->device(), dst_image, NULL); vkDestroyImage(m_device->device(), depth_image, NULL); } TEST_F(VkLayerTest, InvalidStorageImageLayout) { TEST_DESCRIPTION("Attempt to update a STORAGE_IMAGE descriptor w/o GENERAL layout."); ASSERT_NO_FATAL_FAILURE(Init()); const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM; VkImageTiling tiling; VkFormatProperties format_properties; vkGetPhysicalDeviceFormatProperties(gpu(), tex_format, &format_properties); if (format_properties.linearTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) { tiling = VK_IMAGE_TILING_LINEAR; } else if (format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) { tiling = VK_IMAGE_TILING_OPTIMAL; } else { printf("%s Device does not support VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT; skipped.\n", kSkipPrefix); return; } OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkImageObj image(m_device); image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_STORAGE_BIT, tiling, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(tex_format); VkDescriptorImageInfo image_info = {}; image_info.imageView = view; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; 
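// image_info above deliberately reports VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, which is not a
// legal layout for a STORAGE_IMAGE descriptor. A minimal sketch of the valid counterpart (not
// executed here) would simply use the layout the validation message asks for:
//   image_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
//   vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);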
descriptor_write.pImageInfo = &image_info; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type is being updated with layout " "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL but according to spec "); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, NonSimultaneousSecondaryMarksPrimary) { ASSERT_NO_FATAL_FAILURE(Init()); const char *simultaneous_use_message = "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer"; VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); secondary.end(); VkCommandBufferBeginInfo cbbi = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, nullptr, }; m_commandBuffer->begin(&cbbi); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, simultaneous_use_message); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, SimultaneousUseSecondaryTwoExecutes) { ASSERT_NO_FATAL_FAILURE(Init()); const char *simultaneous_use_message = "without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!"; VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferInheritanceInfo inh = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, nullptr, }; VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, &inh}; secondary.begin(&cbbi); secondary.end(); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, SimultaneousUseSecondarySingleExecute) { ASSERT_NO_FATAL_FAILURE(Init()); // variation on previous test executing the same CB twice in the same // CmdExecuteCommands call const char *simultaneous_use_message = "without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!"; VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferInheritanceInfo inh = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, nullptr, }; VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, &inh}; secondary.begin(&cbbi); secondary.end(); m_commandBuffer->begin(); VkCommandBuffer cbs[] = {secondary.handle(), secondary.handle()}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message); vkCmdExecuteCommands(m_commandBuffer->handle(), 2, cbs); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, SimultaneousUseOneShot) { TEST_DESCRIPTION("Submit the same command buffer twice in one submit looking for simultaneous use and one time submit errors"); const char *simultaneous_use_message = "is already in use and is not marked for simultaneous use"; const char *one_shot_message = "VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted"; ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBuffer cmd_bufs[2]; VkCommandBufferAllocateInfo alloc_info; alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.commandBufferCount = 2; alloc_info.commandPool = m_commandPool->handle(); 
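// Two primary command buffers are allocated from the default pool: cmd_bufs[0] is submitted twice
// in a single vkQueueSubmit to provoke the simultaneous-use error, and cmd_bufs[1], recorded with
// ONE_TIME_SUBMIT (plus SIMULTANEOUS_USE), is submitted the same way to provoke the one-time-submit error.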
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &alloc_info, cmd_bufs); VkCommandBufferBeginInfo cb_binfo; cb_binfo.pNext = NULL; cb_binfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cb_binfo.pInheritanceInfo = VK_NULL_HANDLE; cb_binfo.flags = 0; vkBeginCommandBuffer(cmd_bufs[0], &cb_binfo); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(cmd_bufs[0], 0, 1, &viewport); vkEndCommandBuffer(cmd_bufs[0]); VkCommandBuffer duplicates[2] = {cmd_bufs[0], cmd_bufs[0]}; VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 2; submit_info.pCommandBuffers = duplicates; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Set one time use and now look for one time submit duplicates[0] = duplicates[1] = cmd_bufs[1]; cb_binfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT | VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; vkBeginCommandBuffer(cmd_bufs[1], &cb_binfo); vkCmdSetViewport(cmd_bufs[1], 0, 1, &viewport); vkEndCommandBuffer(cmd_bufs[1]); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, one_shot_message); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); } TEST_F(VkLayerTest, StageMaskGsTsEnabled) { TEST_DESCRIPTION( "Attempt to use a stageMask w/ geometry shader and tesselation shader bits enabled when those features are disabled on the " "device."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); std::vector<const char *> device_extension_names; auto features = m_device->phy().features(); // Make sure gs & ts are disabled features.geometryShader = false; features.tessellationShader = false; // The sacrificial device object VkDeviceObj test_device(0, gpu(), device_extension_names, &features); VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = test_device.graphics_queue_node_index_; VkCommandPool command_pool; vkCreateCommandPool(test_device.handle(), &pool_create_info, nullptr, &command_pool); VkCommandBufferAllocateInfo cmd = {}; cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmd.pNext = NULL; cmd.commandPool = command_pool; cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; cmd.commandBufferCount = 1; VkCommandBuffer cmd_buffer; VkResult err = vkAllocateCommandBuffers(test_device.handle(), &cmd, &cmd_buffer); ASSERT_VK_SUCCESS(err); VkEvent event; VkEventCreateInfo evci = {}; evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; VkResult result = vkCreateEvent(test_device.handle(), &evci, NULL, &event); ASSERT_VK_SUCCESS(result); VkCommandBufferBeginInfo cbbi = {}; cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(cmd_buffer, &cbbi); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-01150"); vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-01151"); vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT); m_errorMonitor->VerifyFound(); vkDestroyEvent(test_device.handle(), event, NULL); 
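// Destroying the sacrificial device's command pool below also frees cmd_buffer, which was allocated from it.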
vkDestroyCommandPool(test_device.handle(), command_pool, NULL); } TEST_F(VkLayerTest, EventInUseDestroyedSignaled) { ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); VkEvent event; VkEventCreateInfo event_create_info = {}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT); m_commandBuffer->end(); vkDestroyEvent(m_device->device(), event, nullptr); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is invalid because bound"); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InUseDestroyedSignaled) { TEST_DESCRIPTION( "Use vkCmdExecuteCommands with invalid state in primary and secondary command buffers. Delete objects that are in use. " "Call VkQueueSubmit with an event that has been deleted."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->ExpectSuccess(); VkSemaphoreCreateInfo semaphore_create_info = {}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; VkSemaphore semaphore; ASSERT_VK_SUCCESS(vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore)); VkFenceCreateInfo fence_create_info = {}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; VkFence fence; ASSERT_VK_SUCCESS(vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence)); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); VkDescriptorBufferInfo buffer_info = {}; buffer_info.buffer = buffer_test.GetBuffer(); buffer_info.offset = 0; buffer_info.range = 1024; VkWriteDescriptorSet write_descriptor_set = {}; write_descriptor_set.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; write_descriptor_set.dstSet = ds.set_; write_descriptor_set.descriptorCount = 1; write_descriptor_set.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; write_descriptor_set.pBufferInfo = &buffer_info; vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor_set, 0, nullptr); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); pipe.CreateVKPipeline(pipeline_layout.handle(), m_renderPass); VkEvent event; VkEventCreateInfo event_create_info = {}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); m_commandBuffer->begin(); vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, NULL); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; 
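// The submit built below signals the semaphore and is fenced, so the command buffer, the event it
// sets, the semaphore, and the fence are all in flight; the vkDestroy* calls that follow are
// therefore expected to fail validation until vkQueueWaitIdle() has completed.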
submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); m_errorMonitor->Reset(); // resume logmsg processing m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyEvent-event-01145"); vkDestroyEvent(m_device->device(), event, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroySemaphore-semaphore-01137"); vkDestroySemaphore(m_device->device(), semaphore, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Fence 0x"); vkDestroyFence(m_device->device(), fence, nullptr); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If semaphore is not VK_NULL_HANDLE, semaphore must be a valid VkSemaphore handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Semaphore obj"); vkDestroySemaphore(m_device->device(), semaphore, nullptr); m_errorMonitor->SetUnexpectedError("If fence is not VK_NULL_HANDLE, fence must be a valid VkFence handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Fence obj"); vkDestroyFence(m_device->device(), fence, nullptr); m_errorMonitor->SetUnexpectedError("If event is not VK_NULL_HANDLE, event must be a valid VkEvent handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Event obj"); vkDestroyEvent(m_device->device(), event, nullptr); } TEST_F(VkLayerTest, QueryPoolInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use query pool."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_ci{}; query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_ci.queryType = VK_QUERY_TYPE_TIMESTAMP; query_pool_ci.queryCount = 1; vkCreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool); m_commandBuffer->begin(); // Reset query pool to create binding with cmd buffer vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetQueryPoolResults-queryType-00818"); uint32_t data_space[16]; m_errorMonitor->SetUnexpectedError("Cannot get query results on queryPool"); vkGetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space), &data_space, sizeof(uint32_t), VK_QUERY_RESULT_PARTIAL_BIT); m_errorMonitor->VerifyFound(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then destroy query pool while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyQueryPool-queryPool-00793"); vkDestroyQueryPool(m_device->handle(), query_pool, NULL); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Now that cmd buffer done we can safely destroy query_pool m_errorMonitor->SetUnexpectedError("If queryPool is not VK_NULL_HANDLE, queryPool must be a valid VkQueryPool handle"); m_errorMonitor->SetUnexpectedError("Unable to remove QueryPool obj"); vkDestroyQueryPool(m_device->handle(), query_pool, NULL); } TEST_F(VkLayerTest, PipelineInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use 
pipeline."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkPipelineLayoutObj pipeline_layout(m_device); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyPipeline-pipeline-00765"); // Create PSO to be used for draw-time errors below VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Store pipeline handle so we can actually delete it before test finishes VkPipeline delete_this_pipeline; { // Scope pipeline so it will be auto-deleted VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); delete_this_pipeline = pipe.handle(); m_commandBuffer->begin(); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then pipeline destroyed while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } // Pipeline deletion triggered here m_errorMonitor->VerifyFound(); // Make sure queue finished and then actually delete pipeline vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If pipeline is not VK_NULL_HANDLE, pipeline must be a valid VkPipeline handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Pipeline obj"); vkDestroyPipeline(m_device->handle(), delete_this_pipeline, nullptr); } TEST_F(VkLayerTest, CreateImageViewBreaksParameterCompatibilityRequirements) { TEST_DESCRIPTION( "Attempts to create an Image View with a view type that does not match the image type it is being created from."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitState()); VkPhysicalDeviceMemoryProperties memProps; vkGetPhysicalDeviceMemoryProperties(m_device->phy().handle(), &memProps); // Test mismatch detection for image of type VK_IMAGE_TYPE_1D VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_1D, VK_FORMAT_R8G8B8A8_UNORM, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image1D(m_device); image1D.init(&imgInfo); ASSERT_TRUE(image1D.initialized()); // Initialize VkImageViewCreateInfo with mismatched viewType VkImageView imageView; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image1D.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_2D is not compatible with image"); vkCreateImageView(m_device->device(), 
&ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Test mismatch detection for image of type VK_IMAGE_TYPE_2D imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {1, 1, 1}, 1, 6, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image2D(m_device); image2D.init(&imgInfo); ASSERT_TRUE(image2D.initialized()); // Initialize VkImageViewCreateInfo with mismatched viewType ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image2D.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_3D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_3D is not compatible with image"); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Change VkImageViewCreateInfo to different mismatched viewType ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE; ivci.subresourceRange.layerCount = 6; // Test for error message m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01003"); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Test mismatch detection for image of type VK_IMAGE_TYPE_3D imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_3D, VK_FORMAT_R8G8B8A8_UNORM, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image3D(m_device); image3D.init(&imgInfo); ASSERT_TRUE(image3D.initialized()); // Initialize VkImageViewCreateInfo with mismatched viewType ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image3D.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_1D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_1D is not compatible with image"); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Change VkImageViewCreateInfo to different mismatched viewType ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; // Test for error message if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01005"); } else { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subResourceRange-01021"); } vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Check if the device can make the image required for this test case. 
VkImageFormatProperties formProps = {{0, 0, 0}, 0, 0, 0, 0}; VkResult res = vkGetPhysicalDeviceImageFormatProperties( m_device->phy().handle(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_3D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR | VK_IMAGE_CREATE_SPARSE_BINDING_BIT, &formProps); // If not, skip this part of the test. if (res || !m_device->phy().features().sparseBinding || !DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { printf("%s %s is not supported.\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME); return; } // Initialize VkImageCreateInfo with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR and VK_IMAGE_CREATE_SPARSE_BINDING_BIT which // are incompatible create flags. imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR | VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_TYPE_3D, VK_FORMAT_R8G8B8A8_UNORM, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImage imageSparse; // Creating a sparse image means we should not bind memory to it. res = vkCreateImage(m_device->device(), &imgInfo, NULL, &imageSparse); ASSERT_FALSE(res); // Initialize VkImageViewCreateInfo to create a view that will attempt to utilize VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR. ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = imageSparse; ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or " "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled."); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Clean up vkDestroyImage(m_device->device(), imageSparse, nullptr); } TEST_F(VkLayerTest, CreateImageViewFormatFeatureMismatch) { TEST_DESCRIPTION("Create view with a format that does not have the same features as the image format."); if (!EnableDeviceProfileLayer()) { printf("%s Failed to enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr; PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr; // Load required functions if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { printf("%s Failed to device profile layer.\n", kSkipPrefix); return; } // List of features to be tested VkFormatFeatureFlagBits features[] = {VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT, VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT}; uint32_t feature_count = 4; // List of usage cases for each feature test VkImageUsageFlags usages[] = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_STORAGE_BIT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT}; // List of errors that will be thrown in order of tests run std::string optimal_error_codes[] = { "VUID-VkImageViewCreateInfo-usage-02274", "VUID-VkImageViewCreateInfo-usage-02275", "VUID-VkImageViewCreateInfo-usage-02276", "VUID-VkImageViewCreateInfo-usage-02277", }; VkFormatProperties formatProps; // First three tests uint32_t i = 0; for (i = 0; i < (feature_count - 1); i++) { // Modify formats to have mismatched features // Format for image fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, &formatProps); formatProps.optimalTilingFeatures |= features[i]; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, formatProps); memset(&formatProps, 0, sizeof(formatProps)); // Format for view fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, &formatProps); formatProps.optimalTilingFeatures = features[(i + 1) % feature_count]; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, formatProps); // Create image with modified format VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_R32G32B32A32_UINT, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, usages[i], VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&imgInfo); ASSERT_TRUE(image.initialized()); VkImageView imageView; // Initialize VkImageViewCreateInfo with modified format VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R32G32B32A32_SINT; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, optimal_error_codes[i]); VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); if (!res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } } // Test for VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT. 
Needs special formats // Only run this test if format supported if (!ImageFormatIsSupported(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_TILING_OPTIMAL)) { printf("%s VK_FORMAT_D24_UNORM_S8_UINT format not supported - skipped.\n", kSkipPrefix); return; } // Modify formats to have mismatched features // Format for image fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, &formatProps); formatProps.optimalTilingFeatures |= features[i]; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, formatProps); memset(&formatProps, 0, sizeof(formatProps)); // Format for view fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, &formatProps); formatProps.optimalTilingFeatures = features[(i + 1) % feature_count]; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, formatProps); // Create image with modified format VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_D24_UNORM_S8_UINT, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, usages[i], VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&imgInfo); ASSERT_TRUE(image.initialized()); VkImageView imageView; // Initialize VkImageViewCreateInfo with modified format VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_D32_SFLOAT_S8_UINT; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, optimal_error_codes[i]); VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); if (!res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } } TEST_F(VkLayerTest, InvalidImageViewUsageCreateInfo) { TEST_DESCRIPTION("Usage modification via a chained VkImageViewUsageCreateInfo struct"); if (!EnableDeviceProfileLayer()) { printf("%s Test requires DeviceProfileLayer, unavailable - skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) { printf("%s Test requires API >= 1.1 or KHR_MAINTENANCE2 extension, unavailable - skipped.\n", kSkipPrefix); return; } m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr; PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr; // Load required functions if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { printf("%s Required extensions are not avaiable.\n", kSkipPrefix); return; } VkFormatProperties formatProps; // Ensure image format claims support for sampled and storage, excludes color attachment memset(&formatProps, 0, sizeof(formatProps)); fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, &formatProps); formatProps.optimalTilingFeatures |= (VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | 
VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT); formatProps.optimalTilingFeatures = formatProps.optimalTilingFeatures & ~VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, formatProps); // Create image with sampled and storage usages VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_R32G32B32A32_UINT, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&imgInfo); ASSERT_TRUE(image.initialized()); // Force the imageview format to exclude storage feature, include color attachment memset(&formatProps, 0, sizeof(formatProps)); fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, &formatProps); formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT; formatProps.optimalTilingFeatures = (formatProps.optimalTilingFeatures & ~VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT); fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, formatProps); VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R32G32B32A32_SINT; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // ImageView creation should fail because view format doesn't support all the underlying image's usages VkImageView imageView; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-usage-02275"); VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Add a chained VkImageViewUsageCreateInfo to override original image usage bits, removing storage VkImageViewUsageCreateInfo usage_ci = {VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, nullptr, VK_IMAGE_USAGE_SAMPLED_BIT}; // Link the VkImageViewUsageCreateInfo struct into the view's create info pNext chain ivci.pNext = &usage_ci; // ImageView should now succeed without error m_errorMonitor->ExpectSuccess(); res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyNotFound(); if (VK_SUCCESS == res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } // Try a zero usage field usage_ci.usage = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-requiredbitmask"); res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } // Try a usage field with a bit not supported by underlying image usage_ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-01587"); res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } // Try an illegal bit in usage field usage_ci.usage = 0x10000000 | VK_IMAGE_USAGE_SAMPLED_BIT; 
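// 0x10000000 lies outside the VkImageUsageFlagBits values defined in the headers this test was
// written against, so plain parameter validation is expected to reject it below.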
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-parameter"); res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } } TEST_F(VkLayerTest, ImageViewInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use imageView."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; err = vkCreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo image_info{}; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; image_info.imageView = view; image_info.sampler = sampler; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &image_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to use the sampler char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyImageView-imageView-01026"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer then destroy sampler VkSubmitInfo submit_info = {}; 
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then destroy imageView while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkDestroyImageView(m_device->device(), view, nullptr); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Now we can actually destroy imageView m_errorMonitor->SetUnexpectedError("If imageView is not VK_NULL_HANDLE, imageView must be a valid VkImageView handle"); m_errorMonitor->SetUnexpectedError("Unable to remove ImageView obj"); vkDestroyImageView(m_device->device(), view, NULL); vkDestroySampler(m_device->device(), sampler, nullptr); } TEST_F(VkLayerTest, BufferViewInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use bufferView."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); VkBufferView view; VkBufferViewCreateInfo bvci = {}; bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; bvci.buffer = buffer; bvci.format = VK_FORMAT_R32_SFLOAT; bvci.range = VK_WHOLE_SIZE; err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view); ASSERT_VK_SUCCESS(err); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; descriptor_write.pTexelBufferView = &view; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0, r32f) uniform readonly imageBuffer s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = imageLoad(s, 0);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); 
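// Same in-flight destruction pattern as the imageView case above: record a draw that references the
// buffer view, submit it, attempt to destroy the view before the queue is idle (expected to fail),
// and only destroy it for real after vkQueueWaitIdle().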
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyBufferView-bufferView-00936"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then destroy bufferView while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkDestroyBufferView(m_device->device(), view, nullptr); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Now we can actually destroy bufferView m_errorMonitor->SetUnexpectedError("If bufferView is not VK_NULL_HANDLE, bufferView must be a valid VkBufferView handle"); m_errorMonitor->SetUnexpectedError("Unable to remove BufferView obj"); vkDestroyBufferView(m_device->device(), view, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), buffer_memory, NULL); } TEST_F(VkLayerTest, SamplerInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use sampler."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; err = vkCreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo image_info{}; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; image_info.imageView = view; image_info.sampler = sampler; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &image_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to use the sampler char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" 
"layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroySampler-sampler-01082"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer then destroy sampler VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then destroy sampler while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkDestroySampler(m_device->device(), sampler, nullptr); // Destroyed too soon m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Now we can actually destroy sampler m_errorMonitor->SetUnexpectedError("If sampler is not VK_NULL_HANDLE, sampler must be a valid VkSampler handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Sampler obj"); vkDestroySampler(m_device->device(), sampler, NULL); // Destroyed for real vkDestroyImageView(m_device->device(), view, NULL); } TEST_F(VkLayerTest, UpdateDestroyDescriptorSetLayout) { TEST_DESCRIPTION("Attempt updates to descriptor sets with destroyed descriptor set layouts"); // TODO: Update to match the descriptor set layout specific VUIDs/VALIDATION_ERROR_* when present const auto kWriteDestroyedLayout = "VUID-VkWriteDescriptorSet-dstSet-00320"; const auto kCopyDstDestroyedLayout = "VUID-VkCopyDescriptorSet-dstSet-parameter"; const auto kCopySrcDestroyedLayout = "VUID-VkCopyDescriptorSet-srcSet-parameter"; ASSERT_NO_FATAL_FAILURE(Init()); // Set up the descriptor (resource) and write/copy operations to use. 
float data[16] = {}; VkConstantBufferObj buffer(m_device, sizeof(data), data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); ASSERT_TRUE(buffer.initialized()); VkDescriptorBufferInfo info = {}; info.buffer = buffer.handle(); info.range = VK_WHOLE_SIZE; VkWriteDescriptorSet write_descriptor = {}; write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; write_descriptor.dstSet = VK_NULL_HANDLE; // must update this write_descriptor.dstBinding = 0; write_descriptor.descriptorCount = 1; write_descriptor.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; write_descriptor.pBufferInfo = &info; VkCopyDescriptorSet copy_descriptor = {}; copy_descriptor.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_descriptor.srcSet = VK_NULL_HANDLE; // must update copy_descriptor.srcBinding = 0; copy_descriptor.dstSet = VK_NULL_HANDLE; // must update copy_descriptor.dstBinding = 0; copy_descriptor.descriptorCount = 1; // Create valid and invalid source and destination descriptor sets std::vector<VkDescriptorSetLayoutBinding> one_uniform_buffer = { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }; OneOffDescriptorSet good_dst(m_device, one_uniform_buffer); ASSERT_TRUE(good_dst.Initialized()); OneOffDescriptorSet bad_dst(m_device, one_uniform_buffer); // Must assert before invalidating it below ASSERT_TRUE(bad_dst.Initialized()); bad_dst.layout_ = VkDescriptorSetLayoutObj(); OneOffDescriptorSet good_src(m_device, one_uniform_buffer); ASSERT_TRUE(good_src.Initialized()); // Put valid data in the good and bad sources, simultaneously doing a positive test on write and copy operations m_errorMonitor->ExpectSuccess(); write_descriptor.dstSet = good_src.set_; vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor, 0, NULL); m_errorMonitor->VerifyNotFound(); OneOffDescriptorSet bad_src(m_device, one_uniform_buffer); ASSERT_TRUE(bad_src.Initialized()); // to complete our positive testing use copy, where above we used write. copy_descriptor.srcSet = good_src.set_; copy_descriptor.dstSet = bad_src.set_; vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor); bad_src.layout_ = VkDescriptorSetLayoutObj(); m_errorMonitor->VerifyNotFound(); // Trigger the three invalid use errors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kWriteDestroyedLayout); write_descriptor.dstSet = bad_dst.set_; vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor, 0, NULL); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kCopyDstDestroyedLayout); copy_descriptor.dstSet = bad_dst.set_; vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kCopySrcDestroyedLayout); copy_descriptor.srcSet = bad_src.set_; copy_descriptor.dstSet = good_dst.set_; vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, QueueForwardProgressFenceWait) { TEST_DESCRIPTION( "Call VkQueueSubmit with a semaphore that is already signaled but not waited on by the queue. 
Wait on a fence that has not " "yet been submitted to a queue."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *queue_forward_progress_message = " that was previously signaled by queue 0x"; const char *invalid_fence_wait_message = " which has not been submitted on a Queue or during acquire next image."; VkCommandBufferObj cb1(m_device, m_commandPool); cb1.begin(); cb1.end(); VkSemaphoreCreateInfo semaphore_create_info = {}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; VkSemaphore semaphore; ASSERT_VK_SUCCESS(vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore)); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cb1.handle(); submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_commandBuffer->begin(); m_commandBuffer->end(); submit_info.pCommandBuffers = &m_commandBuffer->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, queue_forward_progress_message); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); VkFenceCreateInfo fence_create_info = {}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; VkFence fence; ASSERT_VK_SUCCESS(vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, invalid_fence_wait_message); vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); m_errorMonitor->VerifyFound(); vkDeviceWaitIdle(m_device->device()); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); } TEST_F(VkLayerTest, FramebufferIncompatible) { TEST_DESCRIPTION( "Bind a secondary command buffer with a framebuffer that does not match the framebuffer for the active renderpass."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // A renderpass with one color attachment. VkAttachmentDescription attachment = {0, VK_FORMAT_B8G8R8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. 
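    // Note: the framebuffer built here is compatible with 'rp' above, but it is not the
    // framebuffer used by the primary command buffer's render pass instance, which is the
    // mismatch this test exercises.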
VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_B8G8R8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkCommandBufferAllocateInfo cbai = {}; cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cbai.commandPool = m_commandPool->handle(); cbai.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; cbai.commandBufferCount = 1; VkCommandBuffer sec_cb; err = vkAllocateCommandBuffers(m_device->device(), &cbai, &sec_cb); ASSERT_VK_SUCCESS(err); VkCommandBufferBeginInfo cbbi = {}; VkCommandBufferInheritanceInfo cbii = {}; cbii.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; cbii.renderPass = renderPass(); cbii.framebuffer = fb; cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cbbi.pNext = NULL; cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; cbbi.pInheritanceInfo = &cbii; vkBeginCommandBuffer(sec_cb, &cbbi); vkEndCommandBuffer(sec_cb); VkCommandBufferBeginInfo cbbi2 = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, nullptr}; vkBeginCommandBuffer(m_commandBuffer->handle(), &cbbi2); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is not the same as the primary command buffer's current active framebuffer "); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &sec_cb); m_errorMonitor->VerifyFound(); // Cleanup vkCmdEndRenderPass(m_commandBuffer->handle()); vkEndCommandBuffer(m_commandBuffer->handle()); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyRenderPass(m_device->device(), rp, NULL); vkDestroyFramebuffer(m_device->device(), fb, NULL); } TEST_F(VkLayerTest, RenderPassMissingAttachment) { TEST_DESCRIPTION("Begin render pass with missing framebuffer attachment"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create a renderPass with a single color attachment VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); auto createView = lvl_init_struct<VkImageViewCreateInfo>(); createView.image = m_renderTargets[0]->handle(); createView.viewType = VK_IMAGE_VIEW_TYPE_2D; createView.format = VK_FORMAT_B8G8R8A8_UNORM; 
createView.components.r = VK_COMPONENT_SWIZZLE_R; createView.components.g = VK_COMPONENT_SWIZZLE_G; createView.components.b = VK_COMPONENT_SWIZZLE_B; createView.components.a = VK_COMPONENT_SWIZZLE_A; createView.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}; createView.flags = 0; VkImageView iv; vkCreateImageView(m_device->handle(), &createView, nullptr, &iv); auto fb_info = lvl_init_struct<VkFramebufferCreateInfo>(); fb_info.renderPass = rp; fb_info.attachmentCount = 1; fb_info.pAttachments = &iv; fb_info.width = 100; fb_info.height = 100; fb_info.layers = 1; // Create the framebuffer then destory the view it uses. VkFramebuffer fb; err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); vkDestroyImageView(device(), iv, NULL); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassBeginInfo-framebuffer-parameter"); auto rpbi = lvl_init_struct<VkRenderPassBeginInfo>(); rpbi.renderPass = rp; rpbi.framebuffer = fb; rpbi.renderArea = {{0, 0}, {32, 32}}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); // Don't call vkCmdEndRenderPass; as the begin has been "skipped" based on the error condition m_errorMonitor->VerifyFound(); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, NULL); vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, ColorBlendInvalidLogicOp) { TEST_DESCRIPTION("Attempt to use invalid VkPipelineColorBlendStateCreateInfo::logicOp value."); ASSERT_NO_FATAL_FAILURE(Init()); // enables all supported features ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().logicOp) { printf("%s Device does not support logicOp feature; skipped.\n", kSkipPrefix); return; } const auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.cb_ci_.logicOpEnable = VK_TRUE; helper.cb_ci_.logicOp = static_cast<VkLogicOp>(VK_LOGIC_OP_END_RANGE + 1); // invalid logicOp to be tested }; CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00607"); } TEST_F(VkLayerTest, ColorBlendUnsupportedLogicOp) { TEST_DESCRIPTION("Attempt enabling VkPipelineColorBlendStateCreateInfo::logicOpEnable when logicOp feature is disabled."); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.cb_ci_.logicOpEnable = VK_TRUE; }; CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606"); } TEST_F(VkLayerTest, ColorBlendUnsupportedDualSourceBlend) { TEST_DESCRIPTION("Attempt to use dual-source blending when dualSrcBlend feature is disabled."); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto set_dsb_src_color_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC1_COLOR; // bad! 
helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_src_color_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608"); const auto set_dsb_dst_color_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR; helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR; // bad helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_dst_color_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609"); const auto set_dsb_src_alpha_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR; helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC1_ALPHA; // bad helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_src_alpha_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610"); const auto set_dsb_dst_alpha_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR; helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA; // bad! 
helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_dst_alpha_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611"); } #if GTEST_IS_THREADSAFE struct thread_data_struct { VkCommandBuffer commandBuffer; VkDevice device; VkEvent event; bool bailout; }; extern "C" void *AddToCommandBuffer(void *arg) { struct thread_data_struct *data = (struct thread_data_struct *)arg; for (int i = 0; i < 80000; i++) { vkCmdSetEvent(data->commandBuffer, data->event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT); if (data->bailout) { break; } } return NULL; } TEST_F(VkLayerTest, ThreadCommandBufferCollision) { test_platform_thread thread; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "THREADING ERROR"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Calls AllocateCommandBuffers VkCommandBufferObj commandBuffer(m_device, m_commandPool); commandBuffer.begin(); VkEventCreateInfo event_info; VkEvent event; VkResult err; memset(&event_info, 0, sizeof(event_info)); event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; err = vkCreateEvent(device(), &event_info, NULL, &event); ASSERT_VK_SUCCESS(err); err = vkResetEvent(device(), event); ASSERT_VK_SUCCESS(err); struct thread_data_struct data; data.commandBuffer = commandBuffer.handle(); data.event = event; data.bailout = false; m_errorMonitor->SetBailout(&data.bailout); // First do some correct operations using multiple threads. // Add many entries to command buffer from another thread. test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data); // Make non-conflicting calls from this thread at the same time. for (int i = 0; i < 80000; i++) { uint32_t count; vkEnumeratePhysicalDevices(instance(), &count, NULL); } test_platform_thread_join(thread, NULL); // Then do some incorrect operations using multiple threads. // Add many entries to command buffer from another thread. test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data); // Add many entries to command buffer from this thread at the same time. 
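    // Recording into one command buffer from two threads without external synchronization
    // violates Vulkan's threading rules and should produce the "THREADING ERROR" message.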
AddToCommandBuffer(&data); test_platform_thread_join(thread, NULL); commandBuffer.end(); m_errorMonitor->SetBailout(NULL); m_errorMonitor->VerifyFound(); vkDestroyEvent(device(), event, NULL); } #endif // GTEST_IS_THREADSAFE TEST_F(VkLayerTest, InvalidSPIRVCodeSize) { TEST_DESCRIPTION("Test that errors are produced for a spirv modules with invalid code sizes"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid SPIR-V header"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderModule module; VkShaderModuleCreateInfo moduleCreateInfo; struct icd_spv_header spv; spv.magic = ICD_SPV_MAGIC; spv.version = ICD_SPV_VERSION; spv.gen_magic = 0; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.pCode = (const uint32_t *)&spv; moduleCreateInfo.codeSize = 4; moduleCreateInfo.flags = 0; vkCreateShaderModule(m_device->device(), &moduleCreateInfo, NULL, &module); m_errorMonitor->VerifyFound(); char const *vsSource = "#version 450\n" "\n" "layout(location=0) out float x;\n" "void main(){\n" " gl_Position = vec4(1);\n" " x = 0;\n" "}\n"; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkShaderModuleCreateInfo-pCode-01376"); std::vector<unsigned int> shader; VkShaderModuleCreateInfo module_create_info; VkShaderModule shader_module; module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; module_create_info.pNext = NULL; this->GLSLtoSPV(VK_SHADER_STAGE_VERTEX_BIT, vsSource, shader); module_create_info.pCode = shader.data(); // Introduce failure by making codeSize a non-multiple of 4 module_create_info.codeSize = shader.size() * sizeof(unsigned int) - 1; module_create_info.flags = 0; vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidSPIRVMagic) { TEST_DESCRIPTION("Test that an error is produced for a spirv module with a bad magic number"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid SPIR-V magic number"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderModule module; VkShaderModuleCreateInfo moduleCreateInfo; struct icd_spv_header spv; spv.magic = (uint32_t)~ICD_SPV_MAGIC; spv.version = ICD_SPV_VERSION; spv.gen_magic = 0; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.pCode = (const uint32_t *)&spv; moduleCreateInfo.codeSize = sizeof(spv) + 16; moduleCreateInfo.flags = 0; vkCreateShaderModule(m_device->device(), &moduleCreateInfo, NULL, &module); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVertexOutputNotConsumed) { TEST_DESCRIPTION("Test that a warning is produced for a vertex output that is not consumed by the fragment stage"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "not consumed by fragment shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "layout(location=0) out float x;\n" "void main(){\n" " gl_Position = vec4(1);\n" " x = 0;\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); 
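    // The vertex shader writes 'x' at location 0 but the fragment shader declares no matching
    // input, so pipeline creation should emit the performance warning expected above.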
pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineComplexTypes) { TEST_DESCRIPTION("Smoke test for complex types across VS/FS boundary"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); char const *vsSource = "#version 450\n" "void main() {}"; char const *tcsSource = "#version 450\n" "layout(vertices=3) out;\n" "struct S { int x; };\n" "layout(location=2) patch out B { S s; } b;\n" "void main() {\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " gl_TessLevelInner[0] = 1;\n" " b.s.x = 1;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "struct S { int x; };\n" "layout(location=2) patch in B { S s; } b;\n" "void main() { gl_Position = vec4(b.s.x); }\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 c;\n" "void main() { c = vec4(1); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); pipe.SetInputAssembly(&iasci); pipe.SetTessellation(&tsci); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderBadSpecialization) { TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *bad_specialization_message = "Specialization entry 0 (for constant id 0) references memory outside provided specialization data "; char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout (constant_id = 0) const float r = 0.0f;\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(r,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device); VkPipelineViewportStateCreateInfo vp_state_create_info = {}; vp_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; vp_state_create_info.viewportCount = 1; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; 
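    // Scissor state is supplied dynamically below, so only the viewport is provided statically here.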
vp_state_create_info.pViewports = &viewport; vp_state_create_info.scissorCount = 1; VkDynamicState scissor_state = VK_DYNAMIC_STATE_SCISSOR; VkPipelineDynamicStateCreateInfo pipeline_dynamic_state_create_info = {}; pipeline_dynamic_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; pipeline_dynamic_state_create_info.dynamicStateCount = 1; pipeline_dynamic_state_create_info.pDynamicStates = &scissor_state; VkPipelineShaderStageCreateInfo shader_stage_create_info[2] = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; VkPipelineVertexInputStateCreateInfo vertex_input_create_info = {}; vertex_input_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; VkPipelineInputAssemblyStateCreateInfo input_assembly_create_info = {}; input_assembly_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_create_info.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkPipelineRasterizationStateCreateInfo rasterization_state_create_info = {}; rasterization_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state_create_info.pNext = nullptr; rasterization_state_create_info.lineWidth = 1.0f; rasterization_state_create_info.rasterizerDiscardEnable = true; VkPipelineColorBlendAttachmentState color_blend_attachment_state = {}; color_blend_attachment_state.blendEnable = VK_FALSE; color_blend_attachment_state.colorWriteMask = 0xf; VkPipelineColorBlendStateCreateInfo color_blend_state_create_info = {}; color_blend_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; color_blend_state_create_info.attachmentCount = 1; color_blend_state_create_info.pAttachments = &color_blend_attachment_state; VkGraphicsPipelineCreateInfo graphicspipe_create_info = {}; graphicspipe_create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; graphicspipe_create_info.stageCount = 2; graphicspipe_create_info.pStages = shader_stage_create_info; graphicspipe_create_info.pVertexInputState = &vertex_input_create_info; graphicspipe_create_info.pInputAssemblyState = &input_assembly_create_info; graphicspipe_create_info.pViewportState = &vp_state_create_info; graphicspipe_create_info.pRasterizationState = &rasterization_state_create_info; graphicspipe_create_info.pColorBlendState = &color_blend_state_create_info; graphicspipe_create_info.pDynamicState = &pipeline_dynamic_state_create_info; graphicspipe_create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; graphicspipe_create_info.layout = pipeline_layout.handle(); graphicspipe_create_info.renderPass = renderPass(); VkPipelineCacheCreateInfo pipeline_cache_create_info = {}; pipeline_cache_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkPipelineCache pipelineCache; ASSERT_VK_SUCCESS(vkCreatePipelineCache(m_device->device(), &pipeline_cache_create_info, nullptr, &pipelineCache)); // This structure maps constant ids to data locations. const VkSpecializationMapEntry entry = // id, offset, size {0, 4, sizeof(uint32_t)}; // Challenge core validation by using a bogus offset. 
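    // The map entry's offset of 4 points at the end of the 4-byte specialization data blob set
    // up below, so the entry references memory outside the provided data.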
uint32_t data = 1; // Set up the info describing spec map and data const VkSpecializationInfo specialization_info = { 1, &entry, 1 * sizeof(float), &data, }; shader_stage_create_info[0].pSpecializationInfo = &specialization_info; VkPipeline pipeline; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_specialization_message); vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &graphicspipe_create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); vkDestroyPipelineCache(m_device->device(), pipelineCache, nullptr); } TEST_F(VkLayerTest, CreatePipelineCheckShaderDescriptorTypeMismatch) { TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *descriptor_type_mismatch_message = "Type mismatch on descriptor slot 0.0 "; OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); char const *vsSource = "#version 450\n" "\n" "layout (std140, set = 0, binding = 0) uniform buf {\n" " mat4 mvp;\n" "} ubuf;\n" "void main(){\n" " gl_Position = ubuf.mvp * vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, descriptor_type_mismatch_message); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderDescriptorNotAccessible) { TEST_DESCRIPTION( "Create a pipeline in which a descriptor used by a shader stage does not include that stage in its stageFlags."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *descriptor_not_accessible_message = "Shader uses descriptor slot 0.0 "; OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT /*!*/, nullptr}, }); char const *vsSource = "#version 450\n" "\n" "layout (std140, set = 0, binding = 0) uniform buf {\n" " mat4 mvp;\n" "} ubuf;\n" "void main(){\n" " gl_Position = ubuf.mvp * vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, descriptor_not_accessible_message); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderPushConstantNotAccessible) { TEST_DESCRIPTION( "Create a graphics pipeline in which a push constant range containing a push constant block member is not accessible from " "the current shader stage."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); 
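    // The push constant range below is restricted to the fragment stage, while the vertex
    // shader is the stage that reads the block, which is the mismatch this test exercises.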
const char *push_constant_not_accessible_message = "Push constant range covering variable starting at offset 0 not accessible from stage VK_SHADER_STAGE_VERTEX_BIT"; char const *vsSource = "#version 450\n" "\n" "layout(push_constant, std430) uniform foo { float x; } consts;\n" "void main(){\n" " gl_Position = vec4(consts.x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Set up a push constant range VkPushConstantRange push_constant_range = {}; // Set to the wrong stage to challenge core_validation push_constant_range.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; push_constant_range.size = 4; const VkPipelineLayoutObj pipeline_layout(m_device, {}, {push_constant_range}); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, push_constant_not_accessible_message); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderNotEnabled) { TEST_DESCRIPTION( "Create a graphics pipeline in which a capability declared by the shader requires a feature not enabled on the device."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *feature_not_enabled_message = "Shader requires VkPhysicalDeviceFeatures::shaderFloat64 but is not enabled on the device"; // Some awkward steps are required to test with custom device features. std::vector<const char *> device_extension_names; auto features = m_device->phy().features(); // Disable support for 64 bit floats features.shaderFloat64 = false; // The sacrificial device object VkDeviceObj test_device(0, gpu(), device_extension_names, &features); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " dvec4 green = vec4(0.0, 1.0, 0.0, 1.0);\n" " color = vec4(green);\n" "}\n"; VkShaderObj vs(&test_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(&test_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkRenderpassObj render_pass(&test_device); VkPipelineObj pipe(&test_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); const VkPipelineLayoutObj pipeline_layout(&test_device); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, feature_not_enabled_message); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateShaderModuleCheckBadCapability) { TEST_DESCRIPTION("Create a shader in which a capability declared by the shader is not supported."); // Note that this failure message comes from spirv-tools, specifically the validator. 
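    // The hand-written SPIR-V below declares OpCapability ImageRect, which is not allowed for
    // Vulkan shader modules, so vkCreateShaderModule is expected to fail validation.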
ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const std::string spv_source = R"( OpCapability ImageRect OpEntryPoint Vertex %main "main" %main = OpFunction %void None %3 OpReturn OpFunctionEnd )"; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Capability ImageRect is not allowed by Vulkan"); std::vector<unsigned int> spv; VkShaderModuleCreateInfo module_create_info; VkShaderModule shader_module; module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; module_create_info.pNext = NULL; ASMtoSPV(SPV_ENV_VULKAN_1_0, 0, spv_source.data(), spv); module_create_info.pCode = spv.data(); module_create_info.codeSize = spv.size() * sizeof(unsigned int); module_create_info.flags = 0; VkResult err = vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyShaderModule(m_device->handle(), shader_module, NULL); } } TEST_F(VkPositiveLayerTest, ShaderRelaxedBlockLayout) { // This is a positive test, no errors expected // Verifies the ability to relax block layout rules with a shader that requires them to be relaxed TEST_DESCRIPTION("Create a shader that requires relaxed block layout."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // The Relaxed Block Layout extension was promoted to core in 1.1. // Go ahead and check for it and turn it on in case a 1.0 device has it. if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME)) { printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Vertex shader requiring relaxed layout. 
// Without relaxed layout, we would expect a message like: // "Structure id 2 decorated as Block for variable in Uniform storage class // must follow standard uniform buffer layout rules: member 1 at offset 4 is not aligned to 16" const std::string spv_source = R"( OpCapability Shader OpMemoryModel Logical GLSL450 OpEntryPoint Vertex %main "main" OpSource GLSL 450 OpMemberDecorate %S 0 Offset 0 OpMemberDecorate %S 1 Offset 4 OpDecorate %S Block OpDecorate %B DescriptorSet 0 OpDecorate %B Binding 0 %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %v3float = OpTypeVector %float 3 %S = OpTypeStruct %float %v3float %_ptr_Uniform_S = OpTypePointer Uniform %S %B = OpVariable %_ptr_Uniform_S Uniform %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; std::vector<unsigned int> spv; VkShaderModuleCreateInfo module_create_info; VkShaderModule shader_module; module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; module_create_info.pNext = NULL; ASMtoSPV(SPV_ENV_VULKAN_1_0, 0, spv_source.data(), spv); module_create_info.pCode = spv.data(); module_create_info.codeSize = spv.size() * sizeof(unsigned int); module_create_info.flags = 0; m_errorMonitor->ExpectSuccess(); VkResult err = vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyShaderModule(m_device->handle(), shader_module, NULL); } } TEST_F(VkPositiveLayerTest, ShaderScalarBlockLayout) { // This is a positive test, no errors expected // Verifies the ability to scalar block layout rules with a shader that requires them to be relaxed TEST_DESCRIPTION("Create a shader that requires scalar block layout."); // Enable req'd extensions if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for the Scalar Block Layout extension and turn it on if it's available if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME)) { printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME); PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 = (PFN_vkGetPhysicalDeviceFeatures2)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); auto scalar_block_features = lvl_init_struct<VkPhysicalDeviceScalarBlockLayoutFeaturesEXT>(NULL); scalar_block_features.scalarBlockLayout = VK_TRUE; auto query_features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&scalar_block_features); vkGetPhysicalDeviceFeatures2(gpu(), &query_features2); auto set_features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&scalar_block_features); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &set_features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Vertex shader requiring scalar layout. 
// Without scalar layout, we would expect a message like: // "Structure id 2 decorated as Block for variable in Uniform storage class // must follow standard uniform buffer layout rules: member 1 at offset 4 is not aligned to 16" const std::string spv_source = R"( OpCapability Shader OpMemoryModel Logical GLSL450 OpEntryPoint Vertex %main "main" OpSource GLSL 450 OpMemberDecorate %S 0 Offset 0 OpMemberDecorate %S 1 Offset 4 OpMemberDecorate %S 2 Offset 8 OpDecorate %S Block OpDecorate %B DescriptorSet 0 OpDecorate %B Binding 0 %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %v3float = OpTypeVector %float 3 %S = OpTypeStruct %float %float %v3float %_ptr_Uniform_S = OpTypePointer Uniform %S %B = OpVariable %_ptr_Uniform_S Uniform %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; std::vector<unsigned int> spv; VkShaderModuleCreateInfo module_create_info; VkShaderModule shader_module; module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; module_create_info.pNext = NULL; ASMtoSPV(SPV_ENV_VULKAN_1_0, 0, spv_source.data(), spv); module_create_info.pCode = spv.data(); module_create_info.codeSize = spv.size() * sizeof(unsigned int); module_create_info.flags = 0; m_errorMonitor->ExpectSuccess(); VkResult err = vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyShaderModule(m_device->handle(), shader_module, NULL); } } TEST_F(VkPositiveLayerTest, SpirvGroupDecorations) { TEST_DESCRIPTION("Test shader validation support for group decorations."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const std::string spv_source = R"( OpCapability Shader OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" %gl_GlobalInvocationID OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 430 OpName %main "main" OpName %gl_GlobalInvocationID "gl_GlobalInvocationID" OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId OpDecorate %_runtimearr_float ArrayStride 4 OpDecorate %4 BufferBlock OpDecorate %5 Offset 0 %4 = OpDecorationGroup %5 = OpDecorationGroup OpGroupDecorate %4 %_struct_6 %_struct_7 %_struct_8 %_struct_9 %_struct_10 %_struct_11 OpGroupMemberDecorate %5 %_struct_6 0 %_struct_7 0 %_struct_8 0 %_struct_9 0 %_struct_10 0 %_struct_11 0 OpDecorate %12 DescriptorSet 0 OpDecorate %13 DescriptorSet 0 OpDecorate %13 NonWritable OpDecorate %13 Restrict %14 = OpDecorationGroup %12 = OpDecorationGroup %13 = OpDecorationGroup OpGroupDecorate %12 %15 OpGroupDecorate %12 %15 OpGroupDecorate %12 %15 OpDecorate %15 DescriptorSet 0 OpDecorate %15 Binding 5 OpGroupDecorate %14 %16 OpDecorate %16 DescriptorSet 0 OpDecorate %16 Binding 0 OpGroupDecorate %12 %17 OpDecorate %17 Binding 1 OpGroupDecorate %13 %18 %19 OpDecorate %18 Binding 2 OpDecorate %19 Binding 3 OpGroupDecorate %14 %20 OpGroupDecorate %12 %20 OpGroupDecorate %13 %20 OpDecorate %20 Binding 4 %bool = OpTypeBool %void = OpTypeVoid %23 = OpTypeFunction %void %uint = OpTypeInt 32 0 %int = OpTypeInt 32 1 %float = OpTypeFloat 32 %v3uint = OpTypeVector %uint 3 %v3float = OpTypeVector %float 3 %_ptr_Input_v3uint = OpTypePointer Input %v3uint %_ptr_Uniform_int = OpTypePointer Uniform %int %_ptr_Uniform_float = OpTypePointer Uniform %float %_runtimearr_int = OpTypeRuntimeArray %int %_runtimearr_float = OpTypeRuntimeArray %float %gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input 
%int_0 = OpConstant %int 0 %_struct_6 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_6 = OpTypePointer Uniform %_struct_6 %15 = OpVariable %_ptr_Uniform__struct_6 Uniform %_struct_7 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_7 = OpTypePointer Uniform %_struct_7 %16 = OpVariable %_ptr_Uniform__struct_7 Uniform %_struct_8 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_8 = OpTypePointer Uniform %_struct_8 %17 = OpVariable %_ptr_Uniform__struct_8 Uniform %_struct_9 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_9 = OpTypePointer Uniform %_struct_9 %18 = OpVariable %_ptr_Uniform__struct_9 Uniform %_struct_10 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_10 = OpTypePointer Uniform %_struct_10 %19 = OpVariable %_ptr_Uniform__struct_10 Uniform %_struct_11 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_11 = OpTypePointer Uniform %_struct_11 %20 = OpVariable %_ptr_Uniform__struct_11 Uniform %main = OpFunction %void None %23 %40 = OpLabel %41 = OpLoad %v3uint %gl_GlobalInvocationID %42 = OpCompositeExtract %uint %41 0 %43 = OpAccessChain %_ptr_Uniform_float %16 %int_0 %42 %44 = OpAccessChain %_ptr_Uniform_float %17 %int_0 %42 %45 = OpAccessChain %_ptr_Uniform_float %18 %int_0 %42 %46 = OpAccessChain %_ptr_Uniform_float %19 %int_0 %42 %47 = OpAccessChain %_ptr_Uniform_float %20 %int_0 %42 %48 = OpAccessChain %_ptr_Uniform_float %15 %int_0 %42 %49 = OpLoad %float %43 %50 = OpLoad %float %44 %51 = OpLoad %float %45 %52 = OpLoad %float %46 %53 = OpLoad %float %47 %54 = OpFAdd %float %49 %50 %55 = OpFAdd %float %54 %51 %56 = OpFAdd %float %55 %52 %57 = OpFAdd %float %56 %53 OpStore %48 %57 OpReturn OpFunctionEnd )"; // CreateDescriptorSetLayout VkDescriptorSetLayoutBinding dslb[6] = {}; for (auto i = 0; i < 6; i++) { dslb[i].binding = i; dslb[i].descriptorCount = 1; dslb[i].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; dslb[i].pImmutableSamplers = NULL; dslb[i].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_ALL; } VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.flags = 0; ds_layout_ci.bindingCount = 6; ds_layout_ci.pBindings = dslb; VkDescriptorSetLayout ds_layout = {}; vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); // CreatePipelineLayout VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.flags = 0; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); // Create DescriptorPool VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; ds_type_count.descriptorCount = 6; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool = VK_NULL_HANDLE; vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); // AllocateDescriptorSets VkDescriptorSetAllocateInfo ds_alloc_info = {}; ds_alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; ds_alloc_info.descriptorSetCount = 1; 
ds_alloc_info.descriptorPool = ds_pool; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet descriptorSet; vkAllocateDescriptorSets(m_device->device(), &ds_alloc_info, &descriptorSet); // CreateShaderModule std::vector<unsigned int> spv; VkShaderModuleCreateInfo module_create_info; VkShaderModule shader_module; module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; module_create_info.pNext = NULL; ASMtoSPV(SPV_ENV_VULKAN_1_0, 0, spv_source.data(), spv); module_create_info.pCode = spv.data(); module_create_info.codeSize = spv.size() * sizeof(unsigned int); module_create_info.flags = 0; vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module); // CreateComputePipelines VkComputePipelineCreateInfo pipeline_info = {}; pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; pipeline_info.pNext = nullptr; pipeline_info.flags = 0; pipeline_info.layout = pipeline_layout; pipeline_info.basePipelineHandle = VK_NULL_HANDLE; pipeline_info.basePipelineIndex = -1; pipeline_info.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; pipeline_info.stage.pNext = nullptr; pipeline_info.stage.flags = 0; pipeline_info.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT; pipeline_info.stage.module = shader_module; pipeline_info.stage.pName = "main"; pipeline_info.stage.pSpecializationInfo = nullptr; VkPipeline cs_pipeline; m_errorMonitor->ExpectSuccess(); vkCreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &cs_pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(device(), cs_pipeline, nullptr); vkDestroyShaderModule(device(), shader_module, nullptr); vkDestroyDescriptorPool(device(), ds_pool, nullptr); vkDestroyPipelineLayout(device(), pipeline_layout, nullptr); vkDestroyDescriptorSetLayout(device(), ds_layout, nullptr); } TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension1of2) { // This is a positive test, no errors expected // Verifies the ability to deal with a shader that declares a non-unique SPIRV capability ID TEST_DESCRIPTION("Create a shader in which uses a non-unique capability ID extension, 1 of 2"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME)) { printf("%s Extension %s not supported, skipping this pass. 
\n", kSkipPrefix, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); // These tests require that the device support multiViewport if (!m_device->phy().features().multiViewport) { printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Vertex shader using viewport array capability char const *vsSource = "#version 450\n" "#extension GL_ARB_shader_viewport_layer_array : enable\n" "void main() {\n" " gl_ViewportIndex = 1;\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); const VkPipelineLayoutObj pipe_layout(m_device, {}); m_errorMonitor->ExpectSuccess(); pipe.CreateVKPipeline(pipe_layout.handle(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension2of2) { // This is a positive test, no errors expected // Verifies the ability to deal with a shader that declares a non-unique SPIRV capability ID TEST_DESCRIPTION("Create a shader in which uses a non-unique capability ID extension, 2 of 2"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME)) { printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); // These tests require that the device support multiViewport if (!m_device->phy().features().multiViewport) { printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Vertex shader using viewport array capability char const *vsSource = "#version 450\n" "#extension GL_ARB_shader_viewport_layer_array : enable\n" "void main() {\n" " gl_ViewportIndex = 1;\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); const VkPipelineLayoutObj pipe_layout(m_device, {}); m_errorMonitor->ExpectSuccess(); pipe.CreateVKPipeline(pipe_layout.handle(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentInputNotProvided) { TEST_DESCRIPTION( "Test that an error is produced for a fragment shader input which is not present in the outputs of the previous stage"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) in float x;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), 
        renderPass());
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineFragmentInputNotProvidedInBlock) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a fragment shader input within an interface block, which is not present in the outputs "
        "of the previous stage.");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not written by vertex shader");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        " gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "in block { layout(location=0) float x; } ins;\n"
        "layout(location=0) out vec4 color;\n"
        "void main(){\n"
        " color = vec4(ins.x);\n"
        "}\n";

    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddDefaultColorAttachment();
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

    pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatchArraySize) {
    TEST_DESCRIPTION("Test that an error is produced for mismatched array sizes across the vertex->fragment shader interface");

    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "Type mismatch on location 0.0: 'ptr to output arr[2] of float32' vs 'ptr to input arr[1] of float32'");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *vsSource =
        "#version 450\n"
        "\n"
        "layout(location=0) out float x[2];\n"
        "void main(){\n"
        " x[0] = 0; x[1] = 0;\n"
        " gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "layout(location=0) in float x[1];\n"
        "layout(location=0) out vec4 color;\n"
        "void main(){\n"
        " color = vec4(x[0]);\n"
        "}\n";

    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddDefaultColorAttachment();
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

    pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatch) {
    TEST_DESCRIPTION("Test that an error is produced for mismatched types across the vertex->fragment shader interface");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Type mismatch on location 0");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *vsSource =
        "#version 450\n"
        "\n"
        "layout(location=0) out int x;\n"
        "void main(){\n"
        " x = 0;\n"
        " gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "layout(location=0) in float x;\n" /* VS writes int */
        "layout(location=0) out vec4 color;\n"
        "void main(){\n"
        " color = vec4(x);\n"
        "}\n";

    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddDefaultColorAttachment();
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
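    // The VS declares "layout(location=0) out int x" while the FS declares "layout(location=0) in float x",
    // so the "Type mismatch on location 0" message armed above is expected to fire on the
    // CreateVKPipeline call below, not at draw time.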
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatchInBlock) { TEST_DESCRIPTION( "Test that an error is produced for mismatched types across the vertex->fragment shader interface, when the variable is " "contained within an interface block"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Type mismatch on location 0"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "out block { layout(location=0) int x; } outs;\n" "void main(){\n" " outs.x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0) float x; } ins;\n" /* VS writes int */ "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByLocation) { TEST_DESCRIPTION( "Test that an error is produced for location mismatches across the vertex->fragment shader interface; This should manifest " "as a not-written/not-consumed pair, but flushes out broken walking of the interfaces"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0.0 which is not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "out block { layout(location=1) float x; } outs;\n" "void main(){\n" " outs.x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0) float x; } ins;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByComponent) { TEST_DESCRIPTION( "Test that an error is produced for component mismatches across the vertex->fragment shader interface. 
It's not enough to " "have the same set of locations in use; matching is defined in terms of spirv variables."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0.1 which is not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "out block { layout(location=0, component=0) float x; } outs;\n" "void main(){\n" " outs.x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0, component=1) float x; } ins;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByPrecision) { TEST_DESCRIPTION("Test that the RelaxedPrecision decoration is validated to match"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "layout(location=0) out mediump float x;\n" "void main() { gl_Position = vec4(0); x = 1.0; }\n"; char const *fsSource = "#version 450\n" "layout(location=0) in highp float x;\n" "layout(location=0) out vec4 color;\n" "void main() { color = vec4(x); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "differ in precision"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByPrecisionBlock) { TEST_DESCRIPTION("Test that the RelaxedPrecision decoration is validated to match"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "out block { layout(location=0) mediump float x; };\n" "void main() { gl_Position = vec4(0); x = 1.0; }\n"; char const *fsSource = "#version 450\n" "in block { layout(location=0) highp float x; };\n" "layout(location=0) out vec4 color;\n" "void main() { color = vec4(x); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "differ in precision"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribNotConsumed) { TEST_DESCRIPTION("Test that a warning is produced for a vertex attribute which is not consumed by the vertex shader"); 
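    // An attribute the vertex shader never reads is valid Vulkan, just wasteful, which is why the check
    // below arms a performance warning (VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) rather than an error.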
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "location 0 not consumed by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribLocationMismatch) { TEST_DESCRIPTION( "Test that a warning is produced for a location mismatch on vertex attributes. This flushes out bad behavior in the " "interface walker"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "location 0 not consumed by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=1) in float x;\n" "void main(){\n" " gl_Position = vec4(x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); m_errorMonitor->SetUnexpectedError("Vertex shader consumes input at location 1 but not provided"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribNotProvided) { TEST_DESCRIPTION("Test that an error is produced for a vertex shader input which is not provided by a vertex attribute"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Vertex shader consumes input at location 0 but not provided"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "layout(location=0) in vec4 x;\n" /* not provided */ "void main(){\n" " gl_Position = x;\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, 
VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribTypeMismatch) { TEST_DESCRIPTION( "Test that an error is produced for a mismatch between the fundamental type (float/int/uint) of an attribute and the " "vertex shader input that consumes it"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0 does not match vertex shader input type"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=0) in int x;\n" /* attrib provided float */ "void main(){\n" " gl_Position = vec4(x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineDuplicateStage) { TEST_DESCRIPTION("Test that an error is produced for a pipeline containing multiple shaders for the same stage"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Multiple shaders provided for stage VK_SHADER_STAGE_VERTEX_BIT"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&vs); // intentionally duplicate vertex shader attachment pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineMissingEntrypoint) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "No entrypoint found named `foo`"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "void main(){\n" " gl_Position = vec4(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" 
"void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this, "foo"); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineDepthStencilRequired) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "pDepthStencilState is NULL when rasterization is enabled and subpass uses a depth/stencil attachment"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "void main(){ gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkAttachmentDescription attachments[] = { { 0, VK_FORMAT_B8G8R8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }, { 0, VK_FORMAT_D16_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, }, }; VkAttachmentReference refs[] = { {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}, }; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &refs[0], nullptr, &refs[1], 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, attachments, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), rp); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, CreatePipelineTessPatchDecorationMismatch) { TEST_DESCRIPTION( "Test that an error is produced for a variable output from the TCS without the patch decoration, but consumed in the TES " "with the decoration."); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "is per-vertex in tessellation control shader stage but per-patch in tessellation evaluation shader stage"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "void main(){}\n"; char const *tcsSource = "#version 450\n" "layout(location=0) out int x[];\n" "layout(vertices=3) out;\n" "void main(){\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " 
gl_TessLevelInner[0] = 1;\n" " x[gl_InvocationID] = gl_InvocationID;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "layout(location=0) patch in int x;\n" "void main(){\n" " gl_Position.xyz = gl_TessCoord;\n" " gl_Position.w = x;\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkPipelineObj pipe(m_device); pipe.SetInputAssembly(&iasci); pipe.SetTessellation(&tsci); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineTessErrors) { TEST_DESCRIPTION("Test various errors when creating a graphics pipeline with tessellation stages active."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "void main(){}\n"; char const *tcsSource = "#version 450\n" "layout(vertices=3) out;\n" "void main(){\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " gl_TessLevelInner[0] = 1;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "void main(){\n" " gl_Position.xyz = gl_TessCoord;\n" " gl_Position.w = 0;\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); { VkPipelineObj pipe(m_device); VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci; iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // otherwise we get a failure about invalid topology pipe.SetInputAssembly(&iasci_bad); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); // Pass a tess control shader without a tess eval shader pipe.AddShader(&tcs); 
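        // Only the tessellation control stage is attached (no evaluation stage), so the pipeline creation
        // below is expected to trigger VUID-VkGraphicsPipelineCreateInfo-pStages-00729.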
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } { VkPipelineObj pipe(m_device); VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci; iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // otherwise we get a failure about invalid topology pipe.SetInputAssembly(&iasci_bad); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); // Pass a tess eval shader without a tess control shader pipe.AddShader(&tes); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } { VkPipelineObj pipe(m_device); pipe.SetInputAssembly(&iasci); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); // Pass patch topology without tessellation shaders m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-topology-00737"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); pipe.AddShader(&tcs); pipe.AddShader(&tes); // Pass a NULL pTessellationState (with active tessellation shader stages) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00731"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); // Pass an invalid pTessellationState (bad sType) VkPipelineTessellationStateCreateInfo tsci_bad = tsci; tsci_bad.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; pipe.SetTessellation(&tsci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineTessellationStateCreateInfo-sType-sType"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); // Pass out-of-range patchControlPoints tsci_bad = tsci; tsci_bad.patchControlPoints = 0; pipe.SetTessellation(&tsci); pipe.SetTessellation(&tsci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); tsci_bad.patchControlPoints = m_device->props.limits.maxTessellationPatchSize + 1; pipe.SetTessellation(&tsci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); pipe.SetTessellation(&tsci); // Pass an invalid primitive topology VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci; iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; pipe.SetInputAssembly(&iasci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); pipe.SetInputAssembly(&iasci); } } TEST_F(VkLayerTest, CreatePipelineAttribBindingConflict) { TEST_DESCRIPTION( "Test that an error is produced for a vertex attribute setup where multiple bindings provide the same location"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Duplicate vertex input binding descriptions for 
binding 0"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); /* Two binding descriptions for binding 0 */ VkVertexInputBindingDescription input_bindings[2]; memset(input_bindings, 0, sizeof(input_bindings)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=0) in float x;\n" /* attrib provided float */ "void main(){\n" " gl_Position = vec4(x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(input_bindings, 2); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentOutputNotWritten) { TEST_DESCRIPTION( "Test that an error is produced for a fragment shader which does not provide an output for one of the pipeline's color " "attachments"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "Attachment 0 not written by fragment shader"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "void main(){\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0, not written */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineFragmentOutputNotWrittenButMasked) { TEST_DESCRIPTION( "Test that no error is produced when the fragment shader fails to declare an output, but the corresponding attachment's " "write mask is 0."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "void main(){\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0, not written, but also masked */ pipe.AddDefaultColorAttachment(0); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentOutputNotConsumed) { TEST_DESCRIPTION( "Test that a warning is produced for a fragment shader which provides a spurious output with 
no matching attachment"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "fragment shader writes to output location 1 with no matching attachment"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(location=1) out vec4 y;\n" /* no matching attachment for this */ "void main(){\n" " x = vec4(1);\n" " y = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0, not written */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); /* FS writes CB 1, but we don't configure it */ VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentOutputTypeMismatch) { TEST_DESCRIPTION( "Test that an error is produced for a mismatch between the fundamental type of an fragment shader output variable, and the " "format of the corresponding attachment"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "does not match fragment shader output type"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out ivec4 x;\n" /* not UNORM */ "void main(){\n" " x = ivec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0; type is UNORM by default */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineExceedMaxVertexOutputComponents) { TEST_DESCRIPTION( "Test that an error is produced when the number of output components from the vertex stage exceeds the device limit"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Vertex shader exceeds " "VkPhysicalDeviceLimits::maxVertexOutputComponents"); ASSERT_NO_FATAL_FAILURE(Init()); const uint32_t maxVsOutComp = m_device->props.limits.maxVertexOutputComponents; std::string vsSourceStr = "#version 450\n\n"; const uint32_t numVec4 = maxVsOutComp / 4; uint32_t location = 0; for (uint32_t i = 0; i < numVec4; i++) { vsSourceStr += "layout(location=" + std::to_string(location) + ") out vec4 v" + std::to_string(i) + ";\n"; location += 1; } const uint32_t remainder = maxVsOutComp % 4; if (remainder != 0) { if (remainder == 1) { vsSourceStr += "layout(location=" + std::to_string(location) + ") out float" + " vn;\n"; } else { vsSourceStr += "layout(location=" + std::to_string(location) + ") out vec" + std::to_string(remainder) + " vn;\n"; } location += 1; } vsSourceStr += "layout(location=" + std::to_string(location) + ") out vec4 exceedLimit;\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; std::string 
fsSourceStr = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSourceStr.c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSourceStr.c_str(), VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); // Set up CB 0; type is UNORM by default pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineExceedMaxTessellationControlInputOutputComponents) { TEST_DESCRIPTION( "Test that errors are produced when the number of per-vertex input and/or output components to the tessellation control " "stage exceeds the device limit"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Tessellation control shader exceeds " "VkPhysicalDeviceLimits::maxTessellationControlPerVertexInputComponents"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Tessellation control shader exceeds " "VkPhysicalDeviceLimits::maxTessellationControlPerVertexOutputComponents"); ASSERT_NO_FATAL_FAILURE(Init()); VkPhysicalDeviceFeatures feat; vkGetPhysicalDeviceFeatures(gpu(), &feat); if (!feat.tessellationShader) { printf("%s tessellation shader stage(s) unsupported.\n", kSkipPrefix); return; } std::string vsSourceStr = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; // Tessellation control stage std::string tcsSourceStr = "#version 450\n" "\n"; // Input components const uint32_t maxTescInComp = m_device->props.limits.maxTessellationControlPerVertexInputComponents; const uint32_t numInVec4 = maxTescInComp / 4; uint32_t inLocation = 0; for (uint32_t i = 0; i < numInVec4; i++) { tcsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 v" + std::to_string(i) + "In[];\n"; inLocation += 1; } const uint32_t inRemainder = maxTescInComp % 4; if (inRemainder != 0) { if (inRemainder == 1) { tcsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in float" + " vnIn[];\n"; } else { tcsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec" + std::to_string(inRemainder) + " vnIn[];\n"; } inLocation += 1; } tcsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 exceedLimitIn[];\n\n"; // Output components const uint32_t maxTescOutComp = m_device->props.limits.maxTessellationControlPerVertexOutputComponents; const uint32_t numOutVec4 = maxTescOutComp / 4; uint32_t outLocation = 0; for (uint32_t i = 0; i < numOutVec4; i++) { tcsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 v" + std::to_string(i) + "Out[3];\n"; outLocation += 1; } const uint32_t outRemainder = maxTescOutComp % 4; if (outRemainder != 0) { if (outRemainder == 1) { tcsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out float" + " vnOut[3];\n"; } else { tcsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec" + std::to_string(outRemainder) + " vnOut[3];\n"; } outLocation += 1; } tcsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 exceedLimitOut[3];\n"; tcsSourceStr += "layout(vertices=3) out;\n"; // Finalize tcsSourceStr += "\n" "void main(){\n" " gl_out[gl_InvocationID].gl_Position = 
gl_in[gl_InvocationID].gl_Position;\n" "}\n"; std::string tesSourceStr = "#version 450\n" "\n" "layout(triangles) in;" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; std::string fsSourceStr = "#version 450\n" "\n" "layout(location=0) out vec4 color;" "\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSourceStr.c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSourceStr.c_str(), VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSourceStr.c_str(), VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSourceStr.c_str(), VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); // Set up CB 0; type is UNORM by default pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo = {}; inputAssemblyInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; inputAssemblyInfo.pNext = NULL; inputAssemblyInfo.flags = 0; inputAssemblyInfo.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; inputAssemblyInfo.primitiveRestartEnable = VK_FALSE; pipe.SetInputAssembly(&inputAssemblyInfo); VkPipelineTessellationStateCreateInfo tessInfo = {}; tessInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; tessInfo.pNext = NULL; tessInfo.flags = 0; tessInfo.patchControlPoints = 3; pipe.SetTessellation(&tessInfo); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineExceedMaxTessellationEvaluationInputOutputComponents) { TEST_DESCRIPTION( "Test that errors are produced when the number of input and/or output components to the tessellation evaluation stage " "exceeds the device limit"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Tessellation evaluation shader exceeds " "VkPhysicalDeviceLimits::maxTessellationEvaluationInputComponents"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Tessellation evaluation shader exceeds " "VkPhysicalDeviceLimits::maxTessellationEvaluationOutputComponents"); ASSERT_NO_FATAL_FAILURE(Init()); VkPhysicalDeviceFeatures feat; vkGetPhysicalDeviceFeatures(gpu(), &feat); if (!feat.tessellationShader) { printf("%s tessellation shader stage(s) unsupported.\n", kSkipPrefix); return; } std::string vsSourceStr = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; std::string tcsSourceStr = "#version 450\n" "\n" "layout (vertices = 3) out;\n" "\n" "void main(){\n" " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n" "}\n"; // Tessellation evaluation stage std::string tesSourceStr = "#version 450\n" "\n" "layout (triangles) in;\n" "\n"; // Input components const uint32_t maxTeseInComp = m_device->props.limits.maxTessellationEvaluationInputComponents; const uint32_t numInVec4 = maxTeseInComp / 4; uint32_t inLocation = 0; for (uint32_t i = 0; i < numInVec4; i++) { tesSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 v" + std::to_string(i) + "In[];\n"; inLocation += 1; } const uint32_t inRemainder = maxTeseInComp % 4; if (inRemainder != 0) { if (inRemainder == 1) { tesSourceStr += "layout(location=" + std::to_string(inLocation) + ") in float" + " vnIn[];\n"; } else { 
tesSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec" + std::to_string(inRemainder) + " vnIn[];\n"; } inLocation += 1; } tesSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 exceedLimitIn[];\n\n"; // Output components const uint32_t maxTeseOutComp = m_device->props.limits.maxTessellationEvaluationOutputComponents; const uint32_t numOutVec4 = maxTeseOutComp / 4; uint32_t outLocation = 0; for (uint32_t i = 0; i < numOutVec4; i++) { tesSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 v" + std::to_string(i) + "Out;\n"; outLocation += 1; } const uint32_t outRemainder = maxTeseOutComp % 4; if (outRemainder != 0) { if (outRemainder == 1) { tesSourceStr += "layout(location=" + std::to_string(outLocation) + ") out float" + " vnOut;\n"; } else { tesSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec" + std::to_string(outRemainder) + " vnOut;\n"; } outLocation += 1; } tesSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 exceedLimitOut;\n"; // Finalize tesSourceStr += "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; std::string fsSourceStr = "#version 450\n" "\n" "layout(location=0) out vec4 color;" "\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSourceStr.c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSourceStr.c_str(), VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSourceStr.c_str(), VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSourceStr.c_str(), VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); // Set up CB 0; type is UNORM by default pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo = {}; inputAssemblyInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; inputAssemblyInfo.pNext = NULL; inputAssemblyInfo.flags = 0; inputAssemblyInfo.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; inputAssemblyInfo.primitiveRestartEnable = VK_FALSE; pipe.SetInputAssembly(&inputAssemblyInfo); VkPipelineTessellationStateCreateInfo tessInfo = {}; tessInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; tessInfo.pNext = NULL; tessInfo.flags = 0; tessInfo.patchControlPoints = 3; pipe.SetTessellation(&tessInfo); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineExceedMaxGeometryInputOutputComponents) { TEST_DESCRIPTION( "Test that errors are produced when the number of input and/or output components to the geometry stage exceeds the device " "limit"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Geometry shader exceeds " "VkPhysicalDeviceLimits::maxGeometryInputComponents"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Geometry shader exceeds " "VkPhysicalDeviceLimits::maxGeometryOutputComponents"); ASSERT_NO_FATAL_FAILURE(Init()); VkPhysicalDeviceFeatures feat; vkGetPhysicalDeviceFeatures(gpu(), &feat); if (!feat.geometryShader) { printf("%s geometry shader stage unsupported.\n", kSkipPrefix); return; } std::string vsSourceStr = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" 
"}\n"; std::string gsSourceStr = "#version 450\n" "\n" "layout(triangles) in;\n" "layout(invocations=1) in;\n"; // Input components const uint32_t maxGeomInComp = m_device->props.limits.maxGeometryInputComponents; const uint32_t numInVec4 = maxGeomInComp / 4; uint32_t inLocation = 0; for (uint32_t i = 0; i < numInVec4; i++) { gsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 v" + std::to_string(i) + "In[];\n"; inLocation += 1; } const uint32_t inRemainder = maxGeomInComp % 4; if (inRemainder != 0) { if (inRemainder == 1) { gsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in float" + " vnIn[];\n"; } else { gsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec" + std::to_string(inRemainder) + " vnIn[];\n"; } inLocation += 1; } gsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 exceedLimitIn[];\n\n"; // Output components const uint32_t maxGeomOutComp = m_device->props.limits.maxGeometryOutputComponents; const uint32_t numOutVec4 = maxGeomOutComp / 4; uint32_t outLocation = 0; for (uint32_t i = 0; i < numOutVec4; i++) { gsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 v" + std::to_string(i) + "Out;\n"; outLocation += 1; } const uint32_t outRemainder = maxGeomOutComp % 4; if (outRemainder != 0) { if (outRemainder == 1) { gsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out float" + " vnOut;\n"; } else { gsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec" + std::to_string(outRemainder) + " vnOut;\n"; } outLocation += 1; } gsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 exceedLimitOut;\n"; // Finalize gsSourceStr += "layout(triangle_strip, max_vertices=3) out;\n" "\n" "void main(){\n" " exceedLimitOut = vec4(1);\n" "}\n"; std::string fsSourceStr = "#version 450\n" "\n" "layout(location=0) out vec4 color;" "\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSourceStr.c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj gs(m_device, gsSourceStr.c_str(), VK_SHADER_STAGE_GEOMETRY_BIT, this); VkShaderObj fs(m_device, fsSourceStr.c_str(), VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&gs); pipe.AddShader(&fs); // Set up CB 0; type is UNORM by default pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineExceedMaxFragmentInputComponents) { TEST_DESCRIPTION( "Test that an error is produced when the number of input components from the fragment stage exceeds the device limit"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Fragment shader exceeds " "VkPhysicalDeviceLimits::maxFragmentInputComponents"); ASSERT_NO_FATAL_FAILURE(Init()); std::string vsSourceStr = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; const uint32_t maxFsInComp = m_device->props.limits.maxFragmentInputComponents; std::string fsSourceStr = "#version 450\n\n"; const uint32_t numVec4 = maxFsInComp / 4; uint32_t location = 0; for (uint32_t i = 0; i < numVec4; i++) { fsSourceStr += "layout(location=" + std::to_string(location) + ") in vec4 v" + std::to_string(i) + ";\n"; location += 1; } const uint32_t remainder = maxFsInComp % 
4; if (remainder != 0) { if (remainder == 1) { fsSourceStr += "layout(location=" + std::to_string(location) + ") in float" + " vn;\n"; } else { fsSourceStr += "layout(location=" + std::to_string(location) + ") in vec" + std::to_string(remainder) + " vn;\n"; } location += 1; } fsSourceStr += "layout(location=" + std::to_string(location) + ") in vec4 exceedLimit;\n" "\n" "layout(location=0) out vec4 color;" "\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSourceStr.c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSourceStr.c_str(), VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); // Set up CB 0; type is UNORM by default pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineUniformBlockNotProvided) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming a uniform block which has no corresponding binding in the pipeline " "layout"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not declared in pipeline layout"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0; type is UNORM by default */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelinePushConstantsNotInLayout) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming push constants which are not provided in the pipeline layout"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not declared in layout"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "layout(push_constant, std430) uniform foo { float x; } consts;\n" "void main(){\n" " gl_Position = vec4(consts.x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0; type is UNORM by default */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); /* should have generated an error -- no push constant ranges provided! 
*/ m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineInputAttachmentMissing) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming an input attachment which is not included in the subpass " "description"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "consumes input attachment index 0 but not provided in subpass"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); // error here. pipe.CreateVKPipeline(pl.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineInputAttachmentTypeMismatch) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming an input attachment with a format having a different fundamental " "type"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "input attachment 0 format of VK_FORMAT_R8G8B8A8_UINT does not match"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); VkAttachmentDescription descs[2] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UINT, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, }; VkAttachmentReference color = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }; VkAttachmentReference input = { 1, VK_IMAGE_LAYOUT_GENERAL, }; VkSubpassDescription sd = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input, 1, &color, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descs, 1, &sd, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); 
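    // The subpass wires input attachment index 0 to the VK_FORMAT_R8G8B8A8_UINT attachment, while the
    // shader's subpassInput is float-typed, so the format-mismatch error armed above is expected from
    // the pipeline creation below.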
ASSERT_VK_SUCCESS(err); // error here. pipe.CreateVKPipeline(pl.handle(), rp); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, CreatePipelineInputAttachmentMissingArray) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming an input attachment which is not included in the subpass " "description -- array case"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "consumes input attachment index 0 but not provided in subpass"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput xs[1];\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(xs[0]);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 2, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); // error here. pipe.CreateVKPipeline(pl.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateComputePipelineMissingDescriptor) { TEST_DESCRIPTION( "Test that an error is produced for a compute pipeline consuming a descriptor which is not provided in the pipeline " "layout"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Shader uses descriptor slot 0.0"); ASSERT_NO_FATAL_FAILURE(Init()); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) buffer block { vec4 x; };\n" "void main(){\n" " x = vec4(1);\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, descriptorSet.GetPipelineLayout(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkLayerTest, CreateComputePipelineDescriptorTypeMismatch) { TEST_DESCRIPTION("Test that an error is produced for a pipeline consuming a descriptor-backed resource of a mismatched type"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "but descriptor of type VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER"); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {binding}); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) buffer block { vec4 x; };\n" "void main() {\n" " x.x = 1.0f;\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); 
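    // The pipeline layout exposes a VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER at set=0, binding=0, but the
    // shader declares a storage buffer there, so vkCreateComputePipelines below should report the mismatch.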
VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkLayerTest, DrawTimeImageViewTypeMismatchWithPipeline) { TEST_DESCRIPTION( "Test that an error is produced when an image view type does not match the dimensionality declared in the shader"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "requires an image view of type VK_IMAGE_VIEW_TYPE_3D"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main() { gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler3D s;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = texture(s, vec3(0));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkTextureObj texture(m_device, nullptr); VkSamplerObj sampler(m_device); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendSamplerTexture(&sampler, &texture); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_commandBuffer->BindDescriptorSet(descriptorSet); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // error produced here. 
    vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);

    m_errorMonitor->VerifyFound();

    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, DrawTimeImageMultisampleMismatchWithPipeline) {
    TEST_DESCRIPTION(
        "Test that an error is produced when multisampled images are consumed via single-sample image types in the shader, or "
        "vice versa.");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "requires bound image to have multiple samples");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main() { gl_Position = vec4(0); }\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "layout(set=0, binding=0) uniform sampler2DMS s;\n"
        "layout(location=0) out vec4 color;\n"
        "void main() {\n"
        "   color = texelFetch(s, ivec2(0), 0);\n"
        "}\n";
    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();

    VkTextureObj texture(m_device, nullptr);  // THIS LINE CAUSES CRASH ON MALI
    VkSamplerObj sampler(m_device);

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendSamplerTexture(&sampler, &texture);
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

    VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
    ASSERT_VK_SUCCESS(err);

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);

    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    m_commandBuffer->BindDescriptorSet(descriptorSet);

    VkViewport viewport = {0, 0, 16, 16, 0, 1};
    vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
    VkRect2D scissor = {{0, 0}, {16, 16}};
    vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);

    // error produced here.
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, DrawTimeImageComponentTypeMismatchWithPipeline) { TEST_DESCRIPTION( "Test that an error is produced when the component type of an imageview disagrees with the type in the shader."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "SINT component type, but bound descriptor"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main() { gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform isampler2D s;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = texelFetch(s, ivec2(0), 0);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkTextureObj texture(m_device, nullptr); // UNORM texture by default, incompatible with isampler2D VkSamplerObj sampler(m_device); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendSamplerTexture(&sampler, &texture); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_commandBuffer->BindDescriptorSet(descriptorSet); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // error produced here. vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, AttachmentDescriptionUndefinedFormat) { TEST_DESCRIPTION("Create a render pass with an attachment description format set to VK_FORMAT_UNDEFINED"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "format is VK_FORMAT_UNDEFINED"); VkAttachmentReference color_attach = {}; color_attach.layout = VK_IMAGE_LAYOUT_GENERAL; color_attach.attachment = 0; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_UNDEFINED; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult result = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); if (result == VK_SUCCESS) { vkDestroyRenderPass(m_device->device(), rp, NULL); } } TEST_F(VkLayerTest, CreateImageViewNoMemoryBoundToImage) { VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. 
Memory should be bound by calling vkBindImageMemory()."); ASSERT_NO_FATAL_FAILURE(Init()); // Create an image and try to create a view with no memory backing the image VkImage image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); // If last error is success, it still created the view, so delete it. if (err == VK_SUCCESS) { vkDestroyImageView(m_device->device(), view, NULL); } } TEST_F(VkLayerTest, InvalidImageViewAspect) { TEST_DESCRIPTION("Create an image and try to create a view with an invalid aspectMask"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; VkImageObj image(m_device); image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_LINEAR, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image.handle(); image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.layerCount = 1; // Cause an error by setting an invalid image aspect image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; VkImageView view; vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ExerciseGetImageSubresourceLayout) { TEST_DESCRIPTION("Test vkGetImageSubresourceLayout() valid usages"); ASSERT_NO_FATAL_FAILURE(Init()); VkSubresourceLayout subres_layout = {}; // VU 00732: image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR { const VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL; // ERROR: violates VU 00732 VkImageObj img(m_device); img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, tiling); 
ASSERT_TRUE(img.initialized()); VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subres.mipLevel = 0; subres.arrayLayer = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-image-00996"); vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout); m_errorMonitor->VerifyFound(); } // VU 00733: The aspectMask member of pSubresource must only have a single bit set { VkImageObj img(m_device); img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT); ASSERT_TRUE(img.initialized()); VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_METADATA_BIT; // ERROR: triggers VU 00733 subres.mipLevel = 0; subres.arrayLayer = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-aspectMask-00997"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter"); vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout); m_errorMonitor->VerifyFound(); } // 00739 mipLevel must be less than the mipLevels specified in VkImageCreateInfo when the image was created { VkImageObj img(m_device); img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT); ASSERT_TRUE(img.initialized()); VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subres.mipLevel = 1; // ERROR: triggers VU 00739 subres.arrayLayer = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-mipLevel-01716"); vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout); m_errorMonitor->VerifyFound(); } // 00740 arrayLayer must be less than the arrayLayers specified in VkImageCreateInfo when the image was created { VkImageObj img(m_device); img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT); ASSERT_TRUE(img.initialized()); VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subres.mipLevel = 0; subres.arrayLayer = 1; // ERROR: triggers VU 00740 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-arrayLayer-01717"); vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, CopyImageLayerCountMismatch) { TEST_DESCRIPTION( "Try to copy between images with the source subresource having a different layerCount than the destination subresource"); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images to copy between VkImageObj src_image_obj(m_device); VkImageObj dst_image_obj(m_device); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 4; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.flags = 0; src_image_obj.init(&image_create_info); ASSERT_TRUE(src_image_obj.initialized()); image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; 
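    // The destination image reuses the same create info as the source; only the usage flag changes to TRANSFER_DST.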
dst_image_obj.init(&image_create_info); ASSERT_TRUE(dst_image_obj.initialized()); m_commandBuffer->begin(); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset.x = 0; copyRegion.srcOffset.y = 0; copyRegion.srcOffset.z = 0; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; // Introduce failure by forcing the dst layerCount to differ from src copyRegion.dstSubresource.layerCount = 3; copyRegion.dstOffset.x = 0; copyRegion.dstOffset.y = 0; copyRegion.dstOffset.z = 0; copyRegion.extent.width = 1; copyRegion.extent.height = 1; copyRegion.extent.depth = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-extent-00140"); m_commandBuffer->CopyImage(src_image_obj.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image_obj.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ImageLayerUnsupportedFormat) { TEST_DESCRIPTION("Creating images with unsupported formats "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create image with unsupported format - Expect FORMAT_UNSUPPORTED VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_UNDEFINED; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-format-00943"); VkImage image; vkCreateImage(m_device->handle(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateImageViewFormatMismatchUnrelated) { TEST_DESCRIPTION("Create an image with a color format, then try to create a depth view of it"); if (!EnableDeviceProfileLayer()) { printf("%s Failed to enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); // Load required functions PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = (PFN_vkSetPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceFormatPropertiesEXT"); PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = (PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(), "vkGetOriginalPhysicalDeviceFormatPropertiesEXT"); if (!(fpvkSetPhysicalDeviceFormatPropertiesEXT) || !(fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix); return; } auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s Couldn't find depth stencil image format.\n", kSkipPrefix); return; } VkFormatProperties formatProps; fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), depth_format, &formatProps); formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT; 
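    // Push the modified properties back through the device_profile_api layer so the chosen depth/stencil format
    // reports color-attachment support for the rest of this test.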
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), depth_format, formatProps); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView imgView; VkImageViewCreateInfo imgViewInfo = {}; imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; imgViewInfo.image = image.handle(); imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; imgViewInfo.format = depth_format; imgViewInfo.subresourceRange.layerCount = 1; imgViewInfo.subresourceRange.baseMipLevel = 0; imgViewInfo.subresourceRange.levelCount = 1; imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Can't use depth format for view into color image - Expect INVALID_FORMAT m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation."); vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateImageViewNoMutableFormatBit) { TEST_DESCRIPTION("Create an image view with a different format, when the image does not have MUTABLE_FORMAT bit"); if (!EnableDeviceProfileLayer()) { printf("%s Couldn't enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr; PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr; // Load required functions if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { printf("%s Required extensions are not present.\n", kSkipPrefix); return; } VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkFormatProperties formatProps; fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_B8G8R8A8_UINT, &formatProps); formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_B8G8R8A8_UINT, formatProps); VkImageView imgView; VkImageViewCreateInfo imgViewInfo = {}; imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; imgViewInfo.image = image.handle(); imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; imgViewInfo.format = VK_FORMAT_B8G8R8A8_UINT; imgViewInfo.subresourceRange.layerCount = 1; imgViewInfo.subresourceRange.baseMipLevel = 0; imgViewInfo.subresourceRange.levelCount = 1; imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Same compatibility class but no MUTABLE_FORMAT bit - Expect // VIEW_CREATE_ERROR m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01019"); vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateImageViewDifferentClass) { TEST_DESCRIPTION("Passing bad parameters to CreateImageView"); ASSERT_NO_FATAL_FAILURE(Init()); if (!(m_device->format_properties(VK_FORMAT_R8_UINT).optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) { printf("%s Device does not support R8_UINT as color attachment; skipped", kSkipPrefix); return; } VkImageCreateInfo mutImgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, 
                                    VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
                                    VK_IMAGE_TYPE_2D,
                                    VK_FORMAT_R8_UINT,
                                    {128, 128, 1},
                                    1,
                                    1,
                                    VK_SAMPLE_COUNT_1_BIT,
                                    VK_IMAGE_TILING_OPTIMAL,
                                    VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                    VK_SHARING_MODE_EXCLUSIVE,
                                    0,
                                    nullptr,
                                    VK_IMAGE_LAYOUT_UNDEFINED};
    VkImageObj mutImage(m_device);
    mutImage.init(&mutImgInfo);
    ASSERT_TRUE(mutImage.initialized());

    VkImageView imgView;
    VkImageViewCreateInfo imgViewInfo = {};
    imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
    imgViewInfo.format = VK_FORMAT_B8G8R8A8_UNORM;
    imgViewInfo.subresourceRange.layerCount = 1;
    imgViewInfo.subresourceRange.baseMipLevel = 0;
    imgViewInfo.subresourceRange.levelCount = 1;
    imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    imgViewInfo.image = mutImage.handle();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01018");
    vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, MultiplaneIncompatibleViewFormat) {
    TEST_DESCRIPTION("Positive/negative tests of multiplane image view format compatibility");

    // Enable KHR multiplane req'd extensions
    bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                                    VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION);
    if (mp_extensions) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
    if (mp_extensions) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
    } else {
        printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    VkImageCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ci.pNext = NULL;
    ci.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
    ci.imageType = VK_IMAGE_TYPE_2D;
    ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
    ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    ci.extent = {128, 128, 1};
    ci.mipLevels = 1;
    ci.arrayLayers = 1;
    ci.samples = VK_SAMPLE_COUNT_1_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    // Verify format
    VkFormatFeatureFlags features = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
    bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features);
    if (!supported) {
        printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; } VkImageObj image_obj(m_device); image_obj.init(&ci); ASSERT_TRUE(image_obj.initialized()); VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image_obj.image(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8_SNORM; // Compat is VK_FORMAT_R8_UNORM ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT; // Incompatible format error VkImageView imageView = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01586"); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed imageView = VK_NULL_HANDLE; // Correct format succeeds ivci.format = VK_FORMAT_R8_UNORM; m_errorMonitor->ExpectSuccess(); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyNotFound(); vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed imageView = VK_NULL_HANDLE; // Try a multiplane imageview ivci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->ExpectSuccess(); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyNotFound(); vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed } TEST_F(VkLayerTest, CreateImageViewInvalidSubresourceRange) { TEST_DESCRIPTION("Passing bad image subrange to CreateImageView"); ASSERT_NO_FATAL_FAILURE(Init()); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); VkImageView img_view; VkImageViewCreateInfo img_view_info_template = {}; img_view_info_template.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; img_view_info_template.image = image.handle(); img_view_info_template.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY; img_view_info_template.format = image.format(); // subresourceRange to be filled later for the purposes of this test img_view_info_template.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_view_info_template.subresourceRange.baseMipLevel = 0; img_view_info_template.subresourceRange.levelCount = 0; img_view_info_template.subresourceRange.baseArrayLayer = 0; img_view_info_template.subresourceRange.layerCount = 0; // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01478"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01478"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; 
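        // With only one mip level in the image, baseMipLevel=1 triggers 01478, and baseMipLevel+levelCount=2 also
        // exceeds the mip count, triggering 01718.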
VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // These tests rely on having the Maintenance1 extension not being enabled, and are invalid on all but version 1.0 if (m_device->props.apiVersion < VK_API_VERSION_1_1) { // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01480"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01480"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01719"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01719"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01719"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } } } TEST_F(VkLayerTest, CompressedImageMipCopyTests) { TEST_DESCRIPTION("Image/Buffer copies for higher mip levels"); ASSERT_NO_FATAL_FAILURE(Init()); VkPhysicalDeviceFeatures device_features = {}; 
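    // Pick whichever block-compressed family the device supports; BC3, ETC2 RGBA8, and ASTC 4x4 all use 4x4 texel
    // blocks of 16 bytes, which the buffer-size checks below rely on.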
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); VkFormat compressed_format = VK_FORMAT_UNDEFINED; if (device_features.textureCompressionBC) { compressed_format = VK_FORMAT_BC3_SRGB_BLOCK; } else if (device_features.textureCompressionETC2) { compressed_format = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK; } else if (device_features.textureCompressionASTC_LDR) { compressed_format = VK_FORMAT_ASTC_4x4_UNORM_BLOCK; } else { printf("%s No compressed formats supported - CompressedImageMipCopyTests skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = compressed_format; ci.extent = {32, 32, 1}; ci.mipLevels = 6; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj image(m_device); image.init(&ci); ASSERT_TRUE(image.initialized()); VkImageObj odd_image(m_device); ci.extent = {31, 32, 1}; // Mips are [31,32] [15,16] [7,8] [3,4], [1,2] [1,1] odd_image.init(&ci); ASSERT_TRUE(odd_image.initialized()); // Allocate buffers VkMemoryPropertyFlags reqs = 0; VkBufferObj buffer_1024, buffer_64, buffer_16, buffer_8; buffer_1024.init_as_src_and_dst(*m_device, 1024, reqs); buffer_64.init_as_src_and_dst(*m_device, 64, reqs); buffer_16.init_as_src_and_dst(*m_device, 16, reqs); buffer_8.init_as_src_and_dst(*m_device, 8, reqs); VkBufferImageCopy region = {}; region.bufferRowLength = 0; region.bufferImageHeight = 0; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.layerCount = 1; region.imageOffset = {0, 0, 0}; region.bufferOffset = 0; // start recording m_commandBuffer->begin(); // Mip level copies that work - 5 levels m_errorMonitor->ExpectSuccess(); // Mip 0 should fit in 1k buffer - 1k texels @ 1b each region.imageExtent = {32, 32, 1}; region.imageSubresource.mipLevel = 0; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_1024.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_1024.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); // Mip 2 should fit in 64b buffer - 64 texels @ 1b each region.imageExtent = {8, 8, 1}; region.imageSubresource.mipLevel = 2; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); // Mip 3 should fit in 16b buffer - 16 texels @ 1b each region.imageExtent = {4, 4, 1}; region.imageSubresource.mipLevel = 3; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); // Mip 4&5 should fit in 16b buffer with no complaint - 4 & 1 texels @ 1b each region.imageExtent = {2, 2, 1}; region.imageSubresource.mipLevel = 4; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); 
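    // Mip 5 is a single texel, but a copy still covers one full 16-byte block, which is why buffer_8 is rejected below.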
region.imageExtent = {1, 1, 1}; region.imageSubresource.mipLevel = 5; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyNotFound(); // Buffer must accommodate a full compressed block, regardless of texel count m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_8.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00171"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_8.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); // Copy width < compressed block size, but not the full mip width region.imageExtent = {1, 2, 1}; region.imageSubresource.mipLevel = 4; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageExtent-00207"); // width not a multiple of compressed block width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageExtent-00207"); // width not a multiple of compressed block width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); // Copy height < compressed block size but not the full mip height region.imageExtent = {2, 1, 1}; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageExtent-00208"); // height not a multiple of compressed block width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageExtent-00208"); // height not a multiple of compressed block width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); // Offsets must be multiple of compressed block size region.imageOffset = {1, 1, 0}; region.imageExtent = {1, 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageOffset-00205"); // imageOffset not a multiple of block size m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity vkCmdCopyImageToBuffer(m_commandBuffer->handle(), 
image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageOffset-00205"); // imageOffset not a multiple of block size m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); // Offset + extent width = mip width - should succeed region.imageOffset = {4, 4, 0}; region.imageExtent = {3, 4, 1}; region.imageSubresource.mipLevel = 2; m_errorMonitor->ExpectSuccess(); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyNotFound(); // Offset + extent width > mip width, but still within the final compressed block - should succeed region.imageExtent = {4, 4, 1}; m_errorMonitor->ExpectSuccess(); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyNotFound(); // Offset + extent width < mip width and not a multiple of block width - should fail region.imageExtent = {3, 3, 1}; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageExtent-00208"); // offset+extent not a multiple of block width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageExtent-00208"); // offset+extent not a multiple of block width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ImageBufferCopyTests) { TEST_DESCRIPTION("Image to buffer and buffer to image tests"); ASSERT_NO_FATAL_FAILURE(Init()); // Bail if any dimension of transfer granularity is 0. auto index = m_device->graphics_queue_node_index_; auto queue_family_properties = m_device->phy().queue_properties(); if ((queue_family_properties[index].minImageTransferGranularity.depth == 0) || (queue_family_properties[index].minImageTransferGranularity.width == 0) || (queue_family_properties[index].minImageTransferGranularity.height == 0)) { printf("%s Subresource copies are disallowed when xfer granularity (x|y|z) is 0. 
Skipped.\n", kSkipPrefix); return; } VkImageObj image_64k(m_device); // 128^2 texels, 64k VkImageObj image_16k(m_device); // 64^2 texels, 16k VkImageObj image_16k_depth(m_device); // 64^2 texels, depth, 16k VkImageObj ds_image_4D_1S(m_device); // 256^2 texels, 512kb (256k depth, 64k stencil, 192k pack) VkImageObj ds_image_3D_1S(m_device); // 256^2 texels, 256kb (192k depth, 64k stencil) VkImageObj ds_image_2D(m_device); // 256^2 texels, 128k (128k depth) VkImageObj ds_image_1S(m_device); // 256^2 texels, 64k (64k stencil) image_64k.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UINT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); image_16k.Init(64, 64, 1, VK_FORMAT_R8G8B8A8_UINT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image_64k.initialized()); ASSERT_TRUE(image_16k.initialized()); // Verify all needed Depth/Stencil formats are supported bool missing_ds_support = false; VkFormatProperties props = {0, 0, 0}; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT_S8_UINT, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D24_UNORM_S8_UINT, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D16_UNORM, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_S8_UINT, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; if (!missing_ds_support) { image_16k_depth.Init(64, 64, 1, VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image_16k_depth.initialized()); ds_image_4D_1S.Init( 256, 256, 1, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(ds_image_4D_1S.initialized()); ds_image_3D_1S.Init( 256, 256, 1, VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(ds_image_3D_1S.initialized()); ds_image_2D.Init( 256, 256, 1, VK_FORMAT_D16_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | 
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(ds_image_2D.initialized()); ds_image_1S.Init( 256, 256, 1, VK_FORMAT_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(ds_image_1S.initialized()); } // Allocate buffers VkBufferObj buffer_256k, buffer_128k, buffer_64k, buffer_16k; VkMemoryPropertyFlags reqs = 0; buffer_256k.init_as_src_and_dst(*m_device, 262144, reqs); // 256k buffer_128k.init_as_src_and_dst(*m_device, 131072, reqs); // 128k buffer_64k.init_as_src_and_dst(*m_device, 65536, reqs); // 64k buffer_16k.init_as_src_and_dst(*m_device, 16384, reqs); // 16k VkBufferImageCopy region = {}; region.bufferRowLength = 0; region.bufferImageHeight = 0; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.layerCount = 1; region.imageOffset = {0, 0, 0}; region.imageExtent = {64, 64, 1}; region.bufferOffset = 0; // attempt copies before putting command buffer in recording state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-commandBuffer-recording"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-commandBuffer-recording"); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // start recording m_commandBuffer->begin(); // successful copies m_errorMonitor->ExpectSuccess(); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); region.imageOffset.x = 16; // 16k copy, offset requires larger image vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); region.imageExtent.height = 78; // > 16k copy requires larger buffer & image vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); region.imageOffset.x = 0; region.imageExtent.height = 64; region.bufferOffset = 256; // 16k copy with buffer offset, requires larger buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region); m_errorMonitor->VerifyNotFound(); // image/buffer too small (extent too large) on copy to image region.imageExtent = {65, 64, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00171"); // buffer too small vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172"); // image too small vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); // image/buffer too small (offset) on copy to image region.imageExtent = {64, 64, 1}; region.imageOffset = {0, 4, 0}; 
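    // Note that bufferOffset is still 256 from the earlier successful copy, so the 64x64 R8G8B8A8_UINT region (16384
    // bytes) no longer fits in the 16k buffer, while the 4-texel y offset pushes the region outside the 64x64 image.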
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00171"); // buffer too small vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172"); // image too small vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); // image/buffer too small on copy to buffer region.imageExtent = {64, 64, 1}; region.imageOffset = {0, 0, 0}; region.bufferOffset = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // buffer too small vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageExtent = {64, 65, 1}; region.bufferOffset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00182"); // image too small vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // buffer size OK but rowlength causes loose packing m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); region.imageExtent = {64, 64, 1}; region.bufferRowLength = 68; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // An extent with zero area should produce a warning, but no error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_ERROR_BIT_EXT, "} has zero area"); region.imageExtent.width = 0; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // aspect bits region.imageExtent = {64, 64, 1}; region.bufferRowLength = 0; region.bufferImageHeight = 0; if (!missing_ds_support) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-aspectMask-00212"); // more than 1 aspect bit set region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_depth.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-aspectMask-00211"); // different mis-matched aspect region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_depth.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); } m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-aspectMask-00211"); // mis-matched aspect region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Out-of-range mip levels should fail region.imageSubresource.mipLevel = 
image_16k.create_info().mipLevels + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageSubresource-01703"); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00182"); // unavoidable "region exceeds image bounds" for non-existent mip vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageSubresource-01701"); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172"); // unavoidable "region exceeds image bounds" for non-existent mip vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); region.imageSubresource.mipLevel = 0; // Out-of-range array layers should fail region.imageSubresource.baseArrayLayer = image_16k.create_info().arrayLayers; region.imageSubresource.layerCount = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704"); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageSubresource-01702"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); region.imageSubresource.baseArrayLayer = 0; // Layout mismatch should fail m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189"); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-dstImageLayout-00180"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); // Test Depth/Stencil copies if (missing_ds_support) { printf("%s Depth / Stencil formats unsupported - skipping D/S tests.\n", kSkipPrefix); } else { VkBufferImageCopy ds_region = {}; ds_region.bufferOffset = 0; ds_region.bufferRowLength = 0; ds_region.bufferImageHeight = 0; ds_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; ds_region.imageSubresource.mipLevel = 0; ds_region.imageSubresource.baseArrayLayer = 0; ds_region.imageSubresource.layerCount = 1; ds_region.imageOffset = {0, 0, 0}; ds_region.imageExtent = {256, 256, 1}; // Depth copies that should succeed m_errorMonitor->ExpectSuccess(); // Extract 4b depth per texel, pack into 256k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_256k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); // Extract 3b depth per texel, pack (loose) into 256k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_256k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); 
m_errorMonitor->ExpectSuccess(); // Copy 2b depth per texel, into 128k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_2D.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_128k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); // Depth copies that should fail ds_region.bufferOffset = 4; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 4b depth per texel, pack into 256k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_256k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 3b depth per texel, pack (loose) into 256k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_256k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Copy 2b depth per texel, into 128k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_2D.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_128k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); // Stencil copies that should succeed ds_region.bufferOffset = 0; ds_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; m_errorMonitor->ExpectSuccess(); // Extract 1b stencil per texel, pack into 64k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); // Extract 1b stencil per texel, pack into 64k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); // Copy 1b depth per texel, into 64k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); // Stencil copies that should fail m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 1b stencil per texel, pack into 64k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_16k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 1b stencil per texel, pack into 64k buffer ds_region.bufferRowLength = 260; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); ds_region.bufferRowLength = 0; ds_region.bufferOffset = 4; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Copy 1b depth per texel, into 64k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); } // Test compressed formats, if supported VkPhysicalDeviceFeatures 
device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); if (!(device_features.textureCompressionBC || device_features.textureCompressionETC2 || device_features.textureCompressionASTC_LDR)) { printf("%s No compressed formats supported - block compression tests skipped.\n", kSkipPrefix); } else { VkImageObj image_16k_4x4comp(m_device); // 128^2 texels as 32^2 compressed (4x4) blocks, 16k VkImageObj image_NPOT_4x4comp(m_device); // 130^2 texels as 33^2 compressed (4x4) blocks if (device_features.textureCompressionBC) { image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_BC3_SRGB_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_BC3_SRGB_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); } else if (device_features.textureCompressionETC2) { image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); } else { image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); } ASSERT_TRUE(image_16k_4x4comp.initialized()); // Just fits m_errorMonitor->ExpectSuccess(); region.imageExtent = {128, 128, 1}; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyNotFound(); // with offset, too big for buffer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); region.bufferOffset = 16; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.bufferOffset = 0; // extents that are not a multiple of compressed block size m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageExtent-00207"); // extent width not a multiple of block size m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity region.imageExtent.width = 66; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageExtent.width = 128; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageExtent-00208"); // extent height not a multiple of block size m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity region.imageExtent.height = 2; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageExtent.height = 128; // TODO: All available compressed formats are 2D, with block depth of 1. Unable to provoke VU_01277. 
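        // Reminder: BC3, ETC2_R8G8B8A8 and ASTC 4x4 all use 16-byte 4x4 texel blocks, so the 128x128 image
        // is exactly 16 KiB and bufferOffset must be a multiple of 16 for the checks that follow.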
// non-multiple extents are allowed if at the far edge of a non-block-multiple image - these should pass m_errorMonitor->ExpectSuccess(); region.imageExtent.width = 66; region.imageOffset.x = 64; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); region.imageExtent.width = 16; region.imageOffset.x = 0; region.imageExtent.height = 2; region.imageOffset.y = 128; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyNotFound(); region.imageOffset = {0, 0, 0}; // buffer offset must be a multiple of texel block size (16) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00206"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00193"); region.imageExtent = {64, 64, 1}; region.bufferOffset = 24; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // rowlength not a multiple of block width (4) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferRowLength-00203"); region.bufferOffset = 0; region.bufferRowLength = 130; region.bufferImageHeight = 0; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // imageheight not a multiple of block height (4) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferImageHeight-00204"); region.bufferRowLength = 0; region.bufferImageHeight = 130; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, MiscImageLayerTests) { TEST_DESCRIPTION("Image-related tests that don't belong elsewhere"); ASSERT_NO_FATAL_FAILURE(Init()); // TODO: Ideally we should check if a format is supported, before using it. 
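    // Setup: a 64bpp (8 bytes/texel) R16G16B16A16_UINT image and a 16bpp R8G8_UNORM image, each paired
    // with a source buffer sized for a full 128x128 copy; each case below perturbs one VkBufferImageCopy field.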
VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); // 64bpp ASSERT_TRUE(image.initialized()); VkBufferObj buffer; VkMemoryPropertyFlags reqs = 0; buffer.init_as_src(*m_device, 128 * 128 * 8, reqs); VkBufferImageCopy region = {}; region.bufferRowLength = 128; region.bufferImageHeight = 128; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // layerCount can't be 0 - Expect MISMATCHED_IMAGE_ASPECT region.imageSubresource.layerCount = 1; region.imageExtent.height = 4; region.imageExtent.width = 4; region.imageExtent.depth = 1; VkImageObj image2(m_device); image2.Init(128, 128, 1, VK_FORMAT_R8G8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); // 16bpp ASSERT_TRUE(image2.initialized()); VkBufferObj buffer2; VkMemoryPropertyFlags reqs2 = 0; buffer2.init_as_src(*m_device, 128 * 128 * 2, reqs2); VkBufferImageCopy region2 = {}; region2.bufferRowLength = 128; region2.bufferImageHeight = 128; region2.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // layerCount can't be 0 - Expect MISMATCHED_IMAGE_ASPECT region2.imageSubresource.layerCount = 1; region2.imageExtent.height = 4; region2.imageExtent.width = 4; region2.imageExtent.depth = 1; m_commandBuffer->begin(); // Image must have offset.z of 0 and extent.depth of 1 // Introduce failure by setting imageExtent.depth to 0 region.imageExtent.depth = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-srcImage-00201"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); region.imageExtent.depth = 1; // Image must have offset.z of 0 and extent.depth of 1 // Introduce failure by setting imageOffset.z to 4 // Note: Also (unavoidably) triggers 'region exceeds image' #1228 region.imageOffset.z = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-srcImage-00201"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); region.imageOffset.z = 0; // BufferOffset must be a multiple of the calling command's VkImage parameter's texel size // Introduce failure by setting bufferOffset to 1 and 1/2 texels region.bufferOffset = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00193"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); // BufferOffset must be a multiple of 4 // Introduce failure by setting bufferOffset to a value not divisible by 4 region2.bufferOffset = 6; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00194"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer2.handle(), image2.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region2); m_errorMonitor->VerifyFound(); // BufferRowLength must be 0, or greater than or equal to the width member of imageExtent region.bufferOffset = 0; region.imageExtent.height = 128; region.imageExtent.width = 128; // Introduce failure by setting bufferRowLength > 0 but less than width region.bufferRowLength = 64; 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferRowLength-00195"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); // BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent region.bufferRowLength = 128; // Introduce failure by setting bufferRowHeight > 0 but less than height region.bufferImageHeight = 64; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferImageHeight-00196"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); region.bufferImageHeight = 128; VkImageObj intImage1(m_device); intImage1.Init(128, 128, 1, VK_FORMAT_R8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); intImage1.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj intImage2(m_device); intImage2.Init(128, 128, 1, VK_FORMAT_R8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); intImage2.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {128, 0, 0}; blitRegion.srcOffsets[1] = {128, 128, 1}; blitRegion.dstOffsets[0] = {0, 128, 0}; blitRegion.dstOffsets[1] = {128, 128, 1}; // Look for NULL-blit warning m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "vkCmdBlitImage: pRegions[0].srcOffsets specify a zero-volume area."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "vkCmdBlitImage: pRegions[0].dstOffsets specify a zero-volume area."); vkCmdBlitImage(m_commandBuffer->handle(), intImage1.handle(), intImage1.Layout(), intImage2.handle(), intImage2.Layout(), 1, &blitRegion, VK_FILTER_LINEAR); m_errorMonitor->VerifyFound(); } VkResult GPDIFPHelper(VkPhysicalDevice dev, const VkImageCreateInfo *ci, VkImageFormatProperties *limits = nullptr) { VkImageFormatProperties tmp_limits; limits = limits ? 
limits : &tmp_limits; return vkGetPhysicalDeviceImageFormatProperties(dev, ci->format, ci->imageType, ci->tiling, ci->usage, ci->flags, limits); } TEST_F(VkLayerTest, CreateImageMiscErrors) { TEST_DESCRIPTION("Misc leftover valid usage errors in VkImageCreateInfo struct"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); VkImage null_image; // throwaway target for all the vkCreateImage VkImageCreateInfo tmp_img_ci = {}; tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; tmp_img_ci.flags = 0; // assumably any is supported tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages tmp_img_ci.extent = {64, 64, 1}; // limit is 256 for 3D, or 4096 tmp_img_ci.mipLevels = 1; // any is supported tmp_img_ci.arrayLayers = 1; // limit is 256 tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR // if VK_IMAGE_TILING_LINEAR imageType must be 2D, usage must be TRANSFER, and levels layers samplers all 1 tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL; tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; const VkImageCreateInfo safe_image_ci = tmp_img_ci; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci)); { VkImageCreateInfo image_ci = safe_image_ci; image_ci.sharingMode = VK_SHARING_MODE_CONCURRENT; image_ci.queueFamilyIndexCount = 2; image_ci.pQueueFamilyIndices = nullptr; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-sharingMode-00941"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.sharingMode = VK_SHARING_MODE_CONCURRENT; image_ci.queueFamilyIndexCount = 1; const uint32_t queue_family = 0; image_ci.pQueueFamilyIndices = &queue_family; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-sharingMode-00942"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.format = VK_FORMAT_UNDEFINED; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-format-00943"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; image_ci.arrayLayers = 6; image_ci.imageType = VK_IMAGE_TYPE_1D; image_ci.extent = {64, 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00949"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci = safe_image_ci; image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; image_ci.imageType = VK_IMAGE_TYPE_3D; image_ci.extent = {4, 4, 4}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00949"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support image_ci.samples = VK_SAMPLE_COUNT_4_BIT; image_ci.imageType = VK_IMAGE_TYPE_3D; image_ci.extent = {4, 4, 4}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02257"); 
        vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();

        image_ci = safe_image_ci;
        image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;  // always has 4 samples support
        image_ci.samples = VK_SAMPLE_COUNT_4_BIT;
        image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
        image_ci.arrayLayers = 6;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02257");
        vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();

        image_ci = safe_image_ci;
        image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;  // always has 4 samples support
        image_ci.samples = VK_SAMPLE_COUNT_4_BIT;
        image_ci.tiling = VK_IMAGE_TILING_LINEAR;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02257");
        vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();

        image_ci = safe_image_ci;
        image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;  // always has 4 samples support
        image_ci.samples = VK_SAMPLE_COUNT_4_BIT;
        image_ci.mipLevels = 2;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02257");
        vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();
    }

    {
        VkImageCreateInfo image_ci = safe_image_ci;
        image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        image_ci.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00963");
        vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();

        image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00966");
        vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();

        image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
        image_ci.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00963");
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00966");
        vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();
    }

    {
        VkImageCreateInfo image_ci = safe_image_ci;
        image_ci.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00969");
        vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();
    }

    // InitialLayout not VK_IMAGE_LAYOUT_UNDEFINED or VK_IMAGE_LAYOUT_PREINITIALIZED
    {
        VkImageCreateInfo image_ci = safe_image_ci;
        image_ci.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-initialLayout-00993");
        vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();
    }
}

TEST_F(VkLayerTest, CreateImageMinLimitsViolation) {
    TEST_DESCRIPTION("Create an invalid image with parameters that violate a minimum limit, such as being zero.");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkImage null_image;  // throwaway target for all the vkCreateImage

    VkImageCreateInfo tmp_img_ci = {};
    tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    tmp_img_ci.flags = 0;  // presumably any is supported
    tmp_img_ci.imageType = VK_IMAGE_TYPE_2D;
// any is supported tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages tmp_img_ci.extent = {1, 1, 1}; // limit is 256 for 3D, or 4096 tmp_img_ci.mipLevels = 1; // any is supported tmp_img_ci.arrayLayers = 1; // limit is 256 tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR // if VK_IMAGE_TILING_LINEAR imageType must be 2D, usage must be TRANSFER, and levels layers samplers all 1 tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL; tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; const VkImageCreateInfo safe_image_ci = tmp_img_ci; enum Dimension { kWidth = 0x1, kHeight = 0x2, kDepth = 0x4 }; for (underlying_type<Dimension>::type bad_dimensions = 0x1; bad_dimensions < 0x8; ++bad_dimensions) { VkExtent3D extent = {1, 1, 1}; if (bad_dimensions & kWidth) { extent.width = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00944"); } if (bad_dimensions & kHeight) { extent.height = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00945"); } if (bad_dimensions & kDepth) { extent.depth = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00946"); } VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.imageType = VK_IMAGE_TYPE_3D; // has to be 3D otherwise it might trigger the non-1 error instead bad_image_ci.extent = extent; vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.mipLevels = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00947"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.arrayLayers = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-arrayLayers-00948"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; bad_image_ci.arrayLayers = 5; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00954"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); bad_image_ci.arrayLayers = 6; bad_image_ci.extent = {64, 63, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00954"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.imageType = VK_IMAGE_TYPE_1D; bad_image_ci.extent = {64, 2, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00956"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); bad_image_ci.imageType = VK_IMAGE_TYPE_1D; bad_image_ci.extent = {64, 1, 2}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00956"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); bad_image_ci.imageType = VK_IMAGE_TYPE_2D; bad_image_ci.extent = {64, 64, 2}; 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00957"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); bad_image_ci.imageType = VK_IMAGE_TYPE_2D; bad_image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; bad_image_ci.arrayLayers = 6; bad_image_ci.extent = {64, 64, 2}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00957"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.imageType = VK_IMAGE_TYPE_3D; bad_image_ci.arrayLayers = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00961"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } } VkFormat FindFormatLinearWithoutMips(VkPhysicalDevice gpu, VkImageCreateInfo image_ci) { image_ci.tiling = VK_IMAGE_TILING_LINEAR; const VkFormat first_vk_format = static_cast<VkFormat>(1); const VkFormat last_vk_format = static_cast<VkFormat>(130); // avoid compressed/feature protected, otherwise 184 for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) { image_ci.format = format; // WORKAROUND for dev_sim and mock_icd not containing valid format limits yet VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props); const VkFormatFeatureFlags core_filter = 0x1FFF; const auto features = (image_ci.tiling == VK_IMAGE_TILING_LINEAR) ? format_props.linearTilingFeatures & core_filter : format_props.optimalTilingFeatures & core_filter; if (!(features & core_filter)) continue; VkImageFormatProperties img_limits; if (VK_SUCCESS == GPDIFPHelper(gpu, &image_ci, &img_limits) && img_limits.maxMipLevels == 1) return format; } return VK_FORMAT_UNDEFINED; } bool FindFormatWithoutSamples(VkPhysicalDevice gpu, VkImageCreateInfo &image_ci) { const VkFormat first_vk_format = static_cast<VkFormat>(1); const VkFormat last_vk_format = static_cast<VkFormat>(130); // avoid compressed/feature protected, otherwise 184 for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) { image_ci.format = format; // WORKAROUND for dev_sim and mock_icd not containing valid format limits yet VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props); const VkFormatFeatureFlags core_filter = 0x1FFF; const auto features = (image_ci.tiling == VK_IMAGE_TILING_LINEAR) ? 
format_props.linearTilingFeatures & core_filter : format_props.optimalTilingFeatures & core_filter; if (!(features & core_filter)) continue; for (VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_64_BIT; samples > 0; samples = static_cast<VkSampleCountFlagBits>(samples >> 1)) { image_ci.samples = samples; VkImageFormatProperties img_limits; if (VK_SUCCESS == GPDIFPHelper(gpu, &image_ci, &img_limits) && !(img_limits.sampleCounts & samples)) return true; } } return false; } TEST_F(VkLayerTest, CreateImageMaxLimitsViolation) { TEST_DESCRIPTION("Create invalid image with invalid parameters exceeding physical device limits."); ASSERT_NO_FATAL_FAILURE(Init()); VkImage null_image; // throwaway target for all the vkCreateImage VkImageCreateInfo tmp_img_ci = {}; tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; tmp_img_ci.flags = 0; // assumably any is supported tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages tmp_img_ci.extent = {1, 1, 1}; // limit is 256 for 3D, or 4096 tmp_img_ci.mipLevels = 1; // any is supported tmp_img_ci.arrayLayers = 1; // limit is 256 tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR // if VK_IMAGE_TILING_LINEAR imageType must be 2D, usage must be TRANSFER, and levels layers samplers all 1 tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL; tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; const VkImageCreateInfo safe_image_ci = tmp_img_ci; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci)); const VkPhysicalDeviceLimits &dev_limits = m_device->props.limits; { VkImageCreateInfo image_ci = safe_image_ci; image_ci.extent = {8, 8, 1}; image_ci.mipLevels = 4 + 1; // 4 = log2(8) + 1 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00958"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci.extent = {8, 15, 1}; image_ci.mipLevels = 4 + 1; // 4 = floor(log2(15)) + 1 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00958"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.tiling = VK_IMAGE_TILING_LINEAR; image_ci.extent = {64, 64, 1}; image_ci.format = FindFormatLinearWithoutMips(gpu(), image_ci); image_ci.mipLevels = 2; if (image_ci.format != VK_FORMAT_UNDEFINED) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-02255"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s Cannot find a format to test maxMipLevels limit; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; VkImageFormatProperties img_limits; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits)); if (img_limits.maxArrayLayers != UINT32_MAX) { image_ci.arrayLayers = img_limits.maxArrayLayers + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-arrayLayers-02256"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s VkImageFormatProperties::maxArrayLayers is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; bool found = 
FindFormatWithoutSamples(gpu(), image_ci); if (found) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02258"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s Could not find a format with some unsupported samples; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // (any attachment bit) VkImageFormatProperties img_limits; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits)); if (dev_limits.maxFramebufferWidth != UINT32_MAX) { image_ci.extent = {dev_limits.maxFramebufferWidth + 1, 64, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00964"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s VkPhysicalDeviceLimits::maxFramebufferWidth is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } if (dev_limits.maxFramebufferHeight != UINT32_MAX) { image_ci.usage = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; // try different one too image_ci.extent = {64, dev_limits.maxFramebufferHeight + 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00965"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s VkPhysicalDeviceLimits::maxFramebufferHeight is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } } } bool FindUnsupportedImage(VkPhysicalDevice gpu, VkImageCreateInfo &image_ci) { const VkFormat first_vk_format = static_cast<VkFormat>(1); const VkFormat last_vk_format = static_cast<VkFormat>(130); // avoid compressed/feature protected, otherwise 184 const std::vector<VkImageTiling> tilings = {VK_IMAGE_TILING_LINEAR, VK_IMAGE_TILING_OPTIMAL}; for (const auto tiling : tilings) { image_ci.tiling = tiling; for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) { image_ci.format = format; VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props); const VkFormatFeatureFlags core_filter = 0x1FFF; const auto features = (tiling == VK_IMAGE_TILING_LINEAR) ? 
                                  format_props.linearTilingFeatures & core_filter : format_props.optimalTilingFeatures & core_filter;
            if (!(features & core_filter)) continue;  // We want a format that is supported by format features, but not by ImageFormatProperties

            // get as many usage flags as possible
            image_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
            if (features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) image_ci.usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
            if (features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) image_ci.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
            if (features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) image_ci.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
            if (features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) image_ci.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;

            VkImageFormatProperties img_limits;
            if (VK_ERROR_FORMAT_NOT_SUPPORTED == GPDIFPHelper(gpu, &image_ci, &img_limits)) {
                return true;
            }
        }
    }

    return false;
}

VkFormat FindFormatWithoutFeatures(VkPhysicalDevice gpu, VkImageTiling tiling,
                                   VkFormatFeatureFlags undesired_features = UINT32_MAX) {
    const VkFormat first_vk_format = static_cast<VkFormat>(1);
    const VkFormat last_vk_format = static_cast<VkFormat>(130);  // avoid compressed/feature protected, otherwise 184

    for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) {
        VkFormatProperties format_props;
        vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props);
        const VkFormatFeatureFlags core_filter = 0x1FFF;
        const auto features = (tiling == VK_IMAGE_TILING_LINEAR) ? format_props.linearTilingFeatures & core_filter
                                                                 : format_props.optimalTilingFeatures & core_filter;
        const auto valid_features = features & core_filter;
        if (undesired_features == UINT32_MAX) {
            if (!valid_features) return format;
        } else {
            if (valid_features && !(valid_features & undesired_features)) return format;
        }
    }

    return VK_FORMAT_UNDEFINED;
}

TEST_F(VkLayerTest, CopyImageTypeExtentMismatch) {
    // Image copy tests where format type and extents don't match
    ASSERT_NO_FATAL_FAILURE(Init());

    VkImageCreateInfo ci;
    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ci.pNext = NULL;
    ci.flags = 0;
    ci.imageType = VK_IMAGE_TYPE_1D;
    ci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ci.extent = {32, 1, 1};
    ci.mipLevels = 1;
    ci.arrayLayers = 1;
    ci.samples = VK_SAMPLE_COUNT_1_BIT;
    ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.queueFamilyIndexCount = 0;
    ci.pQueueFamilyIndices = NULL;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    // Create 1D image
    VkImageObj image_1D(m_device);
    image_1D.init(&ci);
    ASSERT_TRUE(image_1D.initialized());

    // 2D image
    ci.imageType = VK_IMAGE_TYPE_2D;
    ci.extent = {32, 32, 1};
    VkImageObj image_2D(m_device);
    image_2D.init(&ci);
    ASSERT_TRUE(image_2D.initialized());

    // 3D image
    ci.imageType = VK_IMAGE_TYPE_3D;
    ci.extent = {32, 32, 8};
    VkImageObj image_3D(m_device);
    image_3D.init(&ci);
    ASSERT_TRUE(image_3D.initialized());

    // 2D image array
    ci.imageType = VK_IMAGE_TYPE_2D;
    ci.extent = {32, 32, 1};
    ci.arrayLayers = 8;
    VkImageObj image_2D_array(m_device);
    image_2D_array.init(&ci);
    ASSERT_TRUE(image_2D_array.initialized());

    m_commandBuffer->begin();

    VkImageCopy copy_region;
    copy_region.extent = {32, 1, 1};
    copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.srcSubresource.mipLevel = 0;
    copy_region.dstSubresource.mipLevel = 0;
    copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Sanity check m_errorMonitor->ExpectSuccess(); m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); // 1D texture w/ offset.y > 0. Source = VU 09c00124, dest = 09c00130 copy_region.srcOffset.y = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00146"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145"); // also y-dim overrun m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset.y = 0; copy_region.dstOffset.y = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-00152"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151"); // also y-dim overrun m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset.y = 0; // 1D texture w/ extent.height > 1. Source = VU 09c00124, dest = 09c00130 copy_region.extent.height = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00146"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145"); // also y-dim overrun m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-00152"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151"); // also y-dim overrun m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.extent.height = 1; // 1D texture w/ offset.z > 0. Source = VU 09c00df2, dest = 09c00df4 copy_region.srcOffset.z = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01785"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset.z = 0; copy_region.dstOffset.z = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01786"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset.z = 0; // 1D texture w/ extent.depth > 1. 
    // Source = VU 09c00df2, dest = 09c00df4
    copy_region.extent.depth = 2;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01785");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147");  // also z-dim overrun (src)
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153");  // also z-dim overrun (dst)
    m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01786");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147");  // also z-dim overrun (src)
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153");  // also z-dim overrun (dst)
    m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyFound();
    copy_region.extent.depth = 1;

    // 2D texture w/ offset.z > 0. Source = VU 09c00df6, dest = 09c00df8
    copy_region.extent = {16, 16, 1};
    copy_region.srcOffset.z = 4;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01787");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147");  // also z-dim overrun (src)
    m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyFound();
    copy_region.srcOffset.z = 0;

    copy_region.dstOffset.z = 1;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01788");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153");  // also z-dim overrun (dst)
    m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyFound();
    copy_region.dstOffset.z = 0;

    // 3D texture accessing an array layer other than 0. VU 09c0011a
    copy_region.extent = {4, 4, 1};
    copy_region.srcSubresource.baseArrayLayer = 1;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00141");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcSubresource-01698");  // also 'too many layers'
    m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->end();
}

TEST_F(VkLayerTest, CopyImageTypeExtentMismatchMaintenance1) {
    // Image copy tests where format type and extents don't match and the Maintenance1 extension is enabled
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
    } else {
        printf("%s Maintenance1 extension cannot be enabled, test skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    VkFormat image_format = VK_FORMAT_R8G8B8A8_UNORM;
    VkFormatProperties format_props;
    // TODO: Remove this check if or when devsim handles extensions.
    // The chosen format has mandatory support for the transfer src and dst format features when Maintenance1 is enabled.
However, our // use of devsim and the mock ICD violate this guarantee. vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), image_format, &format_props); if (!(format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT)) { printf("%s Maintenance1 extension is not supported.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_1D; ci.format = image_format; ci.extent = {32, 1, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Create 1D image VkImageObj image_1D(m_device); image_1D.init(&ci); ASSERT_TRUE(image_1D.initialized()); // 2D image ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {32, 32, 1}; VkImageObj image_2D(m_device); image_2D.init(&ci); ASSERT_TRUE(image_2D.initialized()); // 3D image ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {32, 32, 8}; VkImageObj image_3D(m_device); image_3D.init(&ci); ASSERT_TRUE(image_3D.initialized()); // 2D image array ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {32, 32, 1}; ci.arrayLayers = 8; VkImageObj image_2D_array(m_device); image_2D_array.init(&ci); ASSERT_TRUE(image_2D_array.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {32, 1, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Copy from layer not present copy_region.srcSubresource.baseArrayLayer = 4; copy_region.srcSubresource.layerCount = 6; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcSubresource-01698"); m_commandBuffer->CopyImage(image_2D_array.image(), VK_IMAGE_LAYOUT_GENERAL, image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; // Copy to layer not present copy_region.dstSubresource.baseArrayLayer = 1; copy_region.dstSubresource.layerCount = 8; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstSubresource-01699"); m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D_array.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstSubresource.layerCount = 1; m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageCompressedBlockAlignment) { // Image copy tests on compressed images with block alignment errors SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(Init()); // Select a compressed format and verify support VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); VkFormat compressed_format = VK_FORMAT_UNDEFINED; if (device_features.textureCompressionBC) { compressed_format = VK_FORMAT_BC3_SRGB_BLOCK; } else if (device_features.textureCompressionETC2) { 
compressed_format = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK; } else if (device_features.textureCompressionASTC_LDR) { compressed_format = VK_FORMAT_ASTC_4x4_UNORM_BLOCK; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = compressed_format; ci.extent = {64, 64, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageFormatProperties img_prop = {}; if (VK_SUCCESS != vkGetPhysicalDeviceImageFormatProperties(m_device->phy().handle(), ci.format, ci.imageType, ci.tiling, ci.usage, ci.flags, &img_prop)) { printf("%s No compressed formats supported - CopyImageCompressedBlockAlignment skipped.\n", kSkipPrefix); return; } // Create images VkImageObj image_1(m_device); image_1.init(&ci); ASSERT_TRUE(image_1.initialized()); ci.extent = {62, 62, 1}; // slightly smaller and not divisible by block size VkImageObj image_2(m_device); image_2.init(&ci); ASSERT_TRUE(image_2.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {48, 48, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Sanity check m_errorMonitor->ExpectSuccess(); m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); std::string vuid; bool ycbcr = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) || (DeviceValidationVersion() >= VK_API_VERSION_1_1)); // Src, Dest offsets must be multiples of compressed block sizes {4, 4, 1} // Image transfer granularity gets set to compressed block size, so an ITG error is also (unavoidably) triggered. vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01727" : "VUID-VkImageCopy-srcOffset-00157"; copy_region.srcOffset = {2, 4, 0}; // source width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset = {12, 1, 0}; // source height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset = {0, 0, 0}; vuid = ycbcr ? 
"VUID-VkImageCopy-dstImage-01731" : "VUID-VkImageCopy-dstOffset-00162"; copy_region.dstOffset = {1, 0, 0}; // dest width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dstOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {4, 1, 0}; // dest height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dstOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {0, 0, 0}; // Copy extent must be multiples of compressed block sizes {4, 4, 1} if not full width/height vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01728" : "VUID-VkImageCopy-extent-00158"; copy_region.extent = {62, 60, 1}; // source width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01729" : "VUID-VkImageCopy-extent-00159"; copy_region.extent = {60, 62, 1}; // source height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); vuid = ycbcr ? "VUID-VkImageCopy-dstImage-01732" : "VUID-VkImageCopy-extent-00163"; copy_region.extent = {62, 60, 1}; // dest width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity m_commandBuffer->CopyImage(image_2.image(), VK_IMAGE_LAYOUT_GENERAL, image_1.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); vuid = ycbcr ? "VUID-VkImageCopy-dstImage-01733" : "VUID-VkImageCopy-extent-00164"; copy_region.extent = {60, 62, 1}; // dest height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity m_commandBuffer->CopyImage(image_2.image(), VK_IMAGE_LAYOUT_GENERAL, image_1.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Note: "VUID-VkImageCopy-extent-00160", "VUID-VkImageCopy-extent-00165", "VUID-VkImageCopy-srcImage-01730", // "VUID-VkImageCopy-dstImage-01734" // There are currently no supported compressed formats with a block depth other than 1, // so impossible to create a 'not a multiple' condition for depth. 
m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageSinglePlane422Alignment) { // Image copy tests on single-plane _422 formats with block alignment errors // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Select a _422 format and verify support VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8B8G8R8_422_UNORM_KHR; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify formats VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT; bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); if (!supported) { printf("%s Single-plane _422 image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } // Create images ci.extent = {64, 64, 1}; VkImageObj image_422(m_device); image_422.init(&ci); ASSERT_TRUE(image_422.initialized()); ci.extent = {64, 64, 1}; ci.format = VK_FORMAT_R8G8B8A8_UNORM; VkImageObj image_ucmp(m_device); image_ucmp.init(&ci); ASSERT_TRUE(image_ucmp.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {48, 48, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Src offsets must be multiples of compressed block sizes copy_region.srcOffset = {3, 4, 0}; // source offset x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01727"); m_commandBuffer->CopyImage(image_422.image(), VK_IMAGE_LAYOUT_GENERAL, image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset = {0, 0, 0}; // Dst offsets must be multiples of compressed block sizes copy_region.dstOffset = {1, 0, 0}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01731"); m_commandBuffer->CopyImage(image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, image_422.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {0, 0, 0}; // Copy extent must be multiples of compressed block sizes if not full width/height copy_region.extent = {31, 60, 1}; // 422 source, extent.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01728"); m_commandBuffer->CopyImage(image_422.image(), VK_IMAGE_LAYOUT_GENERAL, image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // 422 dest, extent.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01732"); m_commandBuffer->CopyImage(image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, image_422.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {0, 0, 0}; m_commandBuffer->end(); } TEST_F(VkLayerTest, MultiplaneImageSamplerConversionMismatch) { TEST_DESCRIPTION("Create sampler with ycbcr conversion and use with an image created without ycrcb conversion"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, 
VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } // Enable Ycbcr Conversion Features VkPhysicalDeviceSamplerYcbcrConversionFeatures ycbcr_features = {}; ycbcr_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES; ycbcr_features.samplerYcbcrConversion = VK_TRUE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &ycbcr_features)); const VkImageCreateInfo ci = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, NULL, 0, VK_IMAGE_TYPE_2D, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR, {128, 128, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_LINEAR, VK_IMAGE_USAGE_SAMPLED_BIT, VK_SHARING_MODE_EXCLUSIVE, VK_IMAGE_LAYOUT_UNDEFINED}; // Verify formats bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT); if (!supported) { printf("%s Multiplane image format not supported. Skipping test.\n", kSkipPrefix); return; } // Create Ycbcr conversion VkSamplerYcbcrConversionCreateInfo ycbcr_create_info = {VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, NULL, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR, VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, VK_SAMPLER_YCBCR_RANGE_ITU_FULL, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, VK_CHROMA_LOCATION_COSITED_EVEN, VK_CHROMA_LOCATION_COSITED_EVEN, VK_FILTER_NEAREST, false}; VkSamplerYcbcrConversion conversion; vkCreateSamplerYcbcrConversion(m_device->handle(), &ycbcr_create_info, nullptr, &conversion); VkSamplerYcbcrConversionInfo ycbcr_info = {}; ycbcr_info.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO; ycbcr_info.conversion = conversion; // Create a sampler using conversion VkSamplerCreateInfo sci = SafeSaneSamplerCreateInfo(); sci.pNext = &ycbcr_info; VkSampler sampler; VkResult err = vkCreateSampler(m_device->device(), &sci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Create an image without a Ycbcr conversion VkImageObj mpimage(m_device); mpimage.init(&ci); VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = mpimage.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &ivci, nullptr, &view); // Use the image and sampler together in a descriptor set OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, &sampler}, }); VkDescriptorImageInfo image_info{}; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; image_info.imageView = view; image_info.sampler = sampler; // Update the descriptor set expecting to get an error VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = 
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &image_info; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-01948"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroySamplerYcbcrConversion(m_device->device(), conversion, nullptr); vkDestroyImageView(m_device->device(), view, NULL); vkDestroySampler(m_device->device(), sampler, nullptr); } TEST_F(VkLayerTest, CopyImageMultiplaneAspectBits) { // Image copy tests on multiplane images with aspect errors // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Select multi-plane formats and verify support VkFormat mp3_format = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR; VkFormat mp2_format = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR; VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = mp2_format; ci.extent = {256, 256, 1}; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify formats VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT; bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); ci.format = mp3_format; supported = supported && ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); if (!supported) { printf("%s Multiplane image formats not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } // Create images VkImageObj mp3_image(m_device); mp3_image.init(&ci); ASSERT_TRUE(mp3_image.initialized()); ci.format = mp2_format; VkImageObj mp2_image(m_device); mp2_image.init(&ci); ASSERT_TRUE(mp2_image.initialized()); ci.format = VK_FORMAT_D24_UNORM_S8_UINT; VkImageObj sp_image(m_device); sp_image.init(&ci); ASSERT_TRUE(sp_image.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {128, 128, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01552"); m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01553"); m_commandBuffer->CopyImage(mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01554"); m_commandBuffer->CopyImage(mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01555"); m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01556"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "dest image depth/stencil formats"); // also m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, sp_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01557"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "dest image depth/stencil formats"); // also m_commandBuffer->CopyImage(sp_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageSrcSizeExceeded) { // Image copy with source region specified greater than src 
image size ASSERT_NO_FATAL_FAILURE(Init()); // Create images with full mip chain VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_3D; ci.format = VK_FORMAT_R8G8B8A8_UNORM; ci.extent = {32, 32, 8}; ci.mipLevels = 6; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj src_image(m_device); src_image.init(&ci); ASSERT_TRUE(src_image.initialized()); // Dest image with one more mip level ci.extent = {64, 64, 16}; ci.mipLevels = 7; ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&ci); ASSERT_TRUE(dst_image.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {32, 32, 8}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; m_errorMonitor->ExpectSuccess(); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); // Source exceeded in x-dim, VU 01202 copy_region.srcOffset.x = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00122"); // General "contained within" VU m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00144"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Source exceeded in y-dim, VU 01203 copy_region.srcOffset.x = 0; copy_region.extent.height = 48; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00122"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Source exceeded in z-dim, VU 01204 copy_region.extent = {4, 4, 4}; copy_region.srcSubresource.mipLevel = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00122"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageDstSizeExceeded) { // Image copy with dest region specified greater than dest image size ASSERT_NO_FATAL_FAILURE(Init()); // Create images with full mip chain VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_3D; ci.format = VK_FORMAT_R8G8B8A8_UNORM; ci.extent = {32, 32, 8}; ci.mipLevels = 6; ci.arrayLayers = 1; ci.samples = 
VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj dst_image(m_device); dst_image.init(&ci); ASSERT_TRUE(dst_image.initialized()); // Src image with one more mip level ci.extent = {64, 64, 16}; ci.mipLevels = 7; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; VkImageObj src_image(m_device); src_image.init(&ci); ASSERT_TRUE(src_image.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {32, 32, 8}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; m_errorMonitor->ExpectSuccess(); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); // Dest exceeded in x-dim, VU 01205 copy_region.dstOffset.x = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00123"); // General "contained within" VU m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00150"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Dest exceeded in y-dim, VU 01206 copy_region.dstOffset.x = 0; copy_region.extent.height = 48; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00123"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Dest exceeded in z-dim, VU 01207 copy_region.extent = {4, 4, 4}; copy_region.dstSubresource.mipLevel = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00123"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageFormatSizeMismatch) { VkResult err; bool pass; // Create color images with different format sizes and try to copy between them m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00135"); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); // Create two images of different types and try to copy between them VkImage srcImage; VkImage dstImage; VkDeviceMemory srcMem; VkDeviceMemory destMem; VkMemoryRequirements memReqs; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; 
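// Source image for the size-mismatch copy: 32x32 B8G8R8A8_UNORM, linear tiling, transfer-src usage (remaining fields set below)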
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage); ASSERT_VK_SUCCESS(err); image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; // Introduce failure by creating second image with a different-sized format. image_create_info.format = VK_FORMAT_R5G5B5A1_UNORM_PACK16; VkFormatProperties properties; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), image_create_info.format, &properties); if (properties.optimalTilingFeatures == 0) { vkDestroyImage(m_device->device(), srcImage, NULL); printf("%s Image format not supported; skipped.\n", kSkipPrefix); return; } err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.pNext = NULL; memAlloc.allocationSize = 0; memAlloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset.x = 0; copyRegion.srcOffset.y = 0; copyRegion.srcOffset.z = 0; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset.x = 0; copyRegion.dstOffset.y = 0; copyRegion.dstOffset.z = 0; copyRegion.extent.width = 1; copyRegion.extent.height = 1; copyRegion.extent.depth = 1; m_commandBuffer->CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), dstImage, NULL); vkFreeMemory(m_device->device(), destMem, NULL); // Copy to multiplane image with mismatched sizes m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00135"); VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM; ci.extent = {32, 32, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_LINEAR; ci.usage 
= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.queueFamilyIndexCount = 0;
    ci.pQueueFamilyIndices = NULL;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
    bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features);
    bool ycbcr = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) ||
                  (DeviceValidationVersion() >= VK_API_VERSION_1_1));
    if (!supported || !ycbcr) {
        printf("%s Image format not supported; skipped multiplanar copy test.\n", kSkipPrefix);
        vkDestroyImage(m_device->device(), srcImage, NULL);
        vkFreeMemory(m_device->device(), srcMem, NULL);
        return;
    }
    VkImageObj mpImage(m_device);
    mpImage.init(&ci);
    ASSERT_TRUE(mpImage.initialized());
    copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT;
    vkResetCommandBuffer(m_commandBuffer->handle(), 0);
    m_commandBuffer->begin();
    m_commandBuffer->CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, mpImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
    m_commandBuffer->end();
    m_errorMonitor->VerifyFound();
    vkDestroyImage(m_device->device(), srcImage, NULL);
    vkFreeMemory(m_device->device(), srcMem, NULL);
}

TEST_F(VkLayerTest, CopyImageDepthStencilFormatMismatch) {
    ASSERT_NO_FATAL_FAILURE(Init());
    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s Couldn't find depth stencil image format.\n", kSkipPrefix);
        return;
    }
    VkFormatProperties properties;
    vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT, &properties);
    if (properties.optimalTilingFeatures == 0) {
        printf("%s Image format not supported; skipped.\n", kSkipPrefix);
        return;
    }
    VkImageObj srcImage(m_device);
    srcImage.Init(32, 32, 1, VK_FORMAT_D32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(srcImage.initialized());
    VkImageObj dstImage(m_device);
    dstImage.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(dstImage.initialized());
    // Create two images of different types and try to copy between them
    m_commandBuffer->begin();
    VkImageCopy copyRegion;
    copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    copyRegion.srcSubresource.mipLevel = 0;
    copyRegion.srcSubresource.baseArrayLayer = 0;
    copyRegion.srcSubresource.layerCount = 1;
    copyRegion.srcOffset.x = 0;
    copyRegion.srcOffset.y = 0;
    copyRegion.srcOffset.z = 0;
    copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    copyRegion.dstSubresource.mipLevel = 0;
    copyRegion.dstSubresource.baseArrayLayer = 0;
    copyRegion.dstSubresource.layerCount = 1;
    copyRegion.dstOffset.x = 0;
    copyRegion.dstOffset.y = 0;
    copyRegion.dstOffset.z = 0;
    copyRegion.extent.width = 1;
    copyRegion.extent.height = 1;
    copyRegion.extent.depth = 1;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "vkCmdCopyImage called with unmatched source and dest image depth");
    m_commandBuffer->CopyImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copyRegion);
    m_commandBuffer->end();
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CopyImageSampleCountMismatch) {
    TEST_DESCRIPTION("Image copies with sample count mismatches");
    ASSERT_NO_FATAL_FAILURE(Init());
    VkImageFormatProperties image_format_properties;
    vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
                                             VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0,
                                             &image_format_properties);
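// Skip the test if the implementation does not report both 2x and 4x sample count support for this format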
if ((0 == (VK_SAMPLE_COUNT_2_BIT & image_format_properties.sampleCounts)) || (0 == (VK_SAMPLE_COUNT_4_BIT & image_format_properties.sampleCounts))) { printf("%s Image multi-sample support not found; skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_R8G8B8A8_UNORM; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj image1(m_device); image1.init(&ci); ASSERT_TRUE(image1.initialized()); ci.samples = VK_SAMPLE_COUNT_2_BIT; VkImageObj image2(m_device); image2.init(&ci); ASSERT_TRUE(image2.initialized()); ci.samples = VK_SAMPLE_COUNT_4_BIT; VkImageObj image4(m_device); image4.init(&ci); ASSERT_TRUE(image4.initialized()); m_commandBuffer->begin(); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset = {0, 0, 0}; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset = {0, 0, 0}; copyRegion.extent = {128, 128, 1}; // Copy a single sample image to/from a multi-sample image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136"); vkCmdCopyImage(m_commandBuffer->handle(), image1.handle(), VK_IMAGE_LAYOUT_GENERAL, image4.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136"); vkCmdCopyImage(m_commandBuffer->handle(), image2.handle(), VK_IMAGE_LAYOUT_GENERAL, image1.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Copy between multi-sample images with different sample counts m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136"); vkCmdCopyImage(m_commandBuffer->handle(), image2.handle(), VK_IMAGE_LAYOUT_GENERAL, image4.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136"); vkCmdCopyImage(m_commandBuffer->handle(), image4.handle(), VK_IMAGE_LAYOUT_GENERAL, image2.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageAspectMismatch) { TEST_DESCRIPTION("Image copies with aspect mask errors"); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(Init()); auto ds_format = FindSupportedDepthStencilFormat(gpu()); if (!ds_format) { printf("%s Couldn't find depth stencil format.\n", kSkipPrefix); return; } VkFormatProperties properties; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT, &properties); if (properties.optimalTilingFeatures == 0) { printf("%s Image format VK_FORMAT_D32_SFLOAT not supported; skipped.\n", kSkipPrefix); return; } VkImageObj color_image(m_device), 
ds_image(m_device), depth_image(m_device); color_image.Init(128, 128, 1, VK_FORMAT_R32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT); depth_image.Init(128, 128, 1, VK_FORMAT_D32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ds_image.Init(128, 128, 1, ds_format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(color_image.initialized()); ASSERT_TRUE(depth_image.initialized()); ASSERT_TRUE(ds_image.initialized()); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset = {0, 0, 0}; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset = {64, 0, 0}; copyRegion.extent = {64, 128, 1}; // Submitting command before command buffer is in recording state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "You must call vkBeginCommandBuffer"); // "VUID-vkCmdCopyImage-commandBuffer-recording"); vkCmdCopyImage(m_commandBuffer->handle(), depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); m_commandBuffer->begin(); // Src and dest aspect masks don't match copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; bool ycbcr = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) || (DeviceValidationVersion() >= VK_API_VERSION_1_1)); std::string vuid = (ycbcr ? 
"VUID-VkImageCopy-srcImage-01551" : "VUID-VkImageCopy-aspectMask-00137"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // Illegal combinations of aspect bits copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; // color must be alone copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00167"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00142"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // same test for dstSubresource copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; // color must be alone m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00167"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00143"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Metadata aspect is illegal copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00168"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // same test for dstSubresource copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00168"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // Aspect mask doesn't match source image format m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00142"); // Again redundant but unavoidable m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"unmatched source and dest image depth/stencil formats"); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Aspect mask doesn't match dest image format copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00143"); // Again redundant but unavoidable m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "unmatched source and dest image depth/stencil formats"); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ResolveImageLowSampleCount) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdResolveImage called with source sample count less than 2."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of sample count 1 and try to Resolve between them VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.flags = 0; VkImageObj srcImage(m_device); srcImage.init(&image_create_info); ASSERT_TRUE(srcImage.initialized()); VkImageObj dstImage(m_device); dstImage.init(&image_create_info); ASSERT_TRUE(dstImage.initialized()); m_commandBuffer->begin(); VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ResolveImageHighSampleCount) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdResolveImage called with dest sample count greater than 1."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of sample count 4 and try to Resolve between them VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; 
image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = 0; VkImageObj srcImage(m_device); srcImage.init(&image_create_info); ASSERT_TRUE(srcImage.initialized()); VkImageObj dstImage(m_device); dstImage.init(&image_create_info); ASSERT_TRUE(dstImage.initialized()); m_commandBuffer->begin(); // Need memory barrier to VK_IMAGE_LAYOUT_GENERAL for source and dest? // VK_IMAGE_LAYOUT_UNDEFINED = 0, // VK_IMAGE_LAYOUT_GENERAL = 1, VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ResolveImageFormatMismatch) { VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "vkCmdResolveImage called with unmatched source and dest formats."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImage srcImage; VkImage dstImage; VkDeviceMemory srcMem; VkDeviceMemory destMem; VkMemoryRequirements memReqs; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage); ASSERT_VK_SUCCESS(err); // Set format to something other than source image image_create_info.format = VK_FORMAT_R32_SFLOAT; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryAllocateInfo memAlloc = {}; 
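// Query requirements, allocate, and bind backing memory for both images before recording the resolve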
memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.pNext = NULL; memAlloc.allocationSize = 0; memAlloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); // Need memory barrier to VK_IMAGE_LAYOUT_GENERAL for source and dest? // VK_IMAGE_LAYOUT_UNDEFINED = 0, // VK_IMAGE_LAYOUT_GENERAL = 1, VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), srcImage, NULL); vkDestroyImage(m_device->device(), dstImage, NULL); vkFreeMemory(m_device->device(), srcMem, NULL); vkFreeMemory(m_device->device(), destMem, NULL); } TEST_F(VkLayerTest, ResolveImageTypeMismatch) { VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "vkCmdResolveImage called with unmatched source and dest image types."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImage srcImage; VkImage dstImage; VkDeviceMemory srcMem; VkDeviceMemory destMem; VkMemoryRequirements memReqs; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage); ASSERT_VK_SUCCESS(err); image_create_info.imageType = VK_IMAGE_TYPE_1D; // Note: Some implementations expect color 
attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.pNext = NULL; memAlloc.allocationSize = 0; memAlloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); // Need memory barrier to VK_IMAGE_LAYOUT_GENERAL for source and dest? // VK_IMAGE_LAYOUT_UNDEFINED = 0, // VK_IMAGE_LAYOUT_GENERAL = 1, VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), srcImage, NULL); vkDestroyImage(m_device->device(), dstImage, NULL); vkFreeMemory(m_device->device(), srcMem, NULL); vkFreeMemory(m_device->device(), destMem, NULL); } TEST_F(VkLayerTest, ResolveImageLayoutMismatch) { ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImageObj srcImage(m_device); VkImageObj dstImage(m_device); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.flags = 0; 
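// 2-sample source image; the single-sample destination below reuses the same create info with samples dropped to 1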
srcImage.init(&image_create_info); ASSERT_TRUE(srcImage.initialized()); // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; dstImage.init(&image_create_info); ASSERT_TRUE(dstImage.initialized()); m_commandBuffer->begin(); // source image must have valid contents before resolve VkClearColorValue clear_color = {{0, 0, 0, 0}}; VkImageSubresourceRange subresource = {}; subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresource.layerCount = 1; subresource.levelCount = 1; srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->ClearColorImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &subresource); srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); dstImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; // source image layout mismatch m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcImageLayout-00260"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_GENERAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); // dst image layout mismatch m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstImageLayout-00262"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ResolveInvalidSubresource) { ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImageObj srcImage(m_device); VkImageObj dstImage(m_device); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.flags = 0; srcImage.init(&image_create_info); ASSERT_TRUE(srcImage.initialized()); // Note: Some implementations expect color attachment usage for any // multisample surface 
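// Destination must be single-sample for a resolve; the invalid mip level and array layer cases below supply the errors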
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; dstImage.init(&image_create_info); ASSERT_TRUE(dstImage.initialized()); m_commandBuffer->begin(); // source image must have valid contents before resolve VkClearColorValue clear_color = {{0, 0, 0, 0}}; VkImageSubresourceRange subresource = {}; subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresource.layerCount = 1; subresource.levelCount = 1; srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->ClearColorImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &subresource); srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); dstImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; // invalid source mip level resolveRegion.srcSubresource.mipLevel = image_create_info.mipLevels; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcSubresource-01709"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); resolveRegion.srcSubresource.mipLevel = 0; // invalid dest mip level resolveRegion.dstSubresource.mipLevel = image_create_info.mipLevels; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstSubresource-01710"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); resolveRegion.dstSubresource.mipLevel = 0; // invalid source array layer range resolveRegion.srcSubresource.baseArrayLayer = image_create_info.arrayLayers; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcSubresource-01711"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); resolveRegion.srcSubresource.baseArrayLayer = 0; // invalid dest array layer range resolveRegion.dstSubresource.baseArrayLayer = image_create_info.arrayLayers; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstSubresource-01712"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); resolveRegion.dstSubresource.baseArrayLayer = 0; m_commandBuffer->end(); } TEST_F(VkLayerTest, DepthStencilImageViewWithColorAspectBitError) { // Create a single Image descriptor and cause it to first hit an 
error due // to using a DS format, then cause it to hit error due to COLOR_BIT not // set in aspect // The image format check comes 2nd in validation so we trigger it first, // then when we cause aspect fail next, bad format check will be preempted VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Combination depth/stencil image formats can have only the "); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s Couldn't find depth stencil format.\n", kSkipPrefix); return; } VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); VkImage image_bad; VkImage image_good; // One bad format and one good format for Color attachment const VkFormat tex_format_bad = depth_format; const VkFormat tex_format_good = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format_bad; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image_bad); ASSERT_VK_SUCCESS(err); image_create_info.format = tex_format_good; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image_good); ASSERT_VK_SUCCESS(err); // ---Bind image memory--- VkMemoryRequirements img_mem_reqs; vkGetImageMemoryRequirements(m_device->device(), image_bad, &img_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_alloc_info.pNext = NULL; image_alloc_info.memoryTypeIndex = 0; image_alloc_info.allocationSize = img_mem_reqs.size; bool pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &image_alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); ASSERT_TRUE(pass); VkDeviceMemory 
mem; err = vkAllocateMemory(m_device->device(), &image_alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image_bad, mem, 0); ASSERT_VK_SUCCESS(err); // ----------------------- VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image_bad; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format_bad; image_view_create_info.subresourceRange.baseArrayLayer = 0; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; VkImageView view; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image_bad, NULL); vkDestroyImage(m_device->device(), image_good, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, ClearImageErrors) { TEST_DESCRIPTION("Call ClearColorImage w/ a depth|stencil image and ClearDepthStencilImage with a color image."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); // Color image VkClearColorValue clear_color; memset(clear_color.uint32, 0, sizeof(uint32_t) * 4); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; const VkFormat color_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t img_width = 32; const int32_t img_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = color_format; image_create_info.extent.width = img_width; image_create_info.extent.height = img_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; vk_testing::Image color_image_no_transfer; color_image_no_transfer.init(*m_device, image_create_info, reqs); image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image color_image; color_image.init(*m_device, image_create_info, reqs); const VkImageSubresourceRange color_range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_COLOR_BIT); // Depth/Stencil image VkClearDepthStencilValue clear_value = {0}; reqs = 0; // don't need HOST_VISIBLE DS image VkImageCreateInfo ds_image_create_info = vk_testing::Image::create_info(); ds_image_create_info.imageType = VK_IMAGE_TYPE_2D; ds_image_create_info.format = VK_FORMAT_D16_UNORM; ds_image_create_info.extent.width = 64; ds_image_create_info.extent.height = 64; ds_image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; ds_image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image ds_image; ds_image.init(*m_device, ds_image_create_info, reqs); const VkImageSubresourceRange ds_range = vk_testing::Image::subresource_range(ds_image_create_info, VK_IMAGE_ASPECT_DEPTH_BIT); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearColorImage called with 
depth/stencil image."); vkCmdClearColorImage(m_commandBuffer->handle(), ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &color_range); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearColorImage called with image created without VK_IMAGE_USAGE_TRANSFER_DST_BIT"); vkCmdClearColorImage(m_commandBuffer->handle(), color_image_no_transfer.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &color_range); m_errorMonitor->VerifyFound(); // Call CmdClearDepthStencilImage with color image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearDepthStencilImage called without a depth/stencil image."); vkCmdClearDepthStencilImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_value, 1, &ds_range); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CommandQueueFlags) { TEST_DESCRIPTION( "Allocate a command buffer on a queue that does not support graphics and try to issue a graphics-only command"); ASSERT_NO_FATAL_FAILURE(Init()); uint32_t queueFamilyIndex = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT); if (queueFamilyIndex == UINT32_MAX) { printf("%s Non-graphics queue family not found; skipped.\n", kSkipPrefix); return; } else { // Create command pool on a non-graphics queue VkCommandPoolObj command_pool(m_device, queueFamilyIndex); // Setup command buffer on pool VkCommandBufferObj command_buffer(m_device, &command_pool); command_buffer.begin(); // Issue a graphics only command m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-commandBuffer-cmdpool"); VkViewport viewport = {0, 0, 16, 16, 0, 1}; command_buffer.SetViewport(0, 1, &viewport); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, ExecuteUnrecordedSecondaryCB) { TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a CB in the initial state"); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); // never record secondary m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdExecuteCommands-pCommandBuffers-00089"); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ExecuteUnrecordedPrimaryCB) { TEST_DESCRIPTION("Attempt vkQueueSubmit with a CB in the initial state"); ASSERT_NO_FATAL_FAILURE(Init()); // never record m_commandBuffer VkSubmitInfo si = {}; si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; si.commandBufferCount = 1; si.pCommandBuffers = &m_commandBuffer->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkQueueSubmit-pCommandBuffers-00072"); vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ExecuteSecondaryCBWithLayoutMismatch) { TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a CB with incorrect initial layout."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 
1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.flags = 0; VkImageSubresource image_sub = VkImageObj::subresource(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0); VkImageSubresourceRange image_sub_range = VkImageObj::subresource_range(image_sub); VkImageObj image(m_device); image.init(&image_create_info); ASSERT_TRUE(image.initialized()); VkImageMemoryBarrier image_barrier = image.image_memory_barrier(0, 0, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, image_sub_range); auto pipeline = [&image_barrier](const VkCommandBufferObj &cb, VkImageLayout old_layout, VkImageLayout new_layout) { image_barrier.oldLayout = old_layout; image_barrier.newLayout = new_layout; vkCmdPipelineBarrier(cb.handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_barrier); }; // Validate that mismatched use of image layout in secondary command buffer is caught at record time VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); pipeline(secondary, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); secondary.end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"); m_commandBuffer->begin(); pipeline(*m_commandBuffer, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); // Validate that we've tracked the changes from the secondary CB correctly m_errorMonitor->ExpectSuccess(); pipeline(*m_commandBuffer, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); m_commandBuffer->reset(); secondary.reset(); // Validate that UNDEFINED doesn't false positive on us secondary.begin(); pipeline(secondary, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); secondary.end(); m_commandBuffer->begin(); pipeline(*m_commandBuffer, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_errorMonitor->ExpectSuccess(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ExtensionNotEnabled) { TEST_DESCRIPTION("Validate that using an API from an unenabled extension returns an error"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Required extensions except VK_KHR_GET_MEMORY_REQUIREMENTS_2 -- to create the needed error std::vector<const char *> required_device_extensions = {VK_KHR_MAINTENANCE1_EXTENSION_NAME, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME}; for (auto dev_ext : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, dev_ext)) { m_device_extension_names.push_back(dev_ext); } else { printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix, dev_ext); break; } } // 
Need to ignore this error to get to the one we're testing m_errorMonitor->SetUnexpectedError("VUID-vkCreateDevice-ppEnabledExtensionNames-01387"); ASSERT_NO_FATAL_FAILURE(InitState()); // Find address of extension API auto vkCreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR)vkGetDeviceProcAddr(m_device->handle(), "vkCreateSamplerYcbcrConversionKHR"); if (vkCreateSamplerYcbcrConversionKHR == nullptr) { printf("%s VK_KHR_sampler_ycbcr_conversion not supported by device; skipped.\n", kSkipPrefix); return; } m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-GeneralParameterError-ExtensionNotEnabled"); VkSamplerYcbcrConversionCreateInfo ycbcr_info = {VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, NULL, VK_FORMAT_UNDEFINED, VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, VK_SAMPLER_YCBCR_RANGE_ITU_FULL, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, VK_CHROMA_LOCATION_COSITED_EVEN, VK_CHROMA_LOCATION_COSITED_EVEN, VK_FILTER_NEAREST, false}; VkSamplerYcbcrConversion conversion; vkCreateSamplerYcbcrConversionKHR(m_device->handle(), &ycbcr_info, nullptr, &conversion); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, Maintenance1AndNegativeViewport) { TEST_DESCRIPTION("Attempt to enable AMD_negative_viewport_height and Maintenance1_KHR extension simultaneously"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (!((DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) && (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME)))) { printf("%s Maintenance1 and AMD_negative viewport height extensions not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props); const char *extension_names[2] = {"VK_KHR_maintenance1", "VK_AMD_negative_viewport_height"}; VkDevice testDevice; VkDeviceCreateInfo device_create_info = {}; auto features = m_device->phy().features(); device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_create_info.pNext = NULL; device_create_info.queueCreateInfoCount = queue_info.size(); device_create_info.pQueueCreateInfos = queue_info.data(); device_create_info.enabledLayerCount = 0; device_create_info.ppEnabledLayerNames = NULL; device_create_info.enabledExtensionCount = 2; device_create_info.ppEnabledExtensionNames = (const char *const *)extension_names; device_create_info.pEnabledFeatures = &features; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-00374"); // The following unexpected error is coming from the LunarG loader. Do not make it a desired message because platforms that do // not use the LunarG loader (e.g. Android) will not see the message and the test will fail. 
m_errorMonitor->SetUnexpectedError("Failed to create device chain."); vkCreateDevice(gpu(), &device_create_info, NULL, &testDevice); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidCreateDescriptorPool) { TEST_DESCRIPTION("Attempt to create descriptor pool with invalid parameters"); ASSERT_NO_FATAL_FAILURE(Init()); const uint32_t default_descriptor_count = 1; const VkDescriptorPoolSize dp_size_template{VK_DESCRIPTOR_TYPE_SAMPLER, default_descriptor_count}; const VkDescriptorPoolCreateInfo dp_ci_template{VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, nullptr, // pNext 0, // flags 1, // maxSets 1, // poolSizeCount &dp_size_template}; // try maxSets = 0 { VkDescriptorPoolCreateInfo invalid_dp_ci = dp_ci_template; invalid_dp_ci.maxSets = 0; // invalid maxSets value m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolCreateInfo-maxSets-00301"); { VkDescriptorPool pool; vkCreateDescriptorPool(m_device->device(), &invalid_dp_ci, nullptr, &pool); } m_errorMonitor->VerifyFound(); } // try descriptorCount = 0 { VkDescriptorPoolSize invalid_dp_size = dp_size_template; invalid_dp_size.descriptorCount = 0; // invalid descriptorCount value VkDescriptorPoolCreateInfo dp_ci = dp_ci_template; dp_ci.pPoolSizes = &invalid_dp_size; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolSize-descriptorCount-00302"); { VkDescriptorPool pool; vkCreateDescriptorPool(m_device->device(), &dp_ci, nullptr, &pool); } m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, InvalidCreateBufferSize) { TEST_DESCRIPTION("Attempt to create VkBuffer with size of zero"); ASSERT_NO_FATAL_FAILURE(Init()); VkBufferCreateInfo info = {}; info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-size-00912"); info.size = 0; VkBuffer buffer; vkCreateBuffer(m_device->device(), &info, nullptr, &buffer); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SetDynViewportParamTests) { TEST_DESCRIPTION("Test parameters of vkCmdSetViewport without multiViewport feature"); SetTargetApiVersion(VK_API_VERSION_1_1); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); const VkViewport vp = {0.0, 0.0, 64.0, 64.0, 0.0, 1.0}; const VkViewport viewports[] = {vp, vp}; m_commandBuffer->begin(); // array tests m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224"); vkCmdSetViewport(m_commandBuffer->handle(), 1, 1, viewports); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength"); vkCmdSetViewport(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-01225"); vkCmdSetViewport(m_commandBuffer->handle(), 0, 2, viewports); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength"); vkCmdSetViewport(m_commandBuffer->handle(), 1, 0, viewports); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224"); 
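/* Both VUIDs set here (firstViewport-01224 above, viewportCount-01225 below) are expected from the single vkCmdSetViewport(cb, 1, 2, viewports) call that follows: without the multiViewport feature, firstViewport must be 0 and viewportCount must be 1. */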
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-01225"); vkCmdSetViewport(m_commandBuffer->handle(), 1, 2, viewports); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter"); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, nullptr); m_errorMonitor->VerifyFound(); // core viewport tests using std::vector; struct TestCase { VkViewport vp; std::string veid; }; // not necessarily boundary values (unspecified cast rounding), but guaranteed to be over limit const auto one_past_max_w = NearestGreater(static_cast<float>(m_device->props.limits.maxViewportDimensions[0])); const auto one_past_max_h = NearestGreater(static_cast<float>(m_device->props.limits.maxViewportDimensions[1])); const auto min_bound = m_device->props.limits.viewportBoundsRange[0]; const auto max_bound = m_device->props.limits.viewportBoundsRange[1]; const auto one_before_min_bounds = NearestSmaller(min_bound); const auto one_past_max_bounds = NearestGreater(max_bound); const auto below_zero = NearestSmaller(0.0f); const auto past_one = NearestGreater(1.0f); vector<TestCase> test_cases = { {{0.0, 0.0, 0.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01770"}, {{0.0, 0.0, one_past_max_w, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01771"}, {{0.0, 0.0, NAN, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01770"}, {{0.0, 0.0, 64.0, one_past_max_h, 0.0, 1.0}, "VUID-VkViewport-height-01773"}, {{one_before_min_bounds, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01774"}, {{one_past_max_bounds, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01232"}, {{NAN, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01774"}, {{0.0, one_before_min_bounds, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-y-01775"}, {{0.0, NAN, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-y-01775"}, {{max_bound, 0.0, 1.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01232"}, {{0.0, max_bound, 64.0, 1.0, 0.0, 1.0}, "VUID-VkViewport-y-01233"}, {{0.0, 0.0, 64.0, 64.0, below_zero, 1.0}, "VUID-VkViewport-minDepth-01234"}, {{0.0, 0.0, 64.0, 64.0, past_one, 1.0}, "VUID-VkViewport-minDepth-01234"}, {{0.0, 0.0, 64.0, 64.0, NAN, 1.0}, "VUID-VkViewport-minDepth-01234"}, {{0.0, 0.0, 64.0, 64.0, 0.0, below_zero}, "VUID-VkViewport-maxDepth-01235"}, {{0.0, 0.0, 64.0, 64.0, 0.0, past_one}, "VUID-VkViewport-maxDepth-01235"}, {{0.0, 0.0, 64.0, 64.0, 0.0, NAN}, "VUID-VkViewport-maxDepth-01235"}, }; if (DeviceValidationVersion() < VK_API_VERSION_1_1) { test_cases.push_back({{0.0, 0.0, 64.0, 0.0, 0.0, 1.0}, "VUID-VkViewport-height-01772"}); test_cases.push_back({{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, "VUID-VkViewport-height-01772"}); } else { test_cases.push_back({{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, "VUID-VkViewport-height-01773"}); } for (const auto &test_case : test_cases) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.veid); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &test_case.vp); m_errorMonitor->VerifyFound(); } } void NegHeightViewportTests(VkDeviceObj *m_device, VkCommandBufferObj *m_commandBuffer, ErrorMonitor *m_errorMonitor) { const auto &limits = m_device->props.limits; m_commandBuffer->begin(); using std::vector; struct TestCase { VkViewport vp; vector<std::string> vuids; }; // not necessarily boundary values (unspecified cast rounding), but guaranteed to be over limit const auto one_before_min_h = NearestSmaller(-static_cast<float>(limits.maxViewportDimensions[1])); const auto one_past_max_h = 
NearestGreater(static_cast<float>(limits.maxViewportDimensions[1])); const auto min_bound = limits.viewportBoundsRange[0]; const auto max_bound = limits.viewportBoundsRange[1]; const auto one_before_min_bound = NearestSmaller(min_bound); const auto one_past_max_bound = NearestGreater(max_bound); const vector<TestCase> test_cases = {{{0.0, 0.0, 64.0, one_before_min_h, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}}, {{0.0, 0.0, 64.0, one_past_max_h, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}}, {{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}}, {{0.0, one_before_min_bound, 64.0, 1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01775"}}, {{0.0, one_past_max_bound, 64.0, -1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01776"}}, {{0.0, min_bound, 64.0, -1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01777"}}, {{0.0, max_bound, 64.0, 1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01233"}}}; for (const auto &test_case : test_cases) { for (const auto vuid : test_case.vuids) { if (vuid == "VUID-Undefined") m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "is less than VkPhysicalDeviceLimits::viewportBoundsRange[0]"); else m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); } vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &test_case.vp); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, SetDynViewportParamMaintenance1Tests) { TEST_DESCRIPTION("Verify errors are detected on misuse of SetViewport with a negative viewport extension enabled."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s VK_KHR_maintenance1 extension not supported -- skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); NegHeightViewportTests(m_device, m_commandBuffer, m_errorMonitor); } TEST_F(VkLayerTest, SetDynViewportParamMultiviewportTests) { TEST_DESCRIPTION("Test parameters of vkCmdSetViewport with multiViewport feature enabled"); ASSERT_NO_FATAL_FAILURE(Init()); if (!m_device->phy().features().multiViewport) { printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix); return; } const auto max_viewports = m_device->props.limits.maxViewports; const uint32_t too_many_viewports = 65536 + 1; // let's say this is too much to allocate pViewports for m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength"); vkCmdSetViewport(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter"); vkCmdSetViewport(m_commandBuffer->handle(), 0, max_viewports, nullptr); m_errorMonitor->VerifyFound(); if (max_viewports >= too_many_viewports) { printf( "%s VkPhysicalDeviceLimits::maxViewports is too large to practically test against -- skipping " "part of " "test.\n", kSkipPrefix); return; } const VkViewport vp = {0.0, 0.0, 64.0, 64.0, 0.0, 1.0}; const std::vector<VkViewport> viewports(max_viewports + 1, vp); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223"); vkCmdSetViewport(m_commandBuffer->handle(), 0, max_viewports + 1, viewports.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223"); 
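/* firstViewport == maxViewports is already one past the last valid viewport index, so the call below violates firstViewport-01223 even though only a single viewport is passed. */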
vkCmdSetViewport(m_commandBuffer->handle(), max_viewports, 1, viewports.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223"); vkCmdSetViewport(m_commandBuffer->handle(), 1, max_viewports, viewports.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223"); vkCmdSetViewport(m_commandBuffer->handle(), max_viewports + 1, 0, viewports.data()); m_errorMonitor->VerifyFound(); } // // POSITIVE VALIDATION TESTS // // These tests do not expect to encounter ANY validation errors pass only if this is true TEST_F(VkPositiveLayerTest, PointSizeWriteInFunction) { TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST and write PointSize in vertex shader function."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(InitViewport()); // Create VS declaring PointSize and write to it in a function call. static const char PointSizeWriteVertShaderFcn[] = "#version 450\n" "vec2 vertices[3];\n" "out gl_PerVertex\n" "{\n" " vec4 gl_Position;\n" " float gl_PointSize;\n" "};\n" "void OutPointSize() {\n" " gl_PointSize = 7.0;\n" "}\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" " OutPointSize();\n" "}\n"; VkShaderObj vs(m_device, PointSizeWriteVertShaderFcn, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); { VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&ps); // Set Input Assembly to TOPOLOGY POINT LIST VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; pipelineobj.SetInputAssembly(&ia_state); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); } m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, PointSizeGeomShaderSuccess) { TEST_DESCRIPTION( "Create a pipeline using TOPOLOGY_POINT_LIST, set PointSize vertex shader, and write in the final geometry stage."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); if ((!m_device->phy().features().geometryShader) || (!m_device->phy().features().shaderTessellationAndGeometryPointSize)) { printf("%s Device does not support the required geometry shader features; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitViewport()); // Create VS declaring PointSize and writing to it static const char PointSizeVertShader[] = "#version 450\n" "vec2 vertices[3];\n" "out gl_PerVertex\n" "{\n" " vec4 gl_Position;\n" " float gl_PointSize;\n" "};\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " 
gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" " gl_PointSize = 5.0;\n" "}\n"; static char const *gsSource = "#version 450\n" "layout (points) in;\n" "layout (points) out;\n" "layout (max_vertices = 1) out;\n" "void main() {\n" " gl_Position = vec4(1.0, 0.5, 0.5, 0.0);\n" " gl_PointSize = 3.3;\n" " EmitVertex();\n" "}\n"; VkShaderObj vs(m_device, PointSizeVertShader, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj gs(m_device, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&gs); pipelineobj.AddShader(&ps); // Set Input Assembly to TOPOLOGY POINT LIST VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; pipelineobj.SetInputAssembly(&ia_state); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, LoosePointSizeWrite) { TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST and write PointSize outside of a structure."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(InitViewport()); const std::string LoosePointSizeWrite = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Vertex %main "main" %glposition %glpointsize %gl_VertexIndex OpSource GLSL 450 OpName %main "main" OpName %vertices "vertices" OpName %glposition "glposition" OpName %glpointsize "glpointsize" OpName %gl_VertexIndex "gl_VertexIndex" OpDecorate %glposition BuiltIn Position OpDecorate %glpointsize BuiltIn PointSize OpDecorate %gl_VertexIndex BuiltIn VertexIndex %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %v2float = OpTypeVector %float 2 %uint = OpTypeInt 32 0 %uint_3 = OpConstant %uint 3 %_arr_v2float_uint_3 = OpTypeArray %v2float %uint_3 %_ptr_Private__arr_v2float_uint_3 = OpTypePointer Private %_arr_v2float_uint_3 %vertices = OpVariable %_ptr_Private__arr_v2float_uint_3 Private %int = OpTypeInt 32 1 %int_0 = OpConstant %int 0 %float_n1 = OpConstant %float -1 %16 = OpConstantComposite %v2float %float_n1 %float_n1 %_ptr_Private_v2float = OpTypePointer Private %v2float %int_1 = OpConstant %int 1 %float_1 = OpConstant %float 1 %21 = OpConstantComposite %v2float %float_1 %float_n1 %int_2 = OpConstant %int 2 %float_0 = OpConstant %float 0 %25 = OpConstantComposite %v2float %float_0 %float_1 %v4float = OpTypeVector %float 4 %_ptr_Output_gl_Position = OpTypePointer Output %v4float %glposition = OpVariable %_ptr_Output_gl_Position Output %_ptr_Output_gl_PointSize = OpTypePointer Output %float %glpointsize = OpVariable %_ptr_Output_gl_PointSize Output %_ptr_Input_int = OpTypePointer Input %int %gl_VertexIndex = OpVariable %_ptr_Input_int Input %int_3 = OpConstant %int 3 %_ptr_Output_v4float = OpTypePointer Output %v4float %_ptr_Output_float = OpTypePointer Output %float %main = OpFunction %void None %3 %5 = 
OpLabel %18 = OpAccessChain %_ptr_Private_v2float %vertices %int_0 OpStore %18 %16 %22 = OpAccessChain %_ptr_Private_v2float %vertices %int_1 OpStore %22 %21 %26 = OpAccessChain %_ptr_Private_v2float %vertices %int_2 OpStore %26 %25 %33 = OpLoad %int %gl_VertexIndex %35 = OpSMod %int %33 %int_3 %36 = OpAccessChain %_ptr_Private_v2float %vertices %35 %37 = OpLoad %v2float %36 %38 = OpCompositeExtract %float %37 0 %39 = OpCompositeExtract %float %37 1 %40 = OpCompositeConstruct %v4float %38 %39 %float_0 %float_1 %42 = OpAccessChain %_ptr_Output_v4float %glposition OpStore %42 %40 OpStore %glpointsize %float_1 OpReturn OpFunctionEnd )"; // Create VS declaring PointSize and write to it in a function call. VkShaderObj vs(m_device, LoosePointSizeWrite, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); { VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&ps); // Set Input Assembly to TOPOLOGY POINT LIST VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; pipelineobj.SetInputAssembly(&ia_state); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); } m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, UncompressedToCompressedImageCopy) { TEST_DESCRIPTION("Image copies between compressed and uncompressed images"); ASSERT_NO_FATAL_FAILURE(Init()); // Verify format support // Size-compatible (64-bit) formats. Uncompressed is 64 bits per texel, compressed is 64 bits per 4x4 block (or 4bpt). if (!ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR) || !ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_BC1_RGBA_SRGB_BLOCK, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR)) { printf("%s Required formats/features not supported - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix); return; } VkImageObj uncomp_10x10t_image(m_device); // Size = 10 * 10 * 64 = 6400 VkImageObj comp_10x10b_40x40t_image(m_device); // Size = 40 * 40 * 4 = 6400 uncomp_10x10t_image.Init(10, 10, 1, VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); comp_10x10b_40x40t_image.Init(40, 40, 1, VK_FORMAT_BC1_RGBA_SRGB_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); if (!uncomp_10x10t_image.initialized() || !comp_10x10b_40x40t_image.initialized()) { printf("%s Unable to initialize surfaces - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix); return; } // Both copies represent the same number of bytes. 
Bits per texel: 4 for the BC1-compressed image, 64 for the uncompressed image. // Set up a shared copy region, then copy in both directions VkImageCopy copy_region = {}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); // Copy from uncompressed to compressed copy_region.extent = {10, 10, 1}; // Dimensions in (uncompressed) texels vkCmdCopyImage(m_commandBuffer->handle(), uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); // And from compressed to uncompressed copy_region.extent = {40, 40, 1}; // Dimensions in (compressed) texels vkCmdCopyImage(m_commandBuffer->handle(), comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); } TEST_F(VkPositiveLayerTest, DeleteDescriptorSetLayoutsBeforeDescriptorSets) { TEST_DESCRIPTION("Create DSLayouts and DescriptorSets and then delete the DSLayouts before the DescriptorSets."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkResult err; m_errorMonitor->ExpectSuccess(); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool_one; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool_one); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; VkDescriptorSet descriptorSet; { const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool_one; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); } // ds_layout destroyed err = vkFreeDescriptorSets(m_device->device(), ds_pool_one, 1, &descriptorSet); vkDestroyDescriptorPool(m_device->device(), ds_pool_one, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CommandPoolDeleteWithReferences) { TEST_DESCRIPTION("Ensure the validation layers' bookkeeping tracks the implicit command buffer frees."); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandPoolCreateInfo cmd_pool_info = {}; cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; cmd_pool_info.pNext = NULL; cmd_pool_info.queueFamilyIndex = m_device->graphics_queue_node_index_; cmd_pool_info.flags = 0;
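/* The pool created below is intentionally short-lived: a secondary allocated from it is executed by the primary command buffer, the pool is then destroyed (implicitly freeing that secondary), and the final vkResetCommandPool on the default pool verifies that the layers' bookkeeping holds no stale references. */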
VkCommandPool secondary_cmd_pool; VkResult res = vkCreateCommandPool(m_device->handle(), &cmd_pool_info, NULL, &secondary_cmd_pool); ASSERT_VK_SUCCESS(res); VkCommandBufferAllocateInfo cmdalloc = vk_testing::CommandBuffer::create_info(secondary_cmd_pool); cmdalloc.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; VkCommandBuffer secondary_cmds; res = vkAllocateCommandBuffers(m_device->handle(), &cmdalloc, &secondary_cmds); VkCommandBufferInheritanceInfo cmd_buf_inheritance_info = {}; cmd_buf_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; cmd_buf_inheritance_info.pNext = NULL; cmd_buf_inheritance_info.renderPass = VK_NULL_HANDLE; cmd_buf_inheritance_info.subpass = 0; cmd_buf_inheritance_info.framebuffer = VK_NULL_HANDLE; cmd_buf_inheritance_info.occlusionQueryEnable = VK_FALSE; cmd_buf_inheritance_info.queryFlags = 0; cmd_buf_inheritance_info.pipelineStatistics = 0; VkCommandBufferBeginInfo secondary_begin = {}; secondary_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; secondary_begin.pNext = NULL; secondary_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; secondary_begin.pInheritanceInfo = &cmd_buf_inheritance_info; res = vkBeginCommandBuffer(secondary_cmds, &secondary_begin); ASSERT_VK_SUCCESS(res); vkEndCommandBuffer(secondary_cmds); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_cmds); m_commandBuffer->end(); // DestroyCommandPool *implicitly* frees the command buffers allocated from it vkDestroyCommandPool(m_device->handle(), secondary_cmd_pool, NULL); // If bookkeeping has been lax, validating the reset will attempt to touch deleted data res = vkResetCommandPool(m_device->handle(), m_commandPool->handle(), 0); ASSERT_VK_SUCCESS(res); } TEST_F(VkLayerTest, SecondaryCommandBufferClearColorAttachmentsRenderArea) { TEST_DESCRIPTION( "Create a secondary command buffer with CmdClearAttachments call that has a rect outside of renderPass renderArea"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkCommandBufferAllocateInfo command_buffer_allocate_info = {}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = m_commandPool->handle(); command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; command_buffer_allocate_info.commandBufferCount = 1; VkCommandBuffer secondary_command_buffer; ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer)); VkCommandBufferBeginInfo command_buffer_begin_info = {}; VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {}; command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; command_buffer_inheritance_info.renderPass = m_renderPass; command_buffer_inheritance_info.framebuffer = m_framebuffer; command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info; vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info); VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 0; color_attachment.clearValue.color.float32[1] = 0; color_attachment.clearValue.color.float32[2] = 0; color_attachment.clearValue.color.float32[3] = 0; 
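/* The clear value itself does not matter here; the interesting part is the clear rect set below, whose 257-texel width exceeds the 256-wide render area. That violation can only be checked against the render area supplied by the primary's vkCmdBeginRenderPass, which is why the expected error is armed just before vkCmdExecuteCommands. */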
color_attachment.colorAttachment = 0; // x extent of 257 exceeds render area of 256 VkClearRect clear_rect = {{{0, 0}, {257, 32}}}; vkCmdClearAttachments(secondary_command_buffer, 1, &color_attachment, 1, &clear_rect); vkEndCommandBuffer(secondary_command_buffer); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00016"); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_command_buffer); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); } TEST_F(VkPositiveLayerTest, SecondaryCommandBufferClearColorAttachments) { TEST_DESCRIPTION("Create a secondary command buffer and record a CmdClearAttachments call into it"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkCommandBufferAllocateInfo command_buffer_allocate_info = {}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = m_commandPool->handle(); command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; command_buffer_allocate_info.commandBufferCount = 1; VkCommandBuffer secondary_command_buffer; ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer)); VkCommandBufferBeginInfo command_buffer_begin_info = {}; VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {}; command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; command_buffer_inheritance_info.renderPass = m_renderPass; command_buffer_inheritance_info.framebuffer = m_framebuffer; command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info; vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info); VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 0; color_attachment.clearValue.color.float32[1] = 0; color_attachment.clearValue.color.float32[2] = 0; color_attachment.clearValue.color.float32[3] = 0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {32, 32}}}; vkCmdClearAttachments(secondary_command_buffer, 1, &color_attachment, 1, &clear_rect); vkEndCommandBuffer(secondary_command_buffer); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_command_buffer); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, SecondaryCommandBufferImageLayoutTransitions) { TEST_DESCRIPTION("Perform an image layout transition in a secondary command buffer followed by a transition in the primary."); VkResult err; m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s Couldn't find depth stencil format.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Allocate a secondary and 
primary cmd buffer VkCommandBufferAllocateInfo command_buffer_allocate_info = {}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = m_commandPool->handle(); command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; command_buffer_allocate_info.commandBufferCount = 1; VkCommandBuffer secondary_command_buffer; ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer)); command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; VkCommandBuffer primary_command_buffer; ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &primary_command_buffer)); VkCommandBufferBeginInfo command_buffer_begin_info = {}; VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {}; command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info; err = vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; img_barrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(secondary_command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); err = vkEndCommandBuffer(secondary_command_buffer); ASSERT_VK_SUCCESS(err); // Now update primary cmd buffer to execute secondary and transitions image command_buffer_begin_info.pInheritanceInfo = nullptr; err = vkBeginCommandBuffer(primary_command_buffer, &command_buffer_begin_info); ASSERT_VK_SUCCESS(err); vkCmdExecuteCommands(primary_command_buffer, 1, &secondary_command_buffer); VkImageMemoryBarrier img_barrier2 = {}; img_barrier2.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier2.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier2.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; img_barrier2.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier2.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier2.image = image.handle(); img_barrier2.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier2.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier2.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; img_barrier2.subresourceRange.baseArrayLayer = 0; img_barrier2.subresourceRange.baseMipLevel = 0; 
img_barrier2.subresourceRange.layerCount = 1; img_barrier2.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(primary_command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier2); err = vkEndCommandBuffer(primary_command_buffer); ASSERT_VK_SUCCESS(err); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &primary_command_buffer; err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); err = vkDeviceWaitIdle(m_device->device()); ASSERT_VK_SUCCESS(err); vkFreeCommandBuffers(m_device->device(), m_commandPool->handle(), 1, &secondary_command_buffer); vkFreeCommandBuffers(m_device->device(), m_commandPool->handle(), 1, &primary_command_buffer); } // This is a positive test. No failures are expected. TEST_F(VkPositiveLayerTest, IgnoreUnrelatedDescriptor) { TEST_DESCRIPTION( "Ensure that the vkUpdateDescriptorSets validation code is ignoring VkWriteDescriptorSet members that are not related to " "the descriptor type specified by VkWriteDescriptorSet::descriptorType. Correct validation behavior will result in the " "test running to completion without validation errors."); const uintptr_t invalid_ptr = 0xcdcdcdcd; ASSERT_NO_FATAL_FAILURE(Init()); // Verify VK_FORMAT_R8_UNORM supports VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT const VkFormat format_texel_case = VK_FORMAT_R8_UNORM; const char *format_texel_case_string = "VK_FORMAT_R8_UNORM"; VkFormatProperties format_properties; vkGetPhysicalDeviceFormatProperties(gpu(), format_texel_case, &format_properties); if (!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) { printf("%s Test requires %s to support VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT\n", kSkipPrefix, format_texel_case_string); return; } // Image Case { m_errorMonitor->ExpectSuccess(); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkDescriptorImageInfo image_info = {}; image_info.imageView = view; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; descriptor_write.pImageInfo = &image_info; // Set pBufferInfo and pTexelBufferView to invalid values, which should // be // ignored for descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE. // This will most likely produce a crash if the parameter_validation // layer // does not correctly ignore pBufferInfo. 
descriptor_write.pBufferInfo = reinterpret_cast<const VkDescriptorBufferInfo *>(invalid_ptr); descriptor_write.pTexelBufferView = reinterpret_cast<const VkBufferView *>(invalid_ptr); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); } // Buffer Case { m_errorMonitor->ExpectSuccess(); VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkDescriptorBufferInfo buffer_info = {}; buffer_info.buffer = buffer; buffer_info.offset = 0; buffer_info.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = &buffer_info; // Set pImageInfo and pTexelBufferView to invalid values, which should // be // ignored for descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER. // This will most likely produce a crash if the parameter_validation // layer // does not correctly ignore pImageInfo. 
descriptor_write.pImageInfo = reinterpret_cast<const VkDescriptorImageInfo *>(invalid_ptr); descriptor_write.pTexelBufferView = reinterpret_cast<const VkBufferView *>(invalid_ptr); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), buffer_memory, NULL); } // Texel Buffer Case { m_errorMonitor->ExpectSuccess(); VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); VkBufferViewCreateInfo buff_view_ci = {}; buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; buff_view_ci.buffer = buffer; buff_view_ci.format = format_texel_case; buff_view_ci.range = VK_WHOLE_SIZE; VkBufferView buffer_view; err = vkCreateBufferView(m_device->device(), &buff_view_ci, NULL, &buffer_view); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; descriptor_write.pTexelBufferView = &buffer_view; // Set pImageInfo and pBufferInfo to invalid values, which should be // ignored for descriptorType == // VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER. // This will most likely produce a crash if the parameter_validation // layer // does not correctly ignore pImageInfo and pBufferInfo. 
descriptor_write.pImageInfo = reinterpret_cast<const VkDescriptorImageInfo *>(invalid_ptr); descriptor_write.pBufferInfo = reinterpret_cast<const VkDescriptorBufferInfo *>(invalid_ptr); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); vkDestroyBufferView(m_device->device(), buffer_view, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), buffer_memory, NULL); } } TEST_F(VkPositiveLayerTest, ImmutableSamplerOnlyDescriptor) { TEST_DESCRIPTION("Bind a DescriptorSet with only an immutable sampler and make sure that we don't warn for no update."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); m_errorMonitor->VerifyNotFound(); vkDestroySampler(m_device->device(), sampler, NULL); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, DuplicateDescriptorBinding) { TEST_DESCRIPTION("Create a descriptor set layout with a duplicate binding number."); ASSERT_NO_FATAL_FAILURE(Init()); // Create layout where two binding #s are "1" static const uint32_t NUM_BINDINGS = 3; VkDescriptorSetLayoutBinding dsl_binding[NUM_BINDINGS] = {}; dsl_binding[0].binding = 1; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[0].descriptorCount = 1; dsl_binding[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding[0].pImmutableSamplers = NULL; dsl_binding[1].binding = 0; dsl_binding[1].descriptorCount = 1; dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[1].descriptorCount = 1; dsl_binding[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding[1].pImmutableSamplers = NULL; dsl_binding[2].binding = 1; // Duplicate binding should cause error dsl_binding[2].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[2].descriptorCount = 1; dsl_binding[2].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding[2].pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.pNext = NULL; ds_layout_ci.bindingCount = NUM_BINDINGS; ds_layout_ci.pBindings = dsl_binding; VkDescriptorSetLayout ds_layout; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-binding-00279"); vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidPushDescriptorSetLayout) { TEST_DESCRIPTION("Create a push descriptor set layout with invalid bindings."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } 
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Find address of extension call and make the call PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); assert(vkGetPhysicalDeviceProperties2KHR != nullptr); // Get the push descriptor limits auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop); vkGetPhysicalDeviceProperties2KHR(m_device->phy().handle(), &prop2); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; // Note that as binding is referenced in ds_layout_ci, it is effectively in the closure by reference as well. auto test_create_ds_layout = [&ds_layout_ci, this](std::string error) { VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error); vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); }; // Starting with the initial VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC type set above.. test_create_ds_layout("VUID-VkDescriptorSetLayoutCreateInfo-flags-00280"); binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; test_create_ds_layout( "VUID-VkDescriptorSetLayoutCreateInfo-flags-00280"); // This is the same VUID as above, just a second error condition. 
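    // Beyond the dynamic-descriptor-type restriction checked above, a push descriptor set layout is
    // also limited to push_descriptor_prop.maxPushDescriptors descriptors in total
    // (VUID-VkDescriptorSetLayoutCreateInfo-flags-00281), which the next block exercises by asking
    // for maxPushDescriptors + 1. Purely as an illustrative sketch (not executed by this test), a
    // binding that satisfies both rules would look like:
    //     VkDescriptorSetLayoutBinding ok_binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1,
    //                                                VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};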
if (!(push_descriptor_prop.maxPushDescriptors == std::numeric_limits<uint32_t>::max())) { binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; binding.descriptorCount = push_descriptor_prop.maxPushDescriptors + 1; test_create_ds_layout("VUID-VkDescriptorSetLayoutCreateInfo-flags-00281"); } else { printf("%s maxPushDescriptors is set to maximum unit32_t value, skipping 'out of range test'.\n", kSkipPrefix); } } TEST_F(VkLayerTest, PushDescriptorSetLayoutWithoutExtension) { TEST_DESCRIPTION("Create a push descriptor set layout without loading the needed extension."); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; std::string error = "Attempted to use VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR in "; error = error + "VkDescriptorSetLayoutCreateInfo::flags but its required extension "; error = error + VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME; error = error + " has not been enabled."; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error.c_str()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-flags-00281"); VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); } TEST_F(VkLayerTest, DescriptorIndexingSetLayoutWithoutExtension) { TEST_DESCRIPTION("Create an update_after_bind set layout without loading the needed extension."); ASSERT_NO_FATAL_FAILURE(Init()); auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; std::string error = "Attemped to use VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT in "; error = error + "VkDescriptorSetLayoutCreateInfo::flags but its required extension "; error = error + VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME; error = error + " has not been enabled."; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error.c_str()); VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); } TEST_F(VkLayerTest, DescriptorIndexingSetLayout) { TEST_DESCRIPTION("Exercise various create/allocate-time errors related to VK_EXT_descriptor_indexing."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 2> required_device_extensions = { {VK_KHR_MAINTENANCE3_EXTENSION_NAME, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s 
Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables all supported indexing features except descriptorBindingUniformBufferUpdateAfterBind auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); indexing_features.descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); VkDescriptorBindingFlagsEXT flags = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT; auto flags_create_info = lvl_init_struct<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(); flags_create_info.bindingCount = 1; flags_create_info.pBindingFlags = &flags; VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(&flags_create_info); ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; // VU for VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::bindingCount flags_create_info.bindingCount = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-bindingCount-03002"); VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); flags_create_info.bindingCount = 1; // set is missing UPDATE_AFTER_BIND_POOL flag. 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-flags-03000"); // binding uses a feature we disabled m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingUniformBufferUpdateAfterBind-03005"); err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; ds_layout_ci.bindingCount = 0; flags_create_info.bindingCount = 0; err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorPoolSize pool_size = {binding.descriptorType, binding.descriptorCount}; auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; VkDescriptorPool pool; err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet ds = VK_NULL_HANDLE; // mismatch between descriptor set and pool m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-03044"); vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); if (indexing_features.descriptorBindingVariableDescriptorCount) { ds_layout_ci.flags = 0; ds_layout_ci.bindingCount = 1; flags_create_info.bindingCount = 1; flags = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT; err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); pool_size = {binding.descriptorType, binding.descriptorCount}; dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto count_alloc_info = lvl_init_struct<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(); count_alloc_info.descriptorSetCount = 1; // Set variable count larger than what was in the descriptor binding uint32_t variable_count = 2; count_alloc_info.pDescriptorCounts = &variable_count; ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(&count_alloc_info); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; ds = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-pSetLayouts-03046"); vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); } } TEST_F(VkLayerTest, DescriptorIndexingUpdateAfterBind) { TEST_DESCRIPTION("Exercise errors for updating a descriptor set after it is bound."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { 
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE3_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE3_EXTENSION_NAME); } else { printf("%s Descriptor Indexing or Maintenance3 Extension not supported, skipping tests\n", kSkipPrefix); return; } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables all supported indexing features except descriptorBindingUniformBufferUpdateAfterBind auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); indexing_features.descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE; if (VK_FALSE == indexing_features.descriptorBindingStorageBufferUpdateAfterBind) { printf("%s Test requires (unsupported) descriptorBindingStorageBufferUpdateAfterBind, skipping\n", kSkipPrefix); return; } if (VK_FALSE == features2.features.fragmentStoresAndAtomics) { printf("%s Test requires (unsupported) fragmentStoresAndAtomics, skipping\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorBindingFlagsEXT flags[2] = {0, VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT}; auto flags_create_info = lvl_init_struct<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(); flags_create_info.bindingCount = 2; flags_create_info.pBindingFlags = &flags[0]; // Descriptor set has two bindings - only the second is update_after_bind VkDescriptorSetLayoutBinding binding[2] = { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(&flags_create_info); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; ds_layout_ci.bindingCount = 2; ds_layout_ci.pBindings = &binding[0]; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); VkDescriptorPoolSize pool_sizes[2] = { {binding[0].descriptorType, binding[0].descriptorCount}, {binding[1].descriptorType, binding[1].descriptorCount}, }; auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>(); dspci.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT; dspci.poolSizeCount = 2; dspci.pPoolSizes = &pool_sizes[0]; dspci.maxSets = 1; VkDescriptorPool pool; err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; 
ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet ds = VK_NULL_HANDLE; vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); ASSERT_VK_SUCCESS(err); VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; VkBuffer dyub; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub); ASSERT_VK_SUCCESS(err); VkDeviceMemory mem; VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), dyub, &mem_reqs); VkMemoryAllocateInfo mem_alloc_info = {}; mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc_info.allocationSize = mem_reqs.size; m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); err = vkAllocateMemory(m_device->device(), &mem_alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub, mem, 0); ASSERT_VK_SUCCESS(err); VkDescriptorBufferInfo buffInfo[2] = {}; buffInfo[0].buffer = dyub; buffInfo[0].offset = 0; buffInfo[0].range = 1024; VkWriteDescriptorSet descriptor_write[2] = {}; descriptor_write[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write[0].dstSet = ds; descriptor_write[0].dstBinding = 0; descriptor_write[0].descriptorCount = 1; descriptor_write[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write[0].pBufferInfo = buffInfo; descriptor_write[1] = descriptor_write[0]; descriptor_write[1].dstBinding = 1; descriptor_write[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; VkPipelineLayout pipeline_layout; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); // Create a dummy pipeline, since VL inspects which bindings are actually used at draw time char const *vsSource = "#version 450\n" "void main(){\n" " gl_Position = vec4(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "layout(set=0, binding=0) uniform foo0 { float x0; } bar0;\n" "layout(set=0, binding=1) buffer foo1 { float x1; } bar1;\n" "void main(){\n" " color = vec4(bar0.x0 + bar1.x1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.CreateVKPipeline(pipeline_layout, m_renderPass); // Make both bindings valid before binding to the command buffer vkUpdateDescriptorSets(m_device->device(), 2, &descriptor_write[0], 0, NULL); m_errorMonitor->VerifyNotFound(); // Two subtests. First only updates the update_after_bind binding and expects // no error. Second updates the other binding and expects an error when the // command buffer is ended. 
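    // Summarizing the rule being exercised (illustrative note, not part of the original test logic):
    // a binding created with VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT (binding 1 here) may be
    // written with vkUpdateDescriptorSets() even after the set has been bound and recorded, e.g.
    //     vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write[1], 0, NULL);
    // whereas writing a binding without that flag (binding 0) invalidates the bound set, and the
    // validation layer reports that at vkEndCommandBuffer() time rather than at update time.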
for (uint32_t i = 0; i < 2; ++i) { m_commandBuffer->begin(); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1, &ds, 0, NULL); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdDraw(m_commandBuffer->handle(), 0, 0, 0, 0); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); // Valid to update binding 1 after being bound vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write[1], 0, NULL); m_errorMonitor->VerifyNotFound(); if (i == 0) { // expect no errors m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } else { // Invalid to update binding 0 after being bound. But the error is actually // generated during vkEndCommandBuffer vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write[0], 0, NULL); m_errorMonitor->VerifyNotFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "is invalid because bound DescriptorSet"); vkEndCommandBuffer(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); } } vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); vkDestroyBuffer(m_device->handle(), dyub, NULL); vkFreeMemory(m_device->handle(), mem, NULL); vkDestroyPipelineLayout(m_device->handle(), pipeline_layout, NULL); } TEST_F(VkLayerTest, AllocatePushDescriptorSet) { TEST_DESCRIPTION("Attempt to allocate a push descriptor set."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorPoolSize pool_size = {binding.descriptorType, binding.descriptorCount}; auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; VkDescriptorPool pool; err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet ds = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308"); vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); 
m_errorMonitor->VerifyFound(); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); } TEST_F(VkLayerTest, PushDescriptorSetCmdPushBadArgs) { TEST_DESCRIPTION("Attempt to push a push descriptor set with incorrect arguments."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Create ordinary and push descriptor set layout VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj ds_layout(m_device, {binding}); ASSERT_TRUE(ds_layout.initialized()); const VkDescriptorSetLayoutObj push_ds_layout(m_device, {binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); ASSERT_TRUE(push_ds_layout.initialized()); // Now use the descriptor set layouts to create a pipeline layout const VkPipelineLayoutObj pipeline_layout(m_device, {&push_ds_layout, &ds_layout}); ASSERT_TRUE(pipeline_layout.initialized()); // Create a descriptor to push const uint32_t buffer_data[4] = {4, 5, 6, 7}; VkConstantBufferObj buffer_obj(m_device, sizeof(buffer_data), &buffer_data); ASSERT_TRUE(buffer_obj.initialized()); // Create a "write" struct, noting that the buffer_info cannot be a temporary arg (the return from write_descriptor_set // references its data), and the DescriptorSet() can be temporary, because the value is ignored VkDescriptorBufferInfo buffer_info = {buffer_obj.handle(), 0, VK_WHOLE_SIZE}; VkWriteDescriptorSet descriptor_write = vk_testing::Device::write_descriptor_set( vk_testing::DescriptorSet(), 0, 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, &buffer_info); // Find address of extension call and make the call PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR"); ASSERT_TRUE(vkCmdPushDescriptorSetKHR != nullptr); // Section 1: Queue family matching/capabilities. // Create command pool on a non-graphics queue const uint32_t no_gfx_qfi = m_device->QueueFamilyMatching(VK_QUEUE_COMPUTE_BIT, VK_QUEUE_GRAPHICS_BIT); const uint32_t transfer_only_qfi = m_device->QueueFamilyMatching(VK_QUEUE_TRANSFER_BIT, (VK_QUEUE_COMPUTE_BIT | VK_QUEUE_GRAPHICS_BIT)); if ((UINT32_MAX == transfer_only_qfi) && (UINT32_MAX == no_gfx_qfi)) { printf("%s No compute or transfer only queue family, skipping bindpoint and queue tests.", kSkipPrefix); } else { const uint32_t err_qfi = (UINT32_MAX == no_gfx_qfi) ? 
        transfer_only_qfi : no_gfx_qfi;
        VkCommandPoolObj command_pool(m_device, err_qfi);
        ASSERT_TRUE(command_pool.initialized());
        VkCommandBufferObj command_buffer(m_device, &command_pool);
        ASSERT_TRUE(command_buffer.initialized());
        command_buffer.begin();
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363");
        if (err_qfi == transfer_only_qfi) {
            // As this queue supports neither the gfx nor the compute bindpoint, we'll get two errors
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
        }
        vkCmdPushDescriptorSetKHR(command_buffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_write);
        m_errorMonitor->VerifyFound();
        command_buffer.end();

        // If we succeed in testing only one condition above, we need to test the other below.
        if ((UINT32_MAX != transfer_only_qfi) && (err_qfi != transfer_only_qfi)) {
            // Need to test the neither compute/gfx supported case separately.
            VkCommandPoolObj tran_command_pool(m_device, transfer_only_qfi);
            ASSERT_TRUE(tran_command_pool.initialized());
            VkCommandBufferObj tran_command_buffer(m_device, &tran_command_pool);
            ASSERT_TRUE(tran_command_buffer.initialized());
            tran_command_buffer.begin();
            // We can't avoid getting *both* errors in this case
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363");
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
            vkCmdPushDescriptorSetKHR(tran_command_buffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_write);
            m_errorMonitor->VerifyFound();
            tran_command_buffer.end();
        }
    }

    // Push to the non-push binding
    m_commandBuffer->begin();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-set-00365");
    vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 1, 1, &descriptor_write);
    m_errorMonitor->VerifyFound();

    // Specify set out of bounds
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-set-00364");
    vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 2, 1, &descriptor_write);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->end();

    // This is a test for VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording
    // TODO: Add VALIDATION_ERROR_ code support to core_validation::ValidateCmd
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "You must call vkBeginCommandBuffer() before this call to vkCmdPushDescriptorSetKHR()");
    vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_write);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, SetDynScissorParamTests) {
    TEST_DESCRIPTION("Test parameters of vkCmdSetScissor without multiViewport feature");

    VkPhysicalDeviceFeatures features{};
    ASSERT_NO_FATAL_FAILURE(Init(&features));

    const VkRect2D scissor = {{0, 0}, {16, 16}};
    const VkRect2D scissors[] = {scissor, scissor};

    m_commandBuffer->begin();

    // array tests
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593");
    vkCmdSetScissor(m_commandBuffer->handle(), 1, 1, scissors);
    m_errorMonitor->VerifyFound();
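    // For reference (illustrative note, assuming multiViewport stays disabled as forced above):
    // without the multiViewport feature the only accepted parameters are firstScissor == 0 and
    // scissorCount == 1, i.e. a call shaped like
    //     vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
    // The calls below intentionally violate these and related parameter rules.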
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-00594"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 2, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); vkCmdSetScissor(m_commandBuffer->handle(), 1, 0, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-00594"); vkCmdSetScissor(m_commandBuffer->handle(), 1, 2, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-pScissors-parameter"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, nullptr); m_errorMonitor->VerifyFound(); struct TestCase { VkRect2D scissor; std::string vuid; }; std::vector<TestCase> test_cases = {{{{-1, 0}, {16, 16}}, "VUID-vkCmdSetScissor-x-00595"}, {{{0, -1}, {16, 16}}, "VUID-vkCmdSetScissor-x-00595"}, {{{1, 0}, {INT32_MAX, 16}}, "VUID-vkCmdSetScissor-offset-00596"}, {{{INT32_MAX, 0}, {1, 16}}, "VUID-vkCmdSetScissor-offset-00596"}, {{{0, 0}, {uint32_t{INT32_MAX} + 1, 16}}, "VUID-vkCmdSetScissor-offset-00596"}, {{{0, 1}, {16, INT32_MAX}}, "VUID-vkCmdSetScissor-offset-00597"}, {{{0, INT32_MAX}, {16, 1}}, "VUID-vkCmdSetScissor-offset-00597"}, {{{0, 0}, {16, uint32_t{INT32_MAX} + 1}}, "VUID-vkCmdSetScissor-offset-00597"}}; for (const auto &test_case : test_cases) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuid); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &test_case.scissor); m_errorMonitor->VerifyFound(); } m_commandBuffer->end(); } TEST_F(VkLayerTest, SetDynScissorParamMultiviewportTests) { TEST_DESCRIPTION("Test parameters of vkCmdSetScissor with multiViewport feature enabled"); ASSERT_NO_FATAL_FAILURE(Init()); if (!m_device->phy().features().multiViewport) { printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix); return; } const auto max_scissors = m_device->props.limits.maxViewports; const uint32_t too_many_scissors = 65536 + 1; // let's say this is too much to allocate pScissors for m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-pScissors-parameter"); vkCmdSetScissor(m_commandBuffer->handle(), 0, max_scissors, nullptr); m_errorMonitor->VerifyFound(); if (max_scissors >= too_many_scissors) { printf( "%s VkPhysicalDeviceLimits::maxViewports is too large to practically test against -- skipping " "part of " "test.\n", kSkipPrefix); return; } const VkRect2D scissor = {{0, 0}, {16, 16}}; const std::vector<VkRect2D> scissors(max_scissors + 1, scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 
max_scissors + 1, scissors.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), max_scissors, 1, scissors.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), 1, max_scissors, scissors.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), max_scissors + 1, 0, scissors.data()); m_errorMonitor->VerifyFound(); } // This is a positive test. No failures are expected. TEST_F(VkPositiveLayerTest, EmptyDescriptorUpdateTest) { TEST_DESCRIPTION("Update last descriptor in a set that includes an empty binding"); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); // Create layout with two uniform buffer descriptors w/ empty binding between them OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0 /*!*/, 0, nullptr}, {2, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create a buffer to be used for update VkBufferCreateInfo buff_ci = {}; buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buff_ci.size = 256; buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Have to bind memory to buffer before descriptor update VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 512; // one allocation for both buffers mem_alloc.memoryTypeIndex = 0; VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } // Make sure allocation is sufficiently large to accommodate buffer requirements if (mem_reqs.size > mem_alloc.allocationSize) { mem_alloc.allocationSize = mem_reqs.size; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); // Only update the descriptor at binding 2 VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer; buff_info.offset = 0; buff_info.range = VK_WHOLE_SIZE; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 2; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.dstSet = ds.set_; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); // Cleanup vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), 
buffer, NULL); } TEST_F(VkLayerTest, MultiplePushDescriptorSets) { TEST_DESCRIPTION("Verify an error message for multiple push descriptor sets."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const unsigned int descriptor_set_layout_count = 2; std::vector<VkDescriptorSetLayoutObj> ds_layouts; for (uint32_t i = 0; i < descriptor_set_layout_count; ++i) { dsl_binding.binding = i; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding), VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); } const auto &ds_vk_layouts = MakeVkHandles<VkDescriptorSetLayout>(ds_layouts); VkPipelineLayout pipeline_layout; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.pushConstantRangeCount = 0; pipeline_layout_ci.pPushConstantRanges = NULL; pipeline_layout_ci.setLayoutCount = ds_vk_layouts.size(); pipeline_layout_ci.pSetLayouts = ds_vk_layouts.data(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293"); vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateDescriptorUpdateTemplate) { TEST_DESCRIPTION("Verify error messages for invalid vkCreateDescriptorUpdateTemplate calls."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME); } else { printf("%s Push Descriptors and Descriptor Update Template Extensions not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout_ub(m_device, {dsl_binding}); const VkDescriptorSetLayoutObj 
ds_layout_ub1(m_device, {dsl_binding}); const VkDescriptorSetLayoutObj ds_layout_ub_push(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); const VkPipelineLayoutObj pipeline_layout(m_device, {{&ds_layout_ub, &ds_layout_ub1, &ds_layout_ub_push}}); PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateDescriptorUpdateTemplateKHR"); ASSERT_NE(vkCreateDescriptorUpdateTemplateKHR, nullptr); PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkDestroyDescriptorUpdateTemplateKHR"); ASSERT_NE(vkDestroyDescriptorUpdateTemplateKHR, nullptr); VkDescriptorUpdateTemplateEntry entries = {0, 0, 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0, sizeof(VkBuffer)}; VkDescriptorUpdateTemplateCreateInfo create_info = {}; create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO; create_info.pNext = nullptr; create_info.flags = 0; create_info.descriptorUpdateEntryCount = 1; create_info.pDescriptorUpdateEntries = &entries; auto do_test = [&](std::string err) { VkDescriptorUpdateTemplateKHR dut = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, err); if (VK_SUCCESS == vkCreateDescriptorUpdateTemplateKHR(m_device->handle(), &create_info, nullptr, &dut)) { vkDestroyDescriptorUpdateTemplateKHR(m_device->handle(), dut, nullptr); } m_errorMonitor->VerifyFound(); }; // Descriptor set type template create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET; // descriptorSetLayout is NULL do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350"); // Push descriptor type template create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR; create_info.pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE; create_info.pipelineLayout = pipeline_layout.handle(); create_info.set = 2; // Bad bindpoint -- force fuzz the bind point memset(&create_info.pipelineBindPoint, 0xFE, sizeof(create_info.pipelineBindPoint)); do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351"); create_info.pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE; // Bad pipeline layout create_info.pipelineLayout = VK_NULL_HANDLE; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352"); create_info.pipelineLayout = pipeline_layout.handle(); // Wrong set # create_info.set = 0; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353"); // Invalid set # create_info.set = 42; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353"); } // This is a positive test. No failures are expected. 
TEST_F(VkPositiveLayerTest, PushDescriptorNullDstSetTest) { TEST_DESCRIPTION("Use null dstSet in CmdPushDescriptorSetKHR"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 2; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); // Now use the descriptor layout to create a pipeline layout const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); static const float vbo_data[3] = {1.f, 0.f, 1.f}; VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); VkDescriptorBufferInfo buff_info; buff_info.buffer = vbo.handle(); buff_info.offset = 0; buff_info.range = sizeof(vbo_data); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 2; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.dstSet = 0; // Should not cause a validation error // Find address of extension call and make the call PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR"); assert(vkCmdPushDescriptorSetKHR != nullptr); m_commandBuffer->begin(); vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_write); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No failures are expected. 
TEST_F(VkPositiveLayerTest, PushDescriptorUnboundSetTest) { TEST_DESCRIPTION("Ensure that no validation errors are produced for not bound push descriptor sets"); VkResult err; if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->ExpectSuccess(); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); // Create descriptor set layout VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 2; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); // Create push descriptor set layout const VkDescriptorSetLayoutObj push_ds_layout(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); // Allocate descriptor set VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.descriptorPool = ds_pool; alloc_info.descriptorSetCount = 1; alloc_info.pSetLayouts = &ds_layout.handle(); VkDescriptorSet descriptor_set; err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); // Now use the descriptor layouts to create a pipeline layout const VkPipelineLayoutObj pipeline_layout(m_device, {&push_ds_layout, &ds_layout}); // Create PSO char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=2) uniform foo1 { float x; } bar1;\n" "layout(set=1) layout(binding=2) uniform foo2 { float y; } bar2;\n" "void main(){\n" " x = vec4(bar1.x) + vec4(bar2.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); static const float bo_data[1] = {1.f}; VkConstantBufferObj buffer(m_device, sizeof(bo_data), (const void *)&bo_data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); // Update 
descriptor set VkDescriptorBufferInfo buff_info; buff_info.buffer = buffer.handle(); buff_info.offset = 0; buff_info.range = sizeof(bo_data); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 2; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.dstSet = descriptor_set; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR"); assert(vkCmdPushDescriptorSetKHR != nullptr); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // Push descriptors and bind descriptor set vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_write); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 1, 1, &descriptor_set, 0, NULL); // No errors should be generated. vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyNotFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } // This is a positive test. No failures are expected. TEST_F(VkPositiveLayerTest, TestAliasedMemoryTracking) { VkResult err; bool pass; TEST_DESCRIPTION( "Create a buffer, allocate memory, bind memory, destroy the buffer, create an image, and bind the same memory to it"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer; VkImage image; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = 256; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; // Ensure memory is big enough for both bindings alloc_info.allocationSize = 0x10000; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); uint8_t *pData; err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); memset(pData, 0xCADECADE, static_cast<size_t>(mem_reqs.size)); vkUnmapMemory(m_device->device(), mem); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); // NOW, destroy the buffer. Obviously, the resource no longer occupies this // memory. In fact, it was never used by the GPU. // Just be sure, wait for idle. 
vkDestroyBuffer(m_device->device(), buffer, NULL); vkDeviceWaitIdle(m_device->device()); // Use optimal as some platforms report linear support but then fail image creation VkImageTiling image_tiling = VK_IMAGE_TILING_OPTIMAL; VkImageFormatProperties image_format_properties; vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, image_tiling, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0, &image_format_properties); if (image_format_properties.maxExtent.width == 0) { printf("%s Image format not supported; skipped.\n", kSkipPrefix); vkFreeMemory(m_device->device(), mem, NULL); return; } VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = image_tiling; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = 0; /* Create a mappable image. It will be the texture if linear images are OK * to be textures or it will be the staging image if they are not. */ err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; mem_alloc.allocationSize = mem_reqs.size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyImage(m_device->device(), image, NULL); return; } // VALIDATION FAILURE: err = vkBindImageMemory(m_device->device(), image, mem, 0); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyImage(m_device->device(), image, NULL); } // This is a positive test. No failures are expected. 
TEST_F(VkPositiveLayerTest, TestDestroyFreeNullHandles) { VkResult err; TEST_DESCRIPTION("Call all applicable destroy and free routines with NULL handles, expecting no validation errors"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); vkDestroyBuffer(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyBufferView(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyCommandPool(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyDescriptorPool(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyDescriptorSetLayout(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyDevice(VK_NULL_HANDLE, NULL); vkDestroyEvent(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyFence(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyFramebuffer(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyImage(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyImageView(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyInstance(VK_NULL_HANDLE, NULL); vkDestroyPipeline(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyPipelineCache(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyPipelineLayout(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyQueryPool(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyRenderPass(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroySampler(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroySemaphore(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyShaderModule(m_device->device(), VK_NULL_HANDLE, NULL); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffers[3] = {}; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffers[1]); vkFreeCommandBuffers(m_device->device(), command_pool, 3, command_buffers); vkDestroyCommandPool(m_device->device(), command_pool, NULL); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 2; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptor_sets[3] = {}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; 
alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_sets[1]); ASSERT_VK_SUCCESS(err); vkFreeDescriptorSets(m_device->device(), ds_pool, 3, descriptor_sets); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); vkFreeMemory(m_device->device(), VK_NULL_HANDLE, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, QueueSubmitSemaphoresAndLayoutTracking) { TEST_DESCRIPTION("Submit multiple command buffers with chained semaphore signals and layout transitions"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBuffer cmd_bufs[4]; VkCommandBufferAllocateInfo alloc_info; alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.commandBufferCount = 4; alloc_info.commandPool = m_commandPool->handle(); alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &alloc_info, cmd_bufs); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT), VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkCommandBufferBeginInfo cb_binfo; cb_binfo.pNext = NULL; cb_binfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cb_binfo.pInheritanceInfo = VK_NULL_HANDLE; cb_binfo.flags = 0; // Use 4 command buffers, each with an image layout transition, ColorAO->General->ColorAO->TransferSrc->TransferDst vkBeginCommandBuffer(cmd_bufs[0], &cb_binfo); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = NULL; img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(cmd_bufs[0], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[0]); vkBeginCommandBuffer(cmd_bufs[1], &cb_binfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; vkCmdPipelineBarrier(cmd_bufs[1], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[1]); vkBeginCommandBuffer(cmd_bufs[2], &cb_binfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; vkCmdPipelineBarrier(cmd_bufs[2], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[2]); vkBeginCommandBuffer(cmd_bufs[3], &cb_binfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; vkCmdPipelineBarrier(cmd_bufs[3], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[3]); 
// Submit 4 command buffers in 3 submits, with submits 2 and 3 waiting for semaphores from submits 1 and 2 VkSemaphore semaphore1, semaphore2; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore1); vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore2); VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info[3]; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].pNext = nullptr; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = &cmd_bufs[0]; submit_info[0].signalSemaphoreCount = 1; submit_info[0].pSignalSemaphores = &semaphore1; submit_info[0].waitSemaphoreCount = 0; submit_info[0].pWaitDstStageMask = flags; submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[1].pNext = nullptr; submit_info[1].commandBufferCount = 1; submit_info[1].pCommandBuffers = &cmd_bufs[1]; submit_info[1].waitSemaphoreCount = 1; submit_info[1].pWaitSemaphores = &semaphore1; submit_info[1].signalSemaphoreCount = 1; submit_info[1].pSignalSemaphores = &semaphore2; submit_info[1].pWaitDstStageMask = flags; submit_info[2].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[2].pNext = nullptr; submit_info[2].commandBufferCount = 2; submit_info[2].pCommandBuffers = &cmd_bufs[2]; submit_info[2].waitSemaphoreCount = 1; submit_info[2].pWaitSemaphores = &semaphore2; submit_info[2].signalSemaphoreCount = 0; submit_info[2].pSignalSemaphores = nullptr; submit_info[2].pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 3, submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); vkDestroySemaphore(m_device->device(), semaphore1, NULL); vkDestroySemaphore(m_device->device(), semaphore2, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, DynamicOffsetWithInactiveBinding) { // Create a descriptorSet w/ dynamic descriptors where 1 binding is inactive // We previously had a bug where dynamic offset of inactive bindings was still being used VkResult err; m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {2, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); // Create two buffers to update the descriptors with // The first will be 2k and used for bindings 0 & 1, the second is 1k for binding 2 uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 2048; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer dyub1; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub1); ASSERT_VK_SUCCESS(err); // buffer2 buffCI.size = 1024; VkBuffer dyub2; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub2); ASSERT_VK_SUCCESS(err); // Allocate memory and bind to buffers VkMemoryAllocateInfo mem_alloc[2] = {}; mem_alloc[0].sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc[0].pNext = NULL; mem_alloc[0].memoryTypeIndex = 0; mem_alloc[1].sType =
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc[1].pNext = NULL; mem_alloc[1].memoryTypeIndex = 0; VkMemoryRequirements mem_reqs1; vkGetBufferMemoryRequirements(m_device->device(), dyub1, &mem_reqs1); VkMemoryRequirements mem_reqs2; vkGetBufferMemoryRequirements(m_device->device(), dyub2, &mem_reqs2); mem_alloc[0].allocationSize = mem_reqs1.size; bool pass = m_device->phy().set_memory_type(mem_reqs1.memoryTypeBits, &mem_alloc[0], 0); mem_alloc[1].allocationSize = mem_reqs2.size; pass &= m_device->phy().set_memory_type(mem_reqs2.memoryTypeBits, &mem_alloc[1], 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), dyub1, NULL); vkDestroyBuffer(m_device->device(), dyub2, NULL); return; } VkDeviceMemory mem1; err = vkAllocateMemory(m_device->device(), &mem_alloc[0], NULL, &mem1); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub1, mem1, 0); ASSERT_VK_SUCCESS(err); VkDeviceMemory mem2; err = vkAllocateMemory(m_device->device(), &mem_alloc[1], NULL, &mem2); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub2, mem2, 0); ASSERT_VK_SUCCESS(err); // Update descriptors const uint32_t BINDING_COUNT = 3; VkDescriptorBufferInfo buff_info[BINDING_COUNT] = {}; buff_info[0].buffer = dyub1; buff_info[0].offset = 0; buff_info[0].range = 256; buff_info[1].buffer = dyub1; buff_info[1].offset = 256; buff_info[1].range = 512; buff_info[2].buffer = dyub2; buff_info[2].offset = 0; buff_info[2].range = 512; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = BINDING_COUNT; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; descriptor_write.pBufferInfo = buff_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo1 { int x; int y; } bar1;\n" "layout(set=0) layout(binding=2) uniform foo2 { int x; int y; } bar2;\n" "void main(){\n" " x = vec4(bar1.y) + vec4(bar2.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // This update should succeed, but offset of inactive binding 1 oversteps binding 2 buffer size // we used to have a bug in this case. 
uint32_t dyn_off[BINDING_COUNT] = {0, 1024, 256}; vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, BINDING_COUNT, dyn_off); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyNotFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyBuffer(m_device->device(), dyub1, NULL); vkDestroyBuffer(m_device->device(), dyub2, NULL); vkFreeMemory(m_device->device(), mem1, NULL); vkFreeMemory(m_device->device(), mem2, NULL); } TEST_F(VkPositiveLayerTest, NonCoherentMemoryMapping) { TEST_DESCRIPTION( "Ensure that validation's handling of non-coherent memory mapping while using VK_WHOLE_SIZE does not cause access " "violations"); VkResult err; uint8_t *pData; ASSERT_NO_FATAL_FAILURE(Init()); VkDeviceMemory mem; VkMemoryRequirements mem_reqs; mem_reqs.memoryTypeBits = 0xFFFFFFFF; const VkDeviceSize atom_size = m_device->props.limits.nonCoherentAtomSize; VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; static const VkDeviceSize allocation_size = 32 * atom_size; alloc_info.allocationSize = allocation_size; // Find a memory configuration WITHOUT a COHERENT bit, otherwise exit bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); if (!pass) { pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); if (!pass) { pass = m_device->phy().set_memory_type( mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); if (!pass) { printf("%s Couldn't find a memory type without a COHERENT bit.\n", kSkipPrefix); return; } } } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); // Map/Flush/Invalidate using WHOLE_SIZE and zero offsets and entire mapped range m_errorMonitor->ExpectSuccess(); err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); VkMappedMemoryRange mmr = {}; mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; mmr.memory = mem; mmr.offset = 0; mmr.size = VK_WHOLE_SIZE; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkUnmapMemory(m_device->device(), mem); // Map/Flush/Invalidate using WHOLE_SIZE and an offset and entire mapped range m_errorMonitor->ExpectSuccess(); err = vkMapMemory(m_device->device(), mem, 5 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; mmr.memory = mem; mmr.offset = 6 * atom_size; mmr.size = VK_WHOLE_SIZE; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkUnmapMemory(m_device->device(), mem); // Map with offset and size // Flush/Invalidate subrange of mapped area with offset and size m_errorMonitor->ExpectSuccess(); err = vkMapMemory(m_device->device(), mem, 3 * atom_size, 9 * atom_size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err);
mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; mmr.memory = mem; mmr.offset = 4 * atom_size; mmr.size = 2 * atom_size; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkUnmapMemory(m_device->device(), mem); // Map without offset and flush WHOLE_SIZE with two separate offsets m_errorMonitor->ExpectSuccess(); err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; mmr.memory = mem; mmr.offset = allocation_size - (4 * atom_size); mmr.size = VK_WHOLE_SIZE; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); mmr.offset = allocation_size - (6 * atom_size); mmr.size = VK_WHOLE_SIZE; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkUnmapMemory(m_device->device(), mem); vkFreeMemory(m_device->device(), mem, NULL); } // This is a positive test. We used to expect error in this case but spec now allows it TEST_F(VkPositiveLayerTest, ResetUnsignaledFence) { m_errorMonitor->ExpectSuccess(); vk_testing::Fence testFence; VkFenceCreateInfo fenceInfo = {}; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; ASSERT_NO_FATAL_FAILURE(Init()); testFence.init(*m_device, fenceInfo); VkFence fences[1] = {testFence.handle()}; VkResult result = vkResetFences(m_device->device(), 1, fences); ASSERT_VK_SUCCESS(result); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CommandBufferSimultaneousUseSync) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkResult err; // Record (empty!) command buffer that can be submitted multiple times // simultaneously. VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, nullptr}; m_commandBuffer->begin(&cbbi); m_commandBuffer->end(); VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; VkFence fence; err = vkCreateFence(m_device->device(), &fci, nullptr, &fence); ASSERT_VK_SUCCESS(err); VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0}; VkSemaphore s1, s2; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s1); ASSERT_VK_SUCCESS(err); err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s2); ASSERT_VK_SUCCESS(err); // Submit CB once signaling s1, with fence so we can roll forward to its retirement. VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 1, &m_commandBuffer->handle(), 1, &s1}; err = vkQueueSubmit(m_device->m_queue, 1, &si, fence); ASSERT_VK_SUCCESS(err); // Submit CB again, signaling s2. si.pSignalSemaphores = &s2; err = vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); // Wait for fence. err = vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); ASSERT_VK_SUCCESS(err); // CB is still in flight from second submission, but semaphore s1 is no // longer in flight. delete it. 
vkDestroySemaphore(m_device->device(), s1, nullptr); m_errorMonitor->VerifyNotFound(); // Force device idle and clean up remaining objects vkDeviceWaitIdle(m_device->device()); vkDestroySemaphore(m_device->device(), s2, nullptr); vkDestroyFence(m_device->device(), fence, nullptr); } TEST_F(VkPositiveLayerTest, FenceCreateSignaledWaitHandling) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkResult err; // A fence created signaled VkFenceCreateInfo fci1 = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, VK_FENCE_CREATE_SIGNALED_BIT}; VkFence f1; err = vkCreateFence(m_device->device(), &fci1, nullptr, &f1); ASSERT_VK_SUCCESS(err); // A fence created unsignaled VkFenceCreateInfo fci2 = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; VkFence f2; err = vkCreateFence(m_device->device(), &fci2, nullptr, &f2); ASSERT_VK_SUCCESS(err); // Submit the unsignaled fence VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 0, nullptr, 0, nullptr}; err = vkQueueSubmit(m_device->m_queue, 1, &si, f2); ASSERT_VK_SUCCESS(err); // Wait on both fences, with signaled first. VkFence fences[] = {f1, f2}; vkWaitForFences(m_device->device(), 2, fences, VK_TRUE, UINT64_MAX); // Should have both retired! vkDestroyFence(m_device->device(), f1, nullptr); vkDestroyFence(m_device->device(), f2, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreateImageViewFollowsParameterCompatibilityRequirements) { TEST_DESCRIPTION("Verify that creating an ImageView with valid usage does not generate validation errors."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {128, 128, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&imgInfo); ASSERT_TRUE(image.initialized()); VkImageView imageView; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyNotFound(); vkDestroyImageView(m_device->device(), imageView, NULL); } TEST_F(VkPositiveLayerTest, ValidUsage) { TEST_DESCRIPTION("Verify that creating an image view from an image with valid usage doesn't generate validation errors"); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); // Verify that we can create a view with usage INPUT_ATTACHMENT VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView imageView; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyNotFound(); vkDestroyImageView(m_device->device(), imageView, NULL); } // This is a positive test. No failures are expected. TEST_F(VkPositiveLayerTest, BindSparse) { TEST_DESCRIPTION("Bind 2 memory ranges to one image using vkQueueBindSparse, destroy the image and then free the memory"); ASSERT_NO_FATAL_FAILURE(Init()); auto index = m_device->graphics_queue_node_index_; if (!(m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) { printf("%s Graphics queue does not have sparse binding bit.\n", kSkipPrefix); return; } if (!m_device->phy().features().sparseBinding) { printf("%s Device does not support sparse bindings.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); VkImage image; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory memory_one, memory_two; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Find an image big enough to allow sparse mapping of 2 memory regions // Increase the image size until it is at least twice the // size of the required alignment, to ensure we can bind both // allocated memory blocks to the image on aligned offsets. 
while (memory_reqs.size < (memory_reqs.alignment * 2)) { vkDestroyImage(m_device->device(), image, nullptr); image_create_info.extent.width *= 2; image_create_info.extent.height *= 2; err = vkCreateImage(m_device->device(), &image_create_info, nullptr, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); } // Allocate 2 memory regions of minimum alignment size, bind one at 0, the other // at the end of the first memory_info.allocationSize = memory_reqs.alignment; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &memory_one); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &memory_two); ASSERT_VK_SUCCESS(err); VkSparseMemoryBind binds[2]; binds[0].flags = 0; binds[0].memory = memory_one; binds[0].memoryOffset = 0; binds[0].resourceOffset = 0; binds[0].size = memory_info.allocationSize; binds[1].flags = 0; binds[1].memory = memory_two; binds[1].memoryOffset = 0; binds[1].resourceOffset = memory_info.allocationSize; binds[1].size = memory_info.allocationSize; VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo; opaqueBindInfo.image = image; opaqueBindInfo.bindCount = 2; opaqueBindInfo.pBinds = binds; VkFence fence = VK_NULL_HANDLE; VkBindSparseInfo bindSparseInfo = {}; bindSparseInfo.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO; bindSparseInfo.imageOpaqueBindCount = 1; bindSparseInfo.pImageOpaqueBinds = &opaqueBindInfo; vkQueueBindSparse(m_device->m_queue, 1, &bindSparseInfo, fence); vkQueueWaitIdle(m_device->m_queue); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), memory_one, NULL); vkFreeMemory(m_device->device(), memory_two, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, BindSparseMetadata) { TEST_DESCRIPTION("Bind memory for the metadata aspect of a sparse image"); ASSERT_NO_FATAL_FAILURE(Init()); auto index = m_device->graphics_queue_node_index_; if (!(m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) { printf("%s Graphics queue does not have sparse binding bit.\n", kSkipPrefix); return; } if (!m_device->phy().features().sparseResidencyImage2D) { printf("%s Device does not support sparse residency for images.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); // Create a sparse image VkImage image; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); // Query image memory requirements VkMemoryRequirements memory_reqs; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Query sparse memory requirements uint32_t sparse_reqs_count = 0; vkGetImageSparseMemoryRequirements(m_device->device(), 
image, &sparse_reqs_count, nullptr); std::vector<VkSparseImageMemoryRequirements> sparse_reqs(sparse_reqs_count); vkGetImageSparseMemoryRequirements(m_device->device(), image, &sparse_reqs_count, sparse_reqs.data()); // Find requirements for metadata aspect const VkSparseImageMemoryRequirements *metadata_reqs = nullptr; for (auto const &aspect_sparse_reqs : sparse_reqs) { if (aspect_sparse_reqs.formatProperties.aspectMask == VK_IMAGE_ASPECT_METADATA_BIT) { metadata_reqs = &aspect_sparse_reqs; } } if (!metadata_reqs) { printf("%s Sparse image does not require memory for metadata.\n", kSkipPrefix); } else { // Allocate memory for the metadata VkDeviceMemory metadata_memory = VK_NULL_HANDLE; VkMemoryAllocateInfo metadata_memory_info = {}; metadata_memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; metadata_memory_info.allocationSize = metadata_reqs->imageMipTailSize; m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &metadata_memory_info, 0); err = vkAllocateMemory(m_device->device(), &metadata_memory_info, NULL, &metadata_memory); ASSERT_VK_SUCCESS(err); // Bind metadata VkSparseMemoryBind sparse_bind = {}; sparse_bind.resourceOffset = metadata_reqs->imageMipTailOffset; sparse_bind.size = metadata_reqs->imageMipTailSize; sparse_bind.memory = metadata_memory; sparse_bind.memoryOffset = 0; sparse_bind.flags = VK_SPARSE_MEMORY_BIND_METADATA_BIT; VkSparseImageOpaqueMemoryBindInfo opaque_bind_info = {}; opaque_bind_info.image = image; opaque_bind_info.bindCount = 1; opaque_bind_info.pBinds = &sparse_bind; VkBindSparseInfo bind_info = {}; bind_info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO; bind_info.imageOpaqueBindCount = 1; bind_info.pImageOpaqueBinds = &opaque_bind_info; vkQueueBindSparse(m_device->m_queue, 1, &bind_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); // Cleanup vkQueueWaitIdle(m_device->m_queue); vkFreeMemory(m_device->device(), metadata_memory, NULL); } vkDestroyImage(m_device->device(), image, NULL); } TEST_F(VkPositiveLayerTest, FramebufferBindingDestroyCommandPool) { TEST_DESCRIPTION( "This test should pass. Create a Framebuffer and command buffer, bind them together, then destroy command pool and " "framebuffer and verify there are no errors."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with one color attachment. VkAttachmentDescription attachment = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. 
VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Explicitly create a command buffer to bind the FB to so that we can then // destroy the command pool in order to implicitly free command buffer VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer); // Begin our cmd buffer with renderpass using our framebuffer VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer, &begin_info); vkCmdBeginRenderPass(command_buffer, &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(command_buffer); vkEndCommandBuffer(command_buffer); // Destroy command pool to implicitly free command buffer vkDestroyCommandPool(m_device->device(), command_pool, NULL); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, FramebufferCreateDepthStencilLayoutTransitionForDepthOnlyImageView) { TEST_DESCRIPTION( "Validate that when an imageView of a depth/stencil image is used as a depth/stencil framebuffer attachment, the " "aspectMask is ignored and both depth and stencil image subresources are used."); ASSERT_NO_FATAL_FAILURE(Init()); VkFormatProperties format_properties; vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, &format_properties); if (!(format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) { printf("%s Image format does not support sampling.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkAttachmentDescription attachment = {0, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &att_ref, 0, nullptr}; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 1, &dep}; VkResult err; VkRenderPass rp; err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_D32_SFLOAT_S8_UINT, 0x26, // usage VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); image.SetLayout(0x6, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_D32_SFLOAT_S8_UINT, {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A}, {0x2, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkImageMemoryBarrier imb = {}; imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; imb.pNext = nullptr; imb.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; imb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; imb.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; imb.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; imb.srcQueueFamilyIndex = 0; imb.dstQueueFamilyIndex = 0; imb.image = image.handle(); imb.subresourceRange.aspectMask = 0x6; imb.subresourceRange.baseMipLevel = 0; imb.subresourceRange.levelCount = 0x1; imb.subresourceRange.baseArrayLayer = 0; imb.subresourceRange.layerCount = 0x1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &imb); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyNotFound(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyImageView(m_device->device(), view, nullptr); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, BarrierLayoutToImageUsage) { TEST_DESCRIPTION("Ensure barriers' new and old VkImageLayout are compatible with their images' VkImageUsageFlags"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = NULL; img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; { VkImageObj img_color(m_device); img_color.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_color.initialized()); VkImageObj img_ds1(m_device); img_ds1.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_ds1.initialized()); VkImageObj img_ds2(m_device); img_ds2.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_ds2.initialized()); VkImageObj img_xfer_src(m_device); img_xfer_src.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_src.initialized()); VkImageObj img_xfer_dst(m_device); img_xfer_dst.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_dst.initialized()); VkImageObj img_sampled(m_device); img_sampled.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_sampled.initialized()); VkImageObj img_input(m_device); img_input.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_input.initialized()); const struct { VkImageObj &image_obj; VkImageLayout old_layout; VkImageLayout new_layout; } buffer_layouts[] = { // clang-format off {img_color, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_ds1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_ds2, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_sampled, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_input, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_xfer_src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_xfer_dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, // clang-format on }; const uint32_t layout_count = sizeof(buffer_layouts) / sizeof(buffer_layouts[0]); m_commandBuffer->begin(); for (uint32_t i = 0; i < layout_count; ++i) { img_barrier.image = buffer_layouts[i].image_obj.handle(); const VkImageUsageFlags usage = buffer_layouts[i].image_obj.usage(); img_barrier.subresourceRange.aspectMask = (usage == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ? 
(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) : VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.oldLayout = buffer_layouts[i].old_layout; img_barrier.newLayout = buffer_layouts[i].new_layout; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); img_barrier.oldLayout = buffer_layouts[i].new_layout; img_barrier.newLayout = buffer_layouts[i].old_layout; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); } m_commandBuffer->end(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; } m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, WaitEventThenSet) { TEST_DESCRIPTION("Wait on a event then set it after the wait has been submitted."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer, &begin_info); vkCmdWaitEvents(command_buffer, 1, &event, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 0, nullptr); vkCmdResetEvent(command_buffer, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT); vkEndCommandBuffer(command_buffer); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = nullptr; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { vkSetEvent(m_device->device(), event); } vkQueueWaitIdle(queue); vkDestroyEvent(m_device->device(), event, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 1, &command_buffer); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. 
TEST_F(VkPositiveLayerTest, QueryAndCopySecondaryCommandBuffers) { TEST_DESCRIPTION("Issue a query on a secondary command buffer and copy it on a primary."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info{}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP; query_pool_create_info.queryCount = 1; vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool); VkCommandPoolObj command_pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj primary_buffer(m_device, &command_pool); VkCommandBufferObj secondary_buffer(m_device, &command_pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); uint32_t qfi = 0; VkBufferCreateInfo buff_create_info = {}; buff_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_create_info.size = 1024; buff_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buff_create_info.queueFamilyIndexCount = 1; buff_create_info.pQueueFamilyIndices = &qfi; VkResult err; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); VkCommandBufferInheritanceInfo hinfo = {}; hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; hinfo.renderPass = VK_NULL_HANDLE; hinfo.subpass = 0; hinfo.framebuffer = VK_NULL_HANDLE; hinfo.occlusionQueryEnable = VK_FALSE; hinfo.queryFlags = 0; hinfo.pipelineStatistics = 0; { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; begin_info.pInheritanceInfo = &hinfo; secondary_buffer.begin(&begin_info); vkCmdResetQueryPool(secondary_buffer.handle(), query_pool, 0, 1); vkCmdWriteTimestamp(secondary_buffer.handle(), VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, query_pool, 0); secondary_buffer.end(); primary_buffer.begin(); vkCmdExecuteCommands(primary_buffer.handle(), 1, &secondary_buffer.handle()); vkCmdCopyQueryPoolResults(primary_buffer.handle(), query_pool, 0, 1, buffer, 0, 0, 0); primary_buffer.end(); } primary_buffer.QueueCommandBuffer(); vkQueueWaitIdle(queue); vkDestroyQueryPool(m_device->device(), query_pool, nullptr); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), mem, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. 
TEST_F(VkPositiveLayerTest, QueryAndCopyMultipleCommandBuffers) { TEST_DESCRIPTION("Issue a query and copy from it on a second command buffer."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info{}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP; query_pool_create_info.queryCount = 1; vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); uint32_t qfi = 0; VkBufferCreateInfo buff_create_info = {}; buff_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_create_info.size = 1024; buff_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buff_create_info.queueFamilyIndexCount = 1; buff_create_info.pQueueFamilyIndices = &qfi; VkResult err; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { vkDestroyBuffer(m_device->device(), buffer, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdResetQueryPool(command_buffer[0], query_pool, 0, 1); vkCmdWriteTimestamp(command_buffer[0], VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, query_pool, 0); vkEndCommandBuffer(command_buffer[0]); vkBeginCommandBuffer(command_buffer[1], &begin_info); vkCmdCopyQueryPoolResults(command_buffer[1], query_pool, 0, 1, buffer, 0, 0, 0); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 2; submit_info.pCommandBuffers = command_buffer; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = nullptr; vkQueueSubmit(queue, 1, &submit_info, 
VK_NULL_HANDLE); } vkQueueWaitIdle(queue); vkDestroyQueryPool(m_device->device(), query_pool, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, command_buffer); vkDestroyCommandPool(m_device->device(), command_pool, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), mem, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, ResetEventThenSet) { TEST_DESCRIPTION("Reset an event then set it after the reset has been submitted."); ASSERT_NO_FATAL_FAILURE(Init()); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer, &begin_info); vkCmdResetEvent(command_buffer, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT); vkEndCommandBuffer(command_buffer); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = nullptr; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is already in use by a command buffer."); vkSetEvent(m_device->device(), event); m_errorMonitor->VerifyFound(); } vkQueueWaitIdle(queue); vkDestroyEvent(m_device->device(), event, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 1, &command_buffer); vkDestroyCommandPool(m_device->device(), command_pool, NULL); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoFencesThreeFrames) { TEST_DESCRIPTION( "Two command buffers with two separate fences are each run through a Submit & WaitForFences cycle 3 times. 
This previously " "revealed a bug so running this positive test to prevent a regression."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); static const uint32_t NUM_OBJECTS = 2; static const uint32_t NUM_FRAMES = 3; VkCommandBuffer cmd_buffers[NUM_OBJECTS] = {}; VkFence fences[NUM_OBJECTS] = {}; VkCommandPool cmd_pool; VkCommandPoolCreateInfo cmd_pool_ci = {}; cmd_pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; cmd_pool_ci.queueFamilyIndex = m_device->graphics_queue_node_index_; cmd_pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; VkResult err = vkCreateCommandPool(m_device->device(), &cmd_pool_ci, nullptr, &cmd_pool); ASSERT_VK_SUCCESS(err); VkCommandBufferAllocateInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmd_buf_info.commandPool = cmd_pool; cmd_buf_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; cmd_buf_info.commandBufferCount = 1; VkFenceCreateInfo fence_ci = {}; fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fence_ci.pNext = nullptr; fence_ci.flags = 0; for (uint32_t i = 0; i < NUM_OBJECTS; ++i) { err = vkAllocateCommandBuffers(m_device->device(), &cmd_buf_info, &cmd_buffers[i]); ASSERT_VK_SUCCESS(err); err = vkCreateFence(m_device->device(), &fence_ci, nullptr, &fences[i]); ASSERT_VK_SUCCESS(err); } for (uint32_t frame = 0; frame < NUM_FRAMES; ++frame) { for (uint32_t obj = 0; obj < NUM_OBJECTS; ++obj) { // Create empty cmd buffer VkCommandBufferBeginInfo cmdBufBeginDesc = {}; cmdBufBeginDesc.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; err = vkBeginCommandBuffer(cmd_buffers[obj], &cmdBufBeginDesc); ASSERT_VK_SUCCESS(err); err = vkEndCommandBuffer(cmd_buffers[obj]); ASSERT_VK_SUCCESS(err); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmd_buffers[obj]; // Submit cmd buffer and wait for fence err = vkQueueSubmit(queue, 1, &submit_info, fences[obj]); ASSERT_VK_SUCCESS(err); err = vkWaitForFences(m_device->device(), 1, &fences[obj], VK_TRUE, UINT64_MAX); ASSERT_VK_SUCCESS(err); err = vkResetFences(m_device->device(), 1, &fences[obj]); ASSERT_VK_SUCCESS(err); } } m_errorMonitor->VerifyNotFound(); vkDestroyCommandPool(m_device->device(), cmd_pool, NULL); for (uint32_t i = 0; i < NUM_OBJECTS; ++i) { vkDestroyFence(m_device->device(), fences[i], nullptr); } } // This is a positive test. No errors should be generated. 
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceQWI) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues followed by a QueueWaitIdle."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } vkQueueWaitIdle(m_device->m_queue); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), 
command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceQWIFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence followed " "by a QueueWaitIdle."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = 
&semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } vkQueueWaitIdle(m_device->m_queue); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceTwoWFF) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence followed " "by two consecutive WaitForFences calls on the same fence."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; 
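// First batch signals the semaphore so the batch submitted to the other queue can wait on it.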
submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, TwoQueuesEnsureCorrectRetirementWithWorkStolen) { ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Test requires two queues, skipping\n", kSkipPrefix); return; } VkResult err; m_errorMonitor->ExpectSuccess(); VkQueue q0 = m_device->m_queue; VkQueue q1 = nullptr; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &q1); ASSERT_NE(q1, nullptr); // An (empty) command buffer. We must have work in the first submission -- // the layer treats unfenced work differently from fenced work. VkCommandPoolCreateInfo cpci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, nullptr, 0, 0}; VkCommandPool pool; err = vkCreateCommandPool(m_device->device(), &cpci, nullptr, &pool); ASSERT_VK_SUCCESS(err); VkCommandBufferAllocateInfo cbai = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, nullptr, pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1}; VkCommandBuffer cb; err = vkAllocateCommandBuffers(m_device->device(), &cbai, &cb); ASSERT_VK_SUCCESS(err); VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, nullptr}; err = vkBeginCommandBuffer(cb, &cbbi); ASSERT_VK_SUCCESS(err); err = vkEndCommandBuffer(cb); ASSERT_VK_SUCCESS(err); // A semaphore VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0}; VkSemaphore s; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s); ASSERT_VK_SUCCESS(err); // First submission, to q0 VkSubmitInfo s0 = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 1, &cb, 1, &s}; err = vkQueueSubmit(q0, 1, &s0, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); // Second submission, to q1, waiting on s VkFlags waitmask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; // doesn't really matter what this value is. VkSubmitInfo s1 = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &s, &waitmask, 0, nullptr, 0, nullptr}; err = vkQueueSubmit(q1, 1, &s1, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); // Wait for q0 idle err = vkQueueWaitIdle(q0); ASSERT_VK_SUCCESS(err); // Command buffer should have been completed (it was on q0); reset the pool. vkFreeCommandBuffers(m_device->device(), pool, 1, &cb); m_errorMonitor->VerifyNotFound(); // Force device completely idle and clean up resources vkDeviceWaitIdle(m_device->device()); vkDestroyCommandPool(m_device->device(), pool, nullptr); vkDestroySemaphore(m_device->device(), s, nullptr); } // This is a positive test. No errors should be generated. 
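// Same two-queue semaphore pattern as above, but the host synchronizes with a single vkWaitForFences on the fence attached to the second submission.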
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence, " "followed by a WaitForFences call."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } 
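// Once the fence signals, both batches have completed: the second batch waited on the semaphore signaled by the first.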
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueWithSemaphoreAndOneFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call on the same queue, sharing a signal/wait semaphore, the second " "having a fence, followed by a WaitForFences call."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; 
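// The second batch waits on the semaphore signaled by the first batch submitted to the same queue.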
submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueNullQueueSubmitWithFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call on the same queue, no fences, followed by a third QueueSubmit " "with NO SubmitInfos but with a fence, followed by a WaitForFences call."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = VK_NULL_HANDLE; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = VK_NULL_HANDLE; 
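// Neither command-buffer submission uses semaphores or a fence here; the fence is attached to the empty vkQueueSubmit that follows.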
submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } vkQueueSubmit(m_device->m_queue, 0, NULL, fence); VkResult err = vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); ASSERT_VK_SUCCESS(err); vkDestroyFence(m_device->device(), fence, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueOneFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call on the same queue, the second having a fence, followed by a " "WaitForFences call."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = VK_NULL_HANDLE; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = VK_NULL_HANDLE; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, 
fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoSubmitInfosWithSemaphoreOneQueueSubmitsOneFence) { TEST_DESCRIPTION( "Two command buffers each in a separate SubmitInfo sent in a single QueueSubmit call followed by a WaitForFences call."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info[2]; VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].pNext = NULL; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = &command_buffer[0]; submit_info[0].signalSemaphoreCount = 1; submit_info[0].pSignalSemaphores = &semaphore; submit_info[0].waitSemaphoreCount = 0; submit_info[0].pWaitSemaphores = NULL; submit_info[0].pWaitDstStageMask = 0; submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[1].pNext = NULL; submit_info[1].commandBufferCount = 1; submit_info[1].pCommandBuffers = &command_buffer[1]; submit_info[1].waitSemaphoreCount = 1; 
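// The second SubmitInfo waits on the semaphore signaled by the first; both batches go out in a single vkQueueSubmit call guarded by one fence.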
submit_info[1].pWaitSemaphores = &semaphore; submit_info[1].pWaitDstStageMask = flags; submit_info[1].signalSemaphoreCount = 0; submit_info[1].pSignalSemaphores = NULL; vkQueueSubmit(m_device->m_queue, 2, &submit_info[0], fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); vkDestroySemaphore(m_device->device(), semaphore, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineAttribMatrixType) { TEST_DESCRIPTION("Test that pipeline validation accepts matrices passed as vertex attributes"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs[2]; memset(input_attribs, 0, sizeof(input_attribs)); for (int i = 0; i < 2; i++) { input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[i].location = i; } char const *vsSource = "#version 450\n" "\n" "layout(location=0) in mat2x4 x;\n" "void main(){\n" " gl_Position = x[0] + x[1];\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs, 2); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); /* expect success */ m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineAttribArrayType) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs[2]; memset(input_attribs, 0, sizeof(input_attribs)); for (int i = 0; i < 2; i++) { input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[i].location = i; } char const *vsSource = "#version 450\n" "\n" "layout(location=0) in vec4 x[2];\n" "void main(){\n" " gl_Position = x[0] + x[1];\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs, 2); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineAttribComponents) { TEST_DESCRIPTION( "Test that pipeline validation accepts consuming a vertex attribute through multiple vertex 
shader inputs, each consuming " "a different subset of the components, and that fragment shader-attachment validation tolerates multiple duplicate " "location outputs"); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs[3]; memset(input_attribs, 0, sizeof(input_attribs)); for (int i = 0; i < 3; i++) { input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[i].location = i; } char const *vsSource = "#version 450\n" "\n" "layout(location=0) in vec4 x;\n" "layout(location=1) in vec3 y1;\n" "layout(location=1, component=3) in float y2;\n" "layout(location=2) in vec4 z;\n" "void main(){\n" " gl_Position = x + vec4(y1, y2) + z;\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0, component=0) out float color0;\n" "layout(location=0, component=1) out float color1;\n" "layout(location=0, component=2) out float color2;\n" "layout(location=0, component=3) out float color3;\n" "layout(location=1, component=0) out vec2 second_color0;\n" "layout(location=1, component=2) out vec2 second_color1;\n" "void main(){\n" " color0 = float(1);\n" " second_color0 = vec2(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); // Create a renderPass with two color attachments VkAttachmentReference attachments[2] = {}; attachments[0].layout = VK_IMAGE_LAYOUT_GENERAL; attachments[1].attachment = 1; attachments[1].layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = attachments; subpass.colorAttachmentCount = 2; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 2; VkAttachmentDescription attach_desc[2] = {}; attach_desc[0].format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc[0].samples = VK_SAMPLE_COUNT_1_BIT; attach_desc[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attach_desc[1].format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc[1].samples = VK_SAMPLE_COUNT_1_BIT; attach_desc[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc[1].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; rpci.pAttachments = attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass renderpass; vkCreateRenderPass(m_device->device(), &rpci, NULL, &renderpass); pipe.AddShader(&vs); pipe.AddShader(&fs); VkPipelineColorBlendAttachmentState att_state1 = {}; att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state1.blendEnable = VK_FALSE; pipe.AddColorAttachment(0, att_state1); pipe.AddColorAttachment(1, att_state1); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs, 3); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderpass); vkDestroyRenderPass(m_device->device(), renderpass, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineSimplePositive) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); 
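// Baseline case: a minimal vertex + fragment pipeline whose creation alone should not produce any validation messages.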
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "void main(){\n" " gl_Position = vec4(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineRelaxedTypeMatch) { TEST_DESCRIPTION( "Test that pipeline validation accepts the relaxed type matching rules set out in 14.1.3: fundamental type must match, and " "producer side must have at least as many components"); m_errorMonitor->ExpectSuccess(); // VK 1.0.8 Specification, 14.1.3 "Additionally,..." block ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "layout(location=0) out vec3 x;\n" "layout(location=1) out ivec3 y;\n" "layout(location=2) out vec3 z;\n" "void main(){\n" " gl_Position = vec4(0);\n" " x = vec3(0); y = ivec3(0); z = vec3(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "layout(location=0) in float x;\n" "layout(location=1) flat in int y;\n" "layout(location=2) in vec2 z;\n" "void main(){\n" " color = vec4(1 + x + y + z.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkResult err = VK_SUCCESS; err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineTessPerVertex) { TEST_DESCRIPTION("Test that pipeline validation accepts per-vertex variables passed between the TCS and TES stages"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "void main(){}\n"; char const *tcsSource = "#version 450\n" "layout(location=0) out int x[];\n" "layout(vertices=3) out;\n" "void main(){\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " gl_TessLevelInner[0] = 1;\n" " x[gl_InvocationID] = gl_InvocationID;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "layout(location=0) in int x[];\n" "void main(){\n" " gl_Position.xyz = gl_TessCoord;\n" " gl_Position.w = x[0] + x[1] + x[2];\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, 
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkPipelineObj pipe(m_device); pipe.SetInputAssembly(&iasci); pipe.SetTessellation(&tsci); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineGeometryInputBlockPositive) { TEST_DESCRIPTION( "Test that pipeline validation accepts a user-defined interface block passed into the geometry shader. This is interesting " "because the 'extra' array level is not present on the member type, but on the block instance."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().geometryShader) { printf("%s Device does not support geometry shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "layout(location=0) out VertexData { vec4 x; } vs_out;\n" "void main(){\n" " vs_out.x = vec4(1);\n" "}\n"; char const *gsSource = "#version 450\n" "layout(triangles) in;\n" "layout(triangle_strip, max_vertices=3) out;\n" "layout(location=0) in VertexData { vec4 x; } gs_in[];\n" "void main() {\n" " gl_Position = gs_in[0].x;\n" " EmitVertex();\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj gs(m_device, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&gs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipeline64BitAttributesPositive) { TEST_DESCRIPTION( "Test that pipeline validation accepts basic use of 64bit vertex attributes. 
This is interesting because they consume " "multiple locations."); m_errorMonitor->ExpectSuccess(); if (!EnableDeviceProfileLayer()) { printf("%s Failed to enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().shaderFloat64) { printf("%s Device does not support 64bit vertex attributes; skipped.\n", kSkipPrefix); return; } // Set 64bit format to support VTX Buffer feature PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr; PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr; // Load required functions if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { return; } VkFormatProperties format_props; fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R64G64B64A64_SFLOAT, &format_props); format_props.bufferFeatures |= VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R64G64B64A64_SFLOAT, format_props); VkVertexInputBindingDescription input_bindings[1]; memset(input_bindings, 0, sizeof(input_bindings)); VkVertexInputAttributeDescription input_attribs[4]; memset(input_attribs, 0, sizeof(input_attribs)); input_attribs[0].location = 0; input_attribs[0].offset = 0; input_attribs[0].format = VK_FORMAT_R64G64B64A64_SFLOAT; input_attribs[1].location = 2; input_attribs[1].offset = 32; input_attribs[1].format = VK_FORMAT_R64G64B64A64_SFLOAT; input_attribs[2].location = 4; input_attribs[2].offset = 64; input_attribs[2].format = VK_FORMAT_R64G64B64A64_SFLOAT; input_attribs[3].location = 6; input_attribs[3].offset = 96; input_attribs[3].format = VK_FORMAT_R64G64B64A64_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=0) in dmat4 x;\n" "void main(){\n" " gl_Position = vec4(x[0][0]);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(input_bindings, 1); pipe.AddVertexInputAttribs(input_attribs, 4); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineInputAttachmentPositive) { TEST_DESCRIPTION("Positive test for a correctly matched input attachment"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); 
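// The render pass built below exposes attachment 1 in GENERAL layout as the subpass input attachment, matching the subpassInput declared at input_attachment_index=0 in the fragment shader.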
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); VkAttachmentDescription descs[2] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, }; VkAttachmentReference color = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }; VkAttachmentReference input = { 1, VK_IMAGE_LAYOUT_GENERAL, }; VkSubpassDescription sd = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input, 1, &color, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descs, 1, &sd, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // should be OK. would go wrong here if it's going to... pipe.CreateVKPipeline(pl.handle(), rp); m_errorMonitor->VerifyNotFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, CreateComputePipelineMissingDescriptorUnusedPositive) { TEST_DESCRIPTION( "Test that pipeline validation accepts a compute pipeline which declares a descriptor-backed resource which is not " "provided, but the shader does not statically use it. This is interesting because it requires compute pipelines to have a " "proper descriptor use walk, which they didn't for some time."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) buffer block { vec4 x; };\n" "void main(){\n" " // x is not used.\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, descriptorSet.GetPipelineLayout(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsSampler) { TEST_DESCRIPTION( "Test that pipeline validation accepts a shader consuming only the sampler portion of a combined image + sampler"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); std::vector<VkDescriptorSetLayoutBinding> bindings = { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {2, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }; const VkDescriptorSetLayoutObj dsl(m_device, bindings); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" 
"layout(local_size_x=1) in;\n" "layout(set=0, binding=0) uniform sampler s;\n" "layout(set=0, binding=1) uniform texture2D t;\n" "layout(set=0, binding=2) buffer block { vec4 x; };\n" "void main() {\n" " x = texture(sampler2D(t, s), vec2(0));\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsImage) { TEST_DESCRIPTION( "Test that pipeline validation accepts a shader consuming only the image portion of a combined image + sampler"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); std::vector<VkDescriptorSetLayoutBinding> bindings = { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {2, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }; const VkDescriptorSetLayoutObj dsl(m_device, bindings); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) uniform texture2D t;\n" "layout(set=0, binding=1) uniform sampler s;\n" "layout(set=0, binding=2) buffer block { vec4 x; };\n" "void main() {\n" " x = texture(sampler2D(t, s), vec2(0));\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsBoth) { TEST_DESCRIPTION( "Test that pipeline validation accepts a shader consuming both the sampler and the image of a combined image+sampler but " "via separate variables"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); std::vector<VkDescriptorSetLayoutBinding> bindings = { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }; const VkDescriptorSetLayoutObj dsl(m_device, bindings); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) uniform texture2D t;\n" "layout(set=0, binding=0) uniform sampler s; // both binding 0!\n" "layout(set=0, binding=1) buffer block { vec4 x; };\n" "void main() {\n" " x = texture(sampler2D(t, s), vec2(0));\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, 
{VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateDescriptorSetBindingWithIgnoredSamplers) { TEST_DESCRIPTION("Test that layers conditionally do ignore the pImmutableSamplers on vkCreateDescriptorSetLayout"); bool prop2_found = false; if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); prop2_found = true; } else { printf("%s %s Extension not supported, skipping push descriptor sub-tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool push_descriptor_found = false; if (prop2_found && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); push_descriptor_found = true; } else { printf("%s %s Extension not supported, skipping push descriptor sub-tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitState()); const uint64_t fake_address_64 = 0xCDCDCDCDCDCDCDCD; const uint64_t fake_address_32 = 0xCDCDCDCD; const void *fake_pointer = sizeof(void *) == 8 ? reinterpret_cast<void *>(fake_address_64) : reinterpret_cast<void *>(fake_address_32); const VkSampler *hopefully_undereferencable_pointer = reinterpret_cast<const VkSampler *>(fake_pointer); // regular descriptors m_errorMonitor->ExpectSuccess(); { const VkDescriptorSetLayoutBinding non_sampler_bindings[] = { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {2, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {3, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {4, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {5, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {6, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {7, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {8, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, }; const VkDescriptorSetLayoutCreateInfo dslci = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, nullptr, 0, static_cast<uint32_t>(size(non_sampler_bindings)), non_sampler_bindings}; VkDescriptorSetLayout dsl; const VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &dslci, nullptr, &dsl); ASSERT_VK_SUCCESS(err); vkDestroyDescriptorSetLayout(m_device->device(), dsl, nullptr); } m_errorMonitor->VerifyNotFound(); if (push_descriptor_found) { // push descriptors m_errorMonitor->ExpectSuccess(); { const VkDescriptorSetLayoutBinding non_sampler_bindings[] = { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {2, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {3, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {4, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {5, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {6, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, }; const VkDescriptorSetLayoutCreateInfo dslci = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, nullptr, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR, static_cast<uint32_t>(size(non_sampler_bindings)), non_sampler_bindings}; VkDescriptorSetLayout dsl; const VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &dslci, nullptr, &dsl); ASSERT_VK_SUCCESS(err); vkDestroyDescriptorSetLayout(m_device->device(), dsl, nullptr); } m_errorMonitor->VerifyNotFound(); } } TEST_F(VkPositiveLayerTest, Maintenance1Tests) { TEST_DESCRIPTION("Validate various special cases for the Maintenance1_KHR extension"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s Maintenance1 Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); VkCommandBufferObj cmd_buf(m_device, m_commandPool); cmd_buf.begin(); // Set Negative height, should give error if Maintenance 1 is not enabled VkViewport viewport = {0, 0, 16, -16, 0, 1}; vkCmdSetViewport(cmd_buf.handle(), 0, 1, &viewport); cmd_buf.end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, DuplicateValidPNextStructures) { TEST_DESCRIPTION("Create a pNext chain containing valid structures, but with a duplicate structure type"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME); } else { printf("%s VK_NV_dedicated_allocation extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Create two pNext structures which by themselves would be valid VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info = {}; VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info_2 = {}; dedicated_buffer_create_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV; dedicated_buffer_create_info.pNext = &dedicated_buffer_create_info_2; dedicated_buffer_create_info.dedicatedAllocation = VK_TRUE; dedicated_buffer_create_info_2.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV; dedicated_buffer_create_info_2.pNext = nullptr; dedicated_buffer_create_info_2.dedicatedAllocation = VK_TRUE; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.pNext = &dedicated_buffer_create_info; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; 
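    // Note: buffer_create_info.pNext above points at dedicated_buffer_create_info, whose own pNext points at
    // dedicated_buffer_create_info_2 with the same sType. Each structure is valid on its own, but parameter
    // validation is expected to flag the repeated VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV
    // in the chain when vkCreateBuffer is called below.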
buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "chain contains duplicate structure types"); VkBuffer buffer; vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DedicatedAllocation) { ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } else { printf("%s Dedicated allocation extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkMemoryPropertyFlags mem_flags = 0; const VkDeviceSize resource_size = 1024; auto buffer_info = VkBufferObj::create_info(resource_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT); VkBufferObj buffer; buffer.init_no_mem(*m_device, buffer_info); auto buffer_alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer.memory_requirements(), mem_flags); auto buffer_dedicated_info = lvl_init_struct<VkMemoryDedicatedAllocateInfoKHR>(); buffer_dedicated_info.buffer = buffer.handle(); buffer_alloc_info.pNext = &buffer_dedicated_info; vk_testing::DeviceMemory dedicated_buffer_memory; dedicated_buffer_memory.init(*m_device, buffer_alloc_info); VkBufferObj wrong_buffer; wrong_buffer.init_no_mem(*m_device, buffer_info); // Bind with wrong buffer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-01508"); vkBindBufferMemory(m_device->handle(), wrong_buffer.handle(), dedicated_buffer_memory.handle(), 0); m_errorMonitor->VerifyFound(); // Bind with non-zero offset (same VUID) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-01508"); // offset must be zero m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-size-01037"); // offset pushes us past size auto offset = buffer.memory_requirements().alignment; vkBindBufferMemory(m_device->handle(), buffer.handle(), dedicated_buffer_memory.handle(), offset); m_errorMonitor->VerifyFound(); // Bind correctly (depends on the "skip" above) m_errorMonitor->ExpectSuccess(); vkBindBufferMemory(m_device->handle(), buffer.handle(), dedicated_buffer_memory.handle(), 0); m_errorMonitor->VerifyNotFound(); // And for images... 
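    // The image half of this test mirrors the buffer half: allocate memory with a VkMemoryDedicatedAllocateInfoKHR
    // naming a specific image, then check that binding a different image ("VUID-vkBindImageMemory-memory-01509")
    // or binding at a non-zero offset is rejected, while binding the named image at offset zero succeeds.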
vk_testing::Image image; vk_testing::Image wrong_image; auto image_info = vk_testing::Image::create_info(); image_info.extent.width = resource_size; image_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_info.format = VK_FORMAT_R8G8B8A8_UNORM; image.init_no_mem(*m_device, image_info); wrong_image.init_no_mem(*m_device, image_info); auto image_dedicated_info = lvl_init_struct<VkMemoryDedicatedAllocateInfoKHR>(); image_dedicated_info.image = image.handle(); auto image_alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image.memory_requirements(), mem_flags); image_alloc_info.pNext = &image_dedicated_info; vk_testing::DeviceMemory dedicated_image_memory; dedicated_image_memory.init(*m_device, image_alloc_info); // Bind with wrong image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-01509"); vkBindImageMemory(m_device->handle(), wrong_image.handle(), dedicated_image_memory.handle(), 0); m_errorMonitor->VerifyFound(); // Bind with non-zero offset (same VUID) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-01509"); // offset must be zero m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-size-01049"); // offset pushes us past size auto image_offset = image.memory_requirements().alignment; vkBindImageMemory(m_device->handle(), image.handle(), dedicated_image_memory.handle(), image_offset); m_errorMonitor->VerifyFound(); // Bind correctly (depends on the "skip" above) m_errorMonitor->ExpectSuccess(); vkBindImageMemory(m_device->handle(), image.handle(), dedicated_image_memory.handle(), 0); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ValidStructPNext) { TEST_DESCRIPTION("Verify that a valid pNext value is handled correctly"); // Positive test to check parameter_validation and unique_objects support for NV_dedicated_allocation ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME); } else { printf("%s VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME Extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info = {}; dedicated_buffer_create_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV; dedicated_buffer_create_info.pNext = nullptr; dedicated_buffer_create_info.dedicatedAllocation = VK_TRUE; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.pNext = &dedicated_buffer_create_info; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; VkBuffer buffer; VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); VkDedicatedAllocationMemoryAllocateInfoNV dedicated_memory_info = {}; dedicated_memory_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV; dedicated_memory_info.pNext = nullptr; dedicated_memory_info.buffer = buffer; 
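    // VkDedicatedAllocationMemoryAllocateInfoNV dedicates the allocation to exactly one resource; here the
    // buffer created above is named, and the image member is left as VK_NULL_HANDLE below.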
dedicated_memory_info.image = VK_NULL_HANDLE; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = &dedicated_memory_info; memory_info.allocationSize = memory_reqs.size; bool pass; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); VkDeviceMemory buffer_memory; err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), buffer_memory, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, PSOPolygonModeValid) { TEST_DESCRIPTION("Verify that using a solid polygon fill mode works correctly."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); std::vector<const char *> device_extension_names; auto features = m_device->phy().features(); // Artificially disable support for non-solid fill modes features.fillModeNonSolid = false; // The sacrificial device object VkDeviceObj test_device(0, gpu(), device_extension_names, &features); VkRenderpassObj render_pass(&test_device); const VkPipelineLayoutObj pipeline_layout(&test_device); VkPipelineRasterizationStateCreateInfo rs_ci = {}; rs_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_ci.pNext = nullptr; rs_ci.lineWidth = 1.0f; rs_ci.rasterizerDiscardEnable = false; VkShaderObj vs(&test_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(&test_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Set polygonMode=FILL. No error is expected m_errorMonitor->ExpectSuccess(); { VkPipelineObj pipe(&test_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); // Set polygonMode to a good value rs_ci.polygonMode = VK_POLYGON_MODE_FILL; pipe.SetRasterization(&rs_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); } m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, LongSemaphoreChain) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkResult err; std::vector<VkSemaphore> semaphores; const int chainLength = 32768; VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; for (int i = 0; i < chainLength; i++) { VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0}; VkSemaphore semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &semaphore); ASSERT_VK_SUCCESS(err); semaphores.push_back(semaphore); VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, semaphores.size() > 1 ? 1u : 0u, semaphores.size() > 1 ? 
&semaphores[semaphores.size() - 2] : nullptr, &flags, 0, nullptr, 1, &semaphores[semaphores.size() - 1]}; err = vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); } VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; VkFence fence; err = vkCreateFence(m_device->device(), &fci, nullptr, &fence); ASSERT_VK_SUCCESS(err); VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &semaphores.back(), &flags, 0, nullptr, 0, nullptr}; err = vkQueueSubmit(m_device->m_queue, 1, &si, fence); ASSERT_VK_SUCCESS(err); vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); for (auto semaphore : semaphores) vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkDestroyFence(m_device->device(), fence, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ExternalSemaphore) { #ifdef _WIN32 const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR; #else const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external semaphore instance extensions if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for external semaphore device extensions if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) { m_device_extension_names.push_back(extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME); } else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external semaphore import and export capability VkPhysicalDeviceExternalSemaphoreInfoKHR esi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, nullptr, handle_type}; VkExternalSemaphorePropertiesKHR esp = {VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"); vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(gpu(), &esi, &esp); if (!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) || !(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External semaphore does not support importing and exporting, skipping test\n", kSkipPrefix); return; } VkResult err; m_errorMonitor->ExpectSuccess(); // Create a semaphore to export payload from VkExportSemaphoreCreateInfoKHR esci = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, nullptr, handle_type}; VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &esci, 0}; VkSemaphore export_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &export_semaphore); ASSERT_VK_SUCCESS(err); // Create a semaphore to import payload into sci.pNext = nullptr; VkSemaphore import_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &import_semaphore); 
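    // The platform-specific block below exports the payload of export_semaphore to an opaque handle
    // (a Win32 handle or a POSIX fd) and imports that handle into import_semaphore, so both semaphores
    // reference the same payload. The queue submissions that follow signal one semaphore and wait on the
    // other to confirm the layers accept the shared payload without complaint.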
ASSERT_VK_SUCCESS(err); #ifdef _WIN32 // Export semaphore payload to an opaque handle HANDLE handle = nullptr; VkSemaphoreGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreWin32HandleKHR"); err = vkGetSemaphoreWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above VkImportSemaphoreWin32HandleInfoKHR ihi = { VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, nullptr, import_semaphore, 0, handle_type, handle, nullptr}; auto vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreWin32HandleKHR"); err = vkImportSemaphoreWin32HandleKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #else // Export semaphore payload to an opaque handle int fd = 0; VkSemaphoreGetFdInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreFdKHR"); err = vkGetSemaphoreFdKHR(m_device->device(), &ghi, &fd); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above VkImportSemaphoreFdInfoKHR ihi = { VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, nullptr, import_semaphore, 0, handle_type, fd}; auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR"); err = vkImportSemaphoreFdKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #endif // Signal the exported semaphore and wait on the imported semaphore VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo si[] = { {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr}, }; err = vkQueueSubmit(m_device->m_queue, 4, si, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); if (m_device->phy().features().sparseBinding) { // Signal the imported semaphore and wait on the exported semaphore VkBindSparseInfo bi[] = { {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &import_semaphore}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &export_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &import_semaphore}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &export_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr}, }; err = vkQueueBindSparse(m_device->m_queue, 4, bi, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); } // Cleanup err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); vkDestroySemaphore(m_device->device(), export_semaphore, nullptr); vkDestroySemaphore(m_device->device(), import_semaphore, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ExternalFence) { #ifdef _WIN32 const auto extension_name = VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; #else const auto extension_name = 
VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external fence instance extensions if (InstanceExtensionSupported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s External fence extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for external fence device extensions if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) { m_device_extension_names.push_back(extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME); } else { printf("%s External fence extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external fence import and export capability VkPhysicalDeviceExternalFenceInfoKHR efi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, nullptr, handle_type}; VkExternalFencePropertiesKHR efp = {VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalFencePropertiesKHR"); vkGetPhysicalDeviceExternalFencePropertiesKHR(gpu(), &efi, &efp); if (!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR) || !(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External fence does not support importing and exporting, skipping test\n", kSkipPrefix); return; } VkResult err; m_errorMonitor->ExpectSuccess(); // Create a fence to export payload from VkFence export_fence; { VkExportFenceCreateInfoKHR efci = {VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, nullptr, handle_type}; VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &efci, 0}; err = vkCreateFence(m_device->device(), &fci, nullptr, &export_fence); ASSERT_VK_SUCCESS(err); } // Create a fence to import payload into VkFence import_fence; { VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; err = vkCreateFence(m_device->device(), &fci, nullptr, &import_fence); ASSERT_VK_SUCCESS(err); } #ifdef _WIN32 // Export fence payload to an opaque handle HANDLE handle = nullptr; { VkFenceGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_fence, handle_type}; auto vkGetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceWin32HandleKHR"); err = vkGetFenceWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); } // Import opaque handle exported above { VkImportFenceWin32HandleInfoKHR ifi = { VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR, nullptr, import_fence, 0, handle_type, handle, nullptr}; auto vkImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceWin32HandleKHR"); err = vkImportFenceWin32HandleKHR(m_device->device(), &ifi); ASSERT_VK_SUCCESS(err); } #else // Export fence payload to an opaque handle int fd = 0; { VkFenceGetFdInfoKHR gfi = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, export_fence, handle_type}; auto vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceFdKHR"); err = 
vkGetFenceFdKHR(m_device->device(), &gfi, &fd); ASSERT_VK_SUCCESS(err); } // Import opaque handle exported above { VkImportFenceFdInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, nullptr, import_fence, 0, handle_type, fd}; auto vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceFdKHR"); err = vkImportFenceFdKHR(m_device->device(), &ifi); ASSERT_VK_SUCCESS(err); } #endif // Signal the exported fence and wait on the imported fence vkQueueSubmit(m_device->m_queue, 0, nullptr, export_fence); vkWaitForFences(m_device->device(), 1, &import_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &import_fence); vkQueueSubmit(m_device->m_queue, 0, nullptr, export_fence); vkWaitForFences(m_device->device(), 1, &import_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &import_fence); // Signal the imported fence and wait on the exported fence vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence); vkWaitForFences(m_device->device(), 1, &export_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &export_fence); vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence); vkWaitForFences(m_device->device(), 1, &export_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &export_fence); // Cleanup err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); vkDestroyFence(m_device->device(), export_fence, nullptr); vkDestroyFence(m_device->device(), import_fence, nullptr); m_errorMonitor->VerifyNotFound(); } extern "C" void *ReleaseNullFence(void *arg) { struct thread_data_struct *data = (struct thread_data_struct *)arg; for (int i = 0; i < 40000; i++) { vkDestroyFence(data->device, VK_NULL_HANDLE, NULL); if (data->bailout) { break; } } return NULL; } TEST_F(VkPositiveLayerTest, ThreadNullFenceCollision) { test_platform_thread thread; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "THREADING ERROR"); ASSERT_NO_FATAL_FAILURE(Init()); struct thread_data_struct data; data.device = m_device->device(); data.bailout = false; m_errorMonitor->SetBailout(&data.bailout); // Call vkDestroyFence of VK_NULL_HANDLE repeatedly using multiple threads. // There should be no validation error from collision of that non-object. 
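    // vkDestroyFence ignores VK_NULL_HANDLE, and a null handle is not a tracked object, so the threading
    // layer should not report concurrent use even though this thread and ReleaseNullFence both call
    // vkDestroyFence(device, VK_NULL_HANDLE, NULL) in tight loops at the same time.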
test_platform_thread_create(&thread, ReleaseNullFence, (void *)&data); for (int i = 0; i < 40000; i++) { vkDestroyFence(m_device->device(), VK_NULL_HANDLE, NULL); } test_platform_thread_join(thread, NULL); m_errorMonitor->SetBailout(NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ClearColorImageWithValidRange) { TEST_DESCRIPTION("Record clear color with a valid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearColorValue clear_color = {{0.0f, 0.0f, 0.0f, 1.0f}}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle(); // Try good case { m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyNotFound(); } // Try good case with VK_REMAINING { m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyNotFound(); } } TEST_F(VkPositiveLayerTest, ClearDepthStencilWithValidRange) { TEST_DESCRIPTION("Record clear depth with a valid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkImageObj image(m_device); image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); const VkImageAspectFlags ds_aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; image.SetLayout(ds_aspect, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearDepthStencilValue clear_value = {}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle(); // Try good case { m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyNotFound(); } // Try good case with VK_REMAINING { m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {ds_aspect, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyNotFound(); } } TEST_F(VkPositiveLayerTest, CreateGraphicsPipelineWithIgnoredPointers) { TEST_DESCRIPTION("Create Graphics Pipeline with pointers that must be ignored by layers"); ASSERT_NO_FATAL_FAILURE(Init()); m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu()); ASSERT_TRUE(m_depth_stencil_fmt != 0); m_depthStencil->Init(m_device, static_cast<int32_t>(m_width), static_cast<int32_t>(m_height), m_depth_stencil_fmt); ASSERT_NO_FATAL_FAILURE(InitRenderTarget(m_depthStencil->BindInfo())); const uint64_t fake_address_64 = 0xCDCDCDCDCDCDCDCD; const uint64_t fake_address_32 = 0xCDCDCDCD; void *hopefully_undereferencable_pointer = sizeof(void *) == 8 ? reinterpret_cast<void *>(fake_address_64) : reinterpret_cast<void *>(fake_address_32); VkShaderObj vs(m_device, "#version 450\nvoid main(){gl_Position = vec4(0.0, 0.0, 0.0, 1.0);}\n", VK_SHADER_STAGE_VERTEX_BIT, this); const VkPipelineVertexInputStateCreateInfo pipeline_vertex_input_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 0, nullptr, // bindings 0, nullptr // attributes }; const VkPipelineInputAssemblyStateCreateInfo pipeline_input_assembly_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, // pNext 0, // flags VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, VK_FALSE // primitive restart }; const VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info_template{ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, nullptr, // pNext 0, // flags VK_FALSE, // depthClamp VK_FALSE, // rasterizerDiscardEnable VK_POLYGON_MODE_FILL, VK_CULL_MODE_NONE, VK_FRONT_FACE_COUNTER_CLOCKWISE, VK_FALSE, // depthBias 0.0f, 0.0f, 0.0f, // depthBias params 1.0f // lineWidth }; VkPipelineLayout pipeline_layout; { VkPipelineLayoutCreateInfo pipeline_layout_create_info{ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, nullptr, // pNext 0, // flags 0, nullptr, // layouts 0, nullptr // push constants }; VkResult err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_create_info, nullptr, &pipeline_layout); ASSERT_VK_SUCCESS(err); } // try disabled rasterizer and no tessellation { m_errorMonitor->ExpectSuccess(); VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info = pipeline_rasterization_state_create_info_template; pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_TRUE; VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{ 
VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, nullptr, // pNext 0, // flags 1, // stageCount &vs.GetStageCreateInfo(), &pipeline_vertex_input_state_create_info, &pipeline_input_assembly_state_create_info, reinterpret_cast<const VkPipelineTessellationStateCreateInfo *>(hopefully_undereferencable_pointer), reinterpret_cast<const VkPipelineViewportStateCreateInfo *>(hopefully_undereferencable_pointer), &pipeline_rasterization_state_create_info, reinterpret_cast<const VkPipelineMultisampleStateCreateInfo *>(hopefully_undereferencable_pointer), reinterpret_cast<const VkPipelineDepthStencilStateCreateInfo *>(hopefully_undereferencable_pointer), reinterpret_cast<const VkPipelineColorBlendStateCreateInfo *>(hopefully_undereferencable_pointer), nullptr, // dynamic states pipeline_layout, m_renderPass, 0, // subpass VK_NULL_HANDLE, 0}; VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(m_device->handle(), pipeline, nullptr); } const VkPipelineMultisampleStateCreateInfo pipeline_multisample_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, nullptr, // pNext 0, // flags VK_SAMPLE_COUNT_1_BIT, VK_FALSE, // sample shading 0.0f, // minSampleShading nullptr, // pSampleMask VK_FALSE, // alphaToCoverageEnable VK_FALSE // alphaToOneEnable }; // try enabled rasterizer but no subpass attachments { m_errorMonitor->ExpectSuccess(); VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info = pipeline_rasterization_state_create_info_template; pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_FALSE; VkViewport viewport = {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height)}}; const VkPipelineViewportStateCreateInfo pipeline_viewport_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 1, &viewport, 1, &scissor}; VkRenderPass render_pass; { VkSubpassDescription subpass_desc = {}; VkRenderPassCreateInfo render_pass_create_info{ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, // pNext 0, // flags 0, nullptr, // attachments 1, &subpass_desc, 0, nullptr // subpass dependencies }; VkResult err = vkCreateRenderPass(m_device->handle(), &render_pass_create_info, nullptr, &render_pass); ASSERT_VK_SUCCESS(err); } VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, nullptr, // pNext 0, // flags 1, // stageCount &vs.GetStageCreateInfo(), &pipeline_vertex_input_state_create_info, &pipeline_input_assembly_state_create_info, nullptr, &pipeline_viewport_state_create_info, &pipeline_rasterization_state_create_info, &pipeline_multisample_state_create_info, reinterpret_cast<const VkPipelineDepthStencilStateCreateInfo *>(hopefully_undereferencable_pointer), reinterpret_cast<const VkPipelineColorBlendStateCreateInfo *>(hopefully_undereferencable_pointer), nullptr, // dynamic states pipeline_layout, render_pass, 0, // subpass VK_NULL_HANDLE, 0}; VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(m_device->handle(), pipeline, nullptr); vkDestroyRenderPass(m_device->handle(), render_pass, nullptr); } // try dynamic viewport and scissor { m_errorMonitor->ExpectSuccess(); VkPipelineRasterizationStateCreateInfo 
pipeline_rasterization_state_create_info = pipeline_rasterization_state_create_info_template; pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_FALSE; const VkPipelineViewportStateCreateInfo pipeline_viewport_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 1, reinterpret_cast<const VkViewport *>(hopefully_undereferencable_pointer), 1, reinterpret_cast<const VkRect2D *>(hopefully_undereferencable_pointer)}; const VkPipelineDepthStencilStateCreateInfo pipeline_depth_stencil_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, nullptr, // pNext 0, // flags }; const VkPipelineColorBlendAttachmentState pipeline_color_blend_attachment_state = {}; const VkPipelineColorBlendStateCreateInfo pipeline_color_blend_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, nullptr, // pNext 0, // flags VK_FALSE, VK_LOGIC_OP_CLEAR, 1, &pipeline_color_blend_attachment_state, {0.0f, 0.0f, 0.0f, 0.0f}}; const VkDynamicState dynamic_states[2] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; const VkPipelineDynamicStateCreateInfo pipeline_dynamic_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 2, dynamic_states}; VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, nullptr, // pNext 0, // flags 1, // stageCount &vs.GetStageCreateInfo(), &pipeline_vertex_input_state_create_info, &pipeline_input_assembly_state_create_info, nullptr, &pipeline_viewport_state_create_info, &pipeline_rasterization_state_create_info, &pipeline_multisample_state_create_info, &pipeline_depth_stencil_state_create_info, &pipeline_color_blend_state_create_info, &pipeline_dynamic_state_create_info, // dynamic states pipeline_layout, m_renderPass, 0, // subpass VK_NULL_HANDLE, 0}; VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(m_device->handle(), pipeline, nullptr); } vkDestroyPipelineLayout(m_device->handle(), pipeline_layout, nullptr); } TEST_F(VkPositiveLayerTest, ExternalMemory) { TEST_DESCRIPTION("Perform a copy through a pair of buffers linked by external memory"); #ifdef _WIN32 const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; #else const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external memory instance extensions std::vector<const char *> reqd_instance_extensions = { {VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME}}; for (auto extension_name : reqd_instance_extensions) { if (InstanceExtensionSupported(extension_name)) { m_instance_extension_names.push_back(extension_name); } else { printf("%s Required instance extension %s not supported, skipping test\n", kSkipPrefix, extension_name); return; } } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for import/export capability VkPhysicalDeviceExternalBufferInfoKHR ebi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR, nullptr, 0, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, handle_type}; VkExternalBufferPropertiesKHR ebp = 
{VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR, nullptr, {0, 0, 0}}; auto vkGetPhysicalDeviceExternalBufferPropertiesKHR = (PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalBufferPropertiesKHR"); ASSERT_TRUE(vkGetPhysicalDeviceExternalBufferPropertiesKHR != nullptr); vkGetPhysicalDeviceExternalBufferPropertiesKHR(gpu(), &ebi, &ebp); if (!(ebp.externalMemoryProperties.compatibleHandleTypes & handle_type) || !(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) || !(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External buffer does not support importing and exporting, skipping test\n", kSkipPrefix); return; } // Check if dedicated allocation is required bool dedicated_allocation = ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR; if (dedicated_allocation) { if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } else { printf("%s Dedicated allocation extension not supported, skipping test\n", kSkipPrefix); return; } } // Check for external memory device extensions if (DeviceExtensionSupported(gpu(), nullptr, ext_mem_extension_name)) { m_device_extension_names.push_back(ext_mem_extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); } else { printf("%s External memory extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); VkMemoryPropertyFlags mem_flags = 0; const VkDeviceSize buffer_size = 1024; // Create export and import buffers const VkExternalMemoryBufferCreateInfoKHR external_buffer_info = {VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR, nullptr, handle_type}; auto buffer_info = VkBufferObj::create_info(buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT); buffer_info.pNext = &external_buffer_info; VkBufferObj buffer_export; buffer_export.init_no_mem(*m_device, buffer_info); VkBufferObj buffer_import; buffer_import.init_no_mem(*m_device, buffer_info); // Allocation info auto alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_export.memory_requirements(), mem_flags); // Add export allocation info to pNext chain VkExportMemoryAllocateInfoKHR export_info = {VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR, nullptr, handle_type}; alloc_info.pNext = &export_info; // Add dedicated allocation info to pNext chain if required VkMemoryDedicatedAllocateInfoKHR dedicated_info = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR, nullptr, VK_NULL_HANDLE, buffer_export.handle()}; if (dedicated_allocation) { export_info.pNext = &dedicated_info; } // Allocate memory to be exported vk_testing::DeviceMemory memory_export; memory_export.init(*m_device, alloc_info); // Bind exported memory buffer_export.bind_memory(memory_export, 0); #ifdef _WIN32 // Export memory to handle auto vkGetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR)vkGetInstanceProcAddr(instance(), "vkGetMemoryWin32HandleKHR"); ASSERT_TRUE(vkGetMemoryWin32HandleKHR != nullptr); VkMemoryGetWin32HandleInfoKHR mghi = 
{VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR, nullptr, memory_export.handle(), handle_type}; HANDLE handle; ASSERT_VK_SUCCESS(vkGetMemoryWin32HandleKHR(m_device->device(), &mghi, &handle)); VkImportMemoryWin32HandleInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, nullptr, handle_type, handle}; #else // Export memory to fd auto vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(instance(), "vkGetMemoryFdKHR"); ASSERT_TRUE(vkGetMemoryFdKHR != nullptr); VkMemoryGetFdInfoKHR mgfi = {VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, nullptr, memory_export.handle(), handle_type}; int fd; ASSERT_VK_SUCCESS(vkGetMemoryFdKHR(m_device->device(), &mgfi, &fd)); VkImportMemoryFdInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, nullptr, handle_type, fd}; #endif // Import memory alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_import.memory_requirements(), mem_flags); alloc_info.pNext = &import_info; vk_testing::DeviceMemory memory_import; memory_import.init(*m_device, alloc_info); // Bind imported memory buffer_import.bind_memory(memory_import, 0); // Create test buffers and fill input buffer VkMemoryPropertyFlags mem_prop = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; VkBufferObj buffer_input; buffer_input.init_as_src_and_dst(*m_device, buffer_size, mem_prop); auto input_mem = (uint8_t *)buffer_input.memory().map(); for (uint32_t i = 0; i < buffer_size; i++) { input_mem[i] = (i & 0xFF); } buffer_input.memory().unmap(); VkBufferObj buffer_output; buffer_output.init_as_src_and_dst(*m_device, buffer_size, mem_prop); // Copy from input buffer to output buffer through the exported/imported memory m_commandBuffer->begin(); VkBufferCopy copy_info = {0, 0, buffer_size}; vkCmdCopyBuffer(m_commandBuffer->handle(), buffer_input.handle(), buffer_export.handle(), 1, &copy_info); // Insert memory barrier to guarantee copy order VkMemoryBarrier mem_barrier = {VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT}; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1, &mem_barrier, 0, nullptr, 0, nullptr); vkCmdCopyBuffer(m_commandBuffer->handle(), buffer_import.handle(), buffer_output.handle(), 1, &copy_info); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, AMDMixedAttachmentSamplesValidateGraphicsPipeline) { TEST_DESCRIPTION("Verify an error message for an incorrect graphics pipeline rasterization sample count."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkRenderpassObj render_pass(m_device); const VkPipelineLayoutObj pipeline_layout(m_device); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Set a mismatched sample count VkPipelineMultisampleStateCreateInfo ms_state_ci = {}; ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT; VkPipelineObj pipe(m_device); pipe.AddShader(&vs); 
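    // Even with VK_AMD_mixed_attachment_samples enabled, rasterizationSamples must stay consistent with the
    // sample counts of the subpass attachments; the VK_SAMPLE_COUNT_4_BIT set above does not match the
    // attachments of the render pass used here, so pipeline creation below is expected to trigger
    // VUID-VkGraphicsPipelineCreateInfo-subpass-01505.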
pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&ms_state_ci); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505"); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, ParameterLayerFeatures2Capture) { TEST_DESCRIPTION("Ensure parameter_validation_layer correctly captures physical device features"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); VkResult err; m_errorMonitor->ExpectSuccess(); VkPhysicalDeviceFeatures2KHR features2; features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR; features2.pNext = nullptr; vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); // We're not creating a valid m_device, but the phy wrapper is useful vk_testing::PhysicalDevice physical_device(gpu()); vk_testing::QueueCreateInfoArray queue_info(physical_device.queue_properties()); // Only request creation with queuefamilies that have at least one queue std::vector<VkDeviceQueueCreateInfo> create_queue_infos; auto qci = queue_info.data(); for (uint32_t i = 0; i < queue_info.size(); ++i) { if (qci[i].queueCount) { create_queue_infos.push_back(qci[i]); } } VkDeviceCreateInfo dev_info = {}; dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; dev_info.pNext = &features2; dev_info.flags = 0; dev_info.queueCreateInfoCount = create_queue_infos.size(); dev_info.pQueueCreateInfos = create_queue_infos.data(); dev_info.enabledLayerCount = 0; dev_info.ppEnabledLayerNames = nullptr; dev_info.enabledExtensionCount = 0; dev_info.ppEnabledExtensionNames = nullptr; dev_info.pEnabledFeatures = nullptr; VkDevice device; err = vkCreateDevice(gpu(), &dev_info, nullptr, &device); ASSERT_VK_SUCCESS(err); if (features2.features.samplerAnisotropy) { // Test that the parameter layer is caching the features correctly using CreateSampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); // If the features were not captured correctly, this should cause an error sampler_ci.anisotropyEnable = VK_TRUE; sampler_ci.maxAnisotropy = physical_device.properties().limits.maxSamplerAnisotropy; VkSampler sampler = VK_NULL_HANDLE; err = vkCreateSampler(device, &sampler_ci, nullptr, &sampler); ASSERT_VK_SUCCESS(err); vkDestroySampler(device, sampler, nullptr); } else { printf("%s Feature samplerAnisotropy not enabled; parameter_layer check skipped.\n", kSkipPrefix); } // Verify the core validation layer has captured the physical device features by creating a a query pool. 
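    // A VK_QUERY_TYPE_PIPELINE_STATISTICS query pool is only legal when the pipelineStatisticsQuery feature
    // is enabled, so a successful vkCreateQueryPool call here shows that core validation picked up the
    // enabled features from the VkPhysicalDeviceFeatures2KHR chained into VkDeviceCreateInfo::pNext rather
    // than from pEnabledFeatures (which was left null).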
if (features2.features.pipelineStatisticsQuery) { VkQueryPool query_pool; VkQueryPoolCreateInfo qpci{}; qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS; qpci.queryCount = 1; err = vkCreateQueryPool(device, &qpci, nullptr, &query_pool); ASSERT_VK_SUCCESS(err); vkDestroyQueryPool(device, query_pool, nullptr); } else { printf("%s Feature pipelineStatisticsQuery not enabled; core_validation_layer check skipped.\n", kSkipPrefix); } vkDestroyDevice(device, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, GetMemoryRequirements2) { TEST_DESCRIPTION( "Get memory requirements with VK_KHR_get_memory_requirements2 instead of core entry points and verify layers do not emit " "errors when objects are bound and used"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for VK_KHR_get_memory_requirementes2 extensions if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } else { printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); // Create a test buffer VkBufferObj buffer; buffer.init_no_mem(*m_device, VkBufferObj::create_info(1024, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT)); // Use extension to get buffer memory requirements auto vkGetBufferMemoryRequirements2KHR = reinterpret_cast<PFN_vkGetBufferMemoryRequirements2KHR>( vkGetDeviceProcAddr(m_device->device(), "vkGetBufferMemoryRequirements2KHR")); ASSERT_TRUE(vkGetBufferMemoryRequirements2KHR != nullptr); VkBufferMemoryRequirementsInfo2KHR buffer_info = {VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR, nullptr, buffer.handle()}; VkMemoryRequirements2KHR buffer_reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR}; vkGetBufferMemoryRequirements2KHR(m_device->device(), &buffer_info, &buffer_reqs); // Allocate and bind buffer memory vk_testing::DeviceMemory buffer_memory; buffer_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_reqs.memoryRequirements, 0)); vkBindBufferMemory(m_device->device(), buffer.handle(), buffer_memory.handle(), 0); // Create a test image auto image_ci = vk_testing::Image::create_info(); image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.extent.width = 32; image_ci.extent.height = 32; image_ci.format = VK_FORMAT_R8G8B8A8_UNORM; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image image; image.init_no_mem(*m_device, image_ci); // Use extension to get image memory requirements auto vkGetImageMemoryRequirements2KHR = reinterpret_cast<PFN_vkGetImageMemoryRequirements2KHR>( vkGetDeviceProcAddr(m_device->device(), "vkGetImageMemoryRequirements2KHR")); ASSERT_TRUE(vkGetImageMemoryRequirements2KHR != nullptr); VkImageMemoryRequirementsInfo2KHR image_info = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR, nullptr, image.handle()}; VkMemoryRequirements2KHR image_reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR}; vkGetImageMemoryRequirements2KHR(m_device->device(), &image_info, &image_reqs); // Allocate and bind image memory vk_testing::DeviceMemory image_memory; image_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, 
image_reqs.memoryRequirements, 0)); vkBindImageMemory(m_device->device(), image.handle(), image_memory.handle(), 0); // Now execute arbitrary commands that use the test buffer and image m_commandBuffer->begin(); // Fill buffer with 0 vkCmdFillBuffer(m_commandBuffer->handle(), buffer.handle(), 0, VK_WHOLE_SIZE, 0); // Transition and clear image const auto subresource_range = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT); const auto barrier = image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, subresource_range); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); const VkClearColorValue color = {}; vkCmdClearColorImage(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, &color, 1, &subresource_range); // Submit and verify no validation errors m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, BindMemory2) { TEST_DESCRIPTION( "Bind memory with VK_KHR_bind_memory2 instead of core entry points and verify layers do not emit errors when objects are " "used"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for VK_KHR_get_memory_requirementes2 extensions if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); } else { printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); // Create a test buffer VkBufferObj buffer; buffer.init_no_mem(*m_device, VkBufferObj::create_info(1024, VK_BUFFER_USAGE_TRANSFER_DST_BIT)); // Allocate buffer memory vk_testing::DeviceMemory buffer_memory; buffer_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer.memory_requirements(), 0)); // Bind buffer memory with extension auto vkBindBufferMemory2KHR = reinterpret_cast<PFN_vkBindBufferMemory2KHR>(vkGetDeviceProcAddr(m_device->device(), "vkBindBufferMemory2KHR")); ASSERT_TRUE(vkBindBufferMemory2KHR != nullptr); VkBindBufferMemoryInfoKHR buffer_bind_info = {VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR, nullptr, buffer.handle(), buffer_memory.handle(), 0}; vkBindBufferMemory2KHR(m_device->device(), 1, &buffer_bind_info); // Create a test image auto image_ci = vk_testing::Image::create_info(); image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.extent.width = 32; image_ci.extent.height = 32; image_ci.format = VK_FORMAT_R8G8B8A8_UNORM; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image image; image.init_no_mem(*m_device, image_ci); // Allocate image memory vk_testing::DeviceMemory image_memory; image_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image.memory_requirements(), 0)); // Bind image memory with extension auto vkBindImageMemory2KHR = reinterpret_cast<PFN_vkBindImageMemory2KHR>(vkGetDeviceProcAddr(m_device->device(), "vkBindImageMemory2KHR")); ASSERT_TRUE(vkBindImageMemory2KHR != nullptr); VkBindImageMemoryInfoKHR image_bind_info = {VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR, nullptr, image.handle(), image_memory.handle(), 0}; vkBindImageMemory2KHR(m_device->device(), 1, &image_bind_info); // Now execute arbitrary 
commands that use the test buffer and image m_commandBuffer->begin(); // Fill buffer with 0 vkCmdFillBuffer(m_commandBuffer->handle(), buffer.handle(), 0, VK_WHOLE_SIZE, 0); // Transition and clear image const auto subresource_range = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT); const auto barrier = image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, subresource_range); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); const VkClearColorValue color = {}; vkCmdClearColorImage(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, &color, 1, &subresource_range); // Submit and verify no validation errors m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, MultiplaneImageTests) { TEST_DESCRIPTION("Positive test of multiplane image operations"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR; // All planes of equal extent ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify format VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT; bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); if (!supported) { printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } VkImage image; ASSERT_VK_SUCCESS(vkCreateImage(device(), &ci, NULL, &image)); // Allocate & bind memory VkPhysicalDeviceMemoryProperties phys_mem_props; vkGetPhysicalDeviceMemoryProperties(gpu(), &phys_mem_props); VkMemoryRequirements mem_reqs; vkGetImageMemoryRequirements(device(), image, &mem_reqs); VkDeviceMemory mem_obj = VK_NULL_HANDLE; VkMemoryPropertyFlagBits mem_props = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; for (uint32_t type = 0; type < phys_mem_props.memoryTypeCount; type++) { if ((mem_reqs.memoryTypeBits & (1 << type)) && ((phys_mem_props.memoryTypes[type].propertyFlags & mem_props) == mem_props)) { VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = mem_reqs.size; alloc_info.memoryTypeIndex = type; ASSERT_VK_SUCCESS(vkAllocateMemory(device(), &alloc_info, NULL, &mem_obj)); break; } } if (VK_NULL_HANDLE == mem_obj) { printf("%s Unable to allocate image memory. Skipping test.\n", kSkipPrefix); vkDestroyImage(device(), image, NULL); return; } ASSERT_VK_SUCCESS(vkBindImageMemory(device(), image, mem_obj, 0)); // Copy plane 0 to plane 2 VkImageCopy copyRegion = {}; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset = {0, 0, 0}; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset = {0, 0, 0}; copyRegion.extent.width = 128; copyRegion.extent.height = 128; copyRegion.extent.depth = 1; m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); m_commandBuffer->CopyImage(image, VK_IMAGE_LAYOUT_GENERAL, image, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); #if 0 // Copy to/from buffer VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.pNext = NULL; bci.size = 128 * 128 * 3; bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; ASSERT_VK_SUCCESS(vkCreateBuffer(device(), &bci, NULL, &buffer)); VkBufferImageCopy copy_region = {}; copy_region.bufferRowLength = 128; copy_region.bufferImageHeight = 128; copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR; copy_region.imageSubresource.layerCount = 1; copy_region.imageExtent.height = 64; copy_region.imageExtent.width = 64; copy_region.imageExtent.depth = 1; m_errorMonitor->ExpectSuccess(); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image,VK_IMAGE_LAYOUT_GENERAL, buffer, 1, &copy_region); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer, image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); #endif vkFreeMemory(device(), mem_obj, NULL); vkDestroyImage(device(), image, NULL); // Test that changing the layout of ASPECT_COLOR also changes the layout of the individual planes VkBufferObj buffer; VkMemoryPropertyFlags reqs = 0; buffer.init_as_src(*m_device, (VkDeviceSize)128 * 128 * 3, reqs); VkImageObj mpimage(m_device); mpimage.Init(256, 256, 1, 
VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkBufferImageCopy copy_region = {}; copy_region.bufferRowLength = 128; copy_region.bufferImageHeight = 128; copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR; copy_region.imageSubresource.layerCount = 1; copy_region.imageExtent.height = 64; copy_region.imageExtent.width = 64; copy_region.imageExtent.depth = 1; vkResetCommandBuffer(m_commandBuffer->handle(), 0); m_commandBuffer->begin(); mpimage.ImageMemoryBarrier(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), mpimage.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyNotFound(); // Test to verify that views of multiplanar images have layouts tracked correctly // by changing the image's layout then using a view of that image VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = mpimage.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &ivci, nullptr, &view); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkDescriptorImageInfo image_info{}; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; image_info.imageView = view; image_info.sampler = sampler; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &image_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; img_barrier.image = mpimage.handle(); img_barrier.srcQueueFamilyIndex = 
VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); vkQueueWaitIdle(m_device->m_queue); vkDestroyImageView(m_device->device(), view, NULL); vkDestroySampler(m_device->device(), sampler, nullptr); } TEST_F(VkPositiveLayerTest, ApiVersionZero) { TEST_DESCRIPTION("Check that apiVersion = 0 is valid."); m_errorMonitor->ExpectSuccess(); app_info.apiVersion = 0U; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, DrawIndirectCountKHR) { TEST_DESCRIPTION("Test covered valid usage for vkCmdDrawIndirectCountKHR"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME); } else { printf(" VK_KHR_draw_indirect_count Extension not supported, skipping test\n"); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkMemoryRequirements memory_requirements; VkMemoryAllocateInfo memory_allocate_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO}; auto vkCmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdDrawIndirectCountKHR"); char const *vsSource = "#version 450\n" "\n" "void main() { gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = vec4(1, 0, 0, 1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkDescriptorSetObj descriptor_set(m_device); descriptor_set.AppendDummy(); descriptor_set.CreateVKDescriptorSet(m_commandBuffer); VkResult err = pipe.CreateVKPipeline(descriptor_set.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); 
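    // Bind descriptors and dynamic viewport/scissor state, then create the indirect draw and count buffers; the draw buffer is
    // intentionally left without bound memory at first so the subsequent draw exercises VUID-vkCmdDrawIndirectCountKHR-buffer-03104.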
m_commandBuffer->BindDescriptorSet(descriptor_set); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); VkBufferCreateInfo buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; buffer_create_info.size = sizeof(VkDrawIndirectCommand); buffer_create_info.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; VkBuffer draw_buffer; vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &draw_buffer); VkBufferCreateInfo count_buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; count_buffer_create_info.size = sizeof(uint32_t); count_buffer_create_info.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; VkBuffer count_buffer; vkCreateBuffer(m_device->device(), &count_buffer_create_info, nullptr, &count_buffer); vkGetBufferMemoryRequirements(m_device->device(), count_buffer, &memory_requirements); memory_allocate_info.allocationSize = memory_requirements.size; m_device->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceMemory count_buffer_memory; vkAllocateMemory(m_device->device(), &memory_allocate_info, NULL, &count_buffer_memory); vkBindBufferMemory(m_device->device(), count_buffer, count_buffer_memory, 0); // VUID-vkCmdDrawIndirectCountKHR-buffer-03104 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-buffer-03104"); vkCmdDrawIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 0, 1, sizeof(VkDrawIndirectCommand)); m_errorMonitor->VerifyFound(); vkGetBufferMemoryRequirements(m_device->device(), draw_buffer, &memory_requirements); memory_allocate_info.allocationSize = memory_requirements.size; m_device->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceMemory draw_buffer_memory; vkAllocateMemory(m_device->device(), &memory_allocate_info, NULL, &draw_buffer_memory); vkBindBufferMemory(m_device->device(), draw_buffer, draw_buffer_memory, 0); VkBuffer count_buffer_unbound; vkCreateBuffer(m_device->device(), &count_buffer_create_info, nullptr, &count_buffer_unbound); // VUID-vkCmdDrawIndirectCountKHR-countBuffer-03106 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-countBuffer-03106"); vkCmdDrawIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer_unbound, 0, 1, sizeof(VkDrawIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndirectCountKHR-offset-03108 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-offset-03108"); vkCmdDrawIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 1, count_buffer, 0, 1, sizeof(VkDrawIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndirectCountKHR-countBufferOffset-03109 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-countBufferOffset-03109"); vkCmdDrawIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 1, 1, sizeof(VkDrawIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndirectCountKHR-stride-03110 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-stride-03110"); vkCmdDrawIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 0, 1, 1); m_errorMonitor->VerifyFound(); // 
TODO: These covered VUIDs aren't tested. There is also no test coverage for the core Vulkan 1.0 vkCmdDraw* equivalent of // these: // VUID-vkCmdDrawIndirectCountKHR-renderPass-03113 // VUID-vkCmdDrawIndirectCountKHR-subpass-03114 // VUID-vkCmdDrawIndirectCountKHR-None-03120 m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyBuffer(m_device->device(), draw_buffer, 0); vkDestroyBuffer(m_device->device(), count_buffer, 0); vkDestroyBuffer(m_device->device(), count_buffer_unbound, 0); vkFreeMemory(m_device->device(), draw_buffer_memory, 0); vkFreeMemory(m_device->device(), count_buffer_memory, 0); } TEST_F(VkLayerTest, DrawIndexedIndirectCountKHR) { TEST_DESCRIPTION("Test covered valid usage for vkCmdDrawIndexedIndirectCountKHR"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME); } else { printf(" VK_KHR_draw_indirect_count Extension not supported, skipping test\n"); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkMemoryRequirements memory_requirements; VkMemoryAllocateInfo memory_allocate_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO}; auto vkCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdDrawIndexedIndirectCountKHR"); char const *vsSource = "#version 450\n" "\n" "void main() { gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = vec4(1, 0, 0, 1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_commandBuffer->BindDescriptorSet(descriptorSet); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); VkBufferCreateInfo buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; buffer_create_info.size = sizeof(VkDrawIndexedIndirectCommand); buffer_create_info.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; VkBuffer draw_buffer; vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &draw_buffer); vkGetBufferMemoryRequirements(m_device->device(), draw_buffer, &memory_requirements); memory_allocate_info.allocationSize = memory_requirements.size; m_device->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceMemory draw_buffer_memory; vkAllocateMemory(m_device->device(), &memory_allocate_info, NULL, &draw_buffer_memory); vkBindBufferMemory(m_device->device(), draw_buffer, draw_buffer_memory, 0); VkBufferCreateInfo count_buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; count_buffer_create_info.size = sizeof(uint32_t); 
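    // Finish setting up the count buffer and then an index buffer; the first draw below is deliberately recorded before
    // vkCmdBindIndexBuffer so it exercises VUID-vkCmdDrawIndexedIndirectCountKHR-None-03152.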
count_buffer_create_info.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; VkBuffer count_buffer; vkCreateBuffer(m_device->device(), &count_buffer_create_info, nullptr, &count_buffer); vkGetBufferMemoryRequirements(m_device->device(), count_buffer, &memory_requirements); memory_allocate_info.allocationSize = memory_requirements.size; m_device->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceMemory count_buffer_memory; vkAllocateMemory(m_device->device(), &memory_allocate_info, NULL, &count_buffer_memory); vkBindBufferMemory(m_device->device(), count_buffer, count_buffer_memory, 0); VkBufferCreateInfo index_buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; index_buffer_create_info.size = sizeof(uint32_t); index_buffer_create_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; VkBuffer index_buffer; vkCreateBuffer(m_device->device(), &index_buffer_create_info, nullptr, &index_buffer); vkGetBufferMemoryRequirements(m_device->device(), index_buffer, &memory_requirements); memory_allocate_info.allocationSize = memory_requirements.size; m_device->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceMemory index_buffer_memory; vkAllocateMemory(m_device->device(), &memory_allocate_info, NULL, &index_buffer_memory); vkBindBufferMemory(m_device->device(), index_buffer, index_buffer_memory, 0); // VUID-vkCmdDrawIndexedIndirectCountKHR-None-03152 (partial - only tests whether the index buffer is bound) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-None-03152"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 0, 1, sizeof(VkDrawIndexedIndirectCommand)); m_errorMonitor->VerifyFound(); vkCmdBindIndexBuffer(m_commandBuffer->handle(), index_buffer, 0, VK_INDEX_TYPE_UINT32); VkBuffer draw_buffer_unbound; vkCreateBuffer(m_device->device(), &count_buffer_create_info, nullptr, &draw_buffer_unbound); // VUID-vkCmdDrawIndexedIndirectCountKHR-buffer-03136 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-buffer-03136"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer_unbound, 0, count_buffer, 0, 1, sizeof(VkDrawIndexedIndirectCommand)); m_errorMonitor->VerifyFound(); VkBuffer count_buffer_unbound; vkCreateBuffer(m_device->device(), &count_buffer_create_info, nullptr, &count_buffer_unbound); // VUID-vkCmdDrawIndexedIndirectCountKHR-countBuffer-03138 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-countBuffer-03138"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer_unbound, 0, 1, sizeof(VkDrawIndexedIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndexedIndirectCountKHR-offset-03140 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-offset-03140"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 1, count_buffer, 0, 1, sizeof(VkDrawIndexedIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndexedIndirectCountKHR-countBufferOffset-03141 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-countBufferOffset-03141"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 1, 1, 
sizeof(VkDrawIndexedIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndexedIndirectCountKHR-stride-03142 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-stride-03142"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 0, 1, 1); m_errorMonitor->VerifyFound(); // TODO: These covered VUIDs aren't tested. There is also no test coverage for the core Vulkan 1.0 vkCmdDraw* equivalent of // these: // VUID-vkCmdDrawIndexedIndirectCountKHR-renderPass-03145 // VUID-vkCmdDrawIndexedIndirectCountKHR-subpass-03146 // VUID-vkCmdDrawIndexedIndirectCountKHR-None-03152 (partial) m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyBuffer(m_device->device(), draw_buffer, 0); vkDestroyBuffer(m_device->device(), draw_buffer_unbound, 0); vkDestroyBuffer(m_device->device(), count_buffer, 0); vkDestroyBuffer(m_device->device(), count_buffer_unbound, 0); vkDestroyBuffer(m_device->device(), index_buffer, 0); vkFreeMemory(m_device->device(), draw_buffer_memory, 0); vkFreeMemory(m_device->device(), count_buffer_memory, 0); vkFreeMemory(m_device->device(), index_buffer_memory, 0); } TEST_F(VkLayerTest, ExclusiveScissorNV) { TEST_DESCRIPTION("Test VK_NV_scissor_exclusive with multiViewport disabled."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 1> required_device_extensions = {{VK_NV_SCISSOR_EXCLUSIVE_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables exclusive scissor but disables multiViewport auto exclusive_scissor_features = lvl_init_struct<VkPhysicalDeviceExclusiveScissorFeaturesNV>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&exclusive_scissor_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); features2.features.multiViewport = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Based on PSOViewportStateTests { VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[100] = {scissor, scissor}; using std::vector; struct TestCase { uint32_t viewport_count; VkViewport *viewports; uint32_t scissor_count; VkRect2D *scissors; uint32_t exclusive_scissor_count; VkRect2D *exclusive_scissors; vector<std::string> vuids; }; vector<TestCase> test_cases = { {1, viewports, 1, scissors, 2, scissors, {"VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02027", "VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02029"}}, {1, viewports, 1, 
scissors, 100, scissors, {"VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02027", "VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02028", "VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02029"}}, {1, viewports, 1, scissors, 1, nullptr, {"VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-pDynamicStates-02030"}}, }; for (const auto &test_case : test_cases) { VkPipelineViewportExclusiveScissorStateCreateInfoNV exc = { VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV}; const auto break_vp = [&test_case, &exc](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; helper.vp_state_ci_.pNext = &exc; exc.exclusiveScissorCount = test_case.exclusive_scissor_count; exc.pExclusiveScissors = test_case.exclusive_scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } } // Based on SetDynScissorParamTests { auto vkCmdSetExclusiveScissorNV = (PFN_vkCmdSetExclusiveScissorNV)vkGetDeviceProcAddr(m_device->device(), "vkCmdSetExclusiveScissorNV"); const VkRect2D scissor = {{0, 0}, {16, 16}}; const VkRect2D scissors[] = {scissor, scissor}; m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02035"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 1, 1, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdSetExclusiveScissorNV: parameter exclusiveScissorCount must be greater than 0"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetExclusiveScissorNV-exclusiveScissorCount-02036"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 0, 2, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdSetExclusiveScissorNV: parameter exclusiveScissorCount must be greater than 0"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02035"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 1, 0, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02035"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetExclusiveScissorNV-exclusiveScissorCount-02036"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 1, 2, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdSetExclusiveScissorNV: required parameter pExclusiveScissors specified as NULL"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 0, 1, nullptr); m_errorMonitor->VerifyFound(); struct TestCase { VkRect2D scissor; std::string vuid; }; std::vector<TestCase> test_cases = { {{{-1, 0}, {16, 16}}, "VUID-vkCmdSetExclusiveScissorNV-x-02037"}, {{{0, -1}, {16, 16}}, "VUID-vkCmdSetExclusiveScissorNV-x-02037"}, {{{1, 0}, {INT32_MAX, 16}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02038"}, {{{INT32_MAX, 0}, {1, 16}}, 
"VUID-vkCmdSetExclusiveScissorNV-offset-02038"}, {{{0, 0}, {uint32_t{INT32_MAX} + 1, 16}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02038"}, {{{0, 1}, {16, INT32_MAX}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02039"}, {{{0, INT32_MAX}, {16, 1}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02039"}, {{{0, 0}, {16, uint32_t{INT32_MAX} + 1}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02039"}}; for (const auto &test_case : test_cases) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuid); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 0, 1, &test_case.scissor); m_errorMonitor->VerifyFound(); } m_commandBuffer->end(); } } TEST_F(VkLayerTest, ShadingRateImageNV) { TEST_DESCRIPTION("Test VK_NV_shading_rate_image."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 1> required_device_extensions = {{VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables shading_rate_image but disables multiViewport auto shading_rate_image_features = lvl_init_struct<VkPhysicalDeviceShadingRateImageFeaturesNV>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&shading_rate_image_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); features2.features.multiViewport = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Test shading rate image creation VkImage image = VK_NULL_HANDLE; VkResult result = VK_RESULT_MAX_ENUM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8_UINT; image_create_info.extent.width = 4; image_create_info.extent.height = 4; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT; // image type must be 2D image_create_info.imageType = VK_IMAGE_TYPE_3D; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-02082"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS 
== result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.imageType = VK_IMAGE_TYPE_2D; // must be single sample image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02083"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; // tiling must be optimal image_create_info.tiling = VK_IMAGE_TILING_LINEAR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-tiling-02084"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Should succeed. result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyNotFound(); // bind memory to the image VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); result = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(result); result = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(result); // Test image view creation VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image; ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8_UINT; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // view type must be 2D or 2D_ARRAY ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-02086"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01003"); result = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImageView(m_device->device(), view, NULL); view = VK_NULL_HANDLE; } ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; // format must be R8_UINT ivci.format = VK_FORMAT_R8_UNORM; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-02087"); result = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImageView(m_device->device(), view, NULL); view = VK_NULL_HANDLE; } ivci.format = VK_FORMAT_R8_UINT; vkCreateImageView(m_device->device(), &ivci, nullptr, &view); m_errorMonitor->VerifyNotFound(); // Test pipeline creation VkPipelineViewportShadingRateImageStateCreateInfoNV vsrisci = { VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV}; VkViewport viewport = {0.0f, 0.0f, 64.0f, 
64.0f, 0.0f, 1.0f}; VkViewport viewports[20] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[20] = {scissor, scissor}; VkDynamicState dynPalette = VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV; VkPipelineDynamicStateCreateInfo dyn = {VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, nullptr, 0, 1, &dynPalette}; // viewportCount must be 0 or 1 when multiViewport is disabled { const auto break_vp = [&](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = 2; helper.vp_state_ci_.pViewports = viewports; helper.vp_state_ci_.scissorCount = 2; helper.vp_state_ci_.pScissors = scissors; helper.vp_state_ci_.pNext = &vsrisci; helper.dyn_state_ci_ = dyn; vsrisci.shadingRateImageEnable = VK_TRUE; vsrisci.viewportCount = 2; }; CreatePipelineHelper::OneshotTest( *this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector<std::string>({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-viewportCount-02054", "VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"})); } // viewportCounts must match { const auto break_vp = [&](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = 1; helper.vp_state_ci_.pViewports = viewports; helper.vp_state_ci_.scissorCount = 1; helper.vp_state_ci_.pScissors = scissors; helper.vp_state_ci_.pNext = &vsrisci; helper.dyn_state_ci_ = dyn; vsrisci.shadingRateImageEnable = VK_TRUE; vsrisci.viewportCount = 0; }; CreatePipelineHelper::OneshotTest( *this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector<std::string>({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-shadingRateImageEnable-02056"})); } // pShadingRatePalettes must not be NULL. { const auto break_vp = [&](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = 1; helper.vp_state_ci_.pViewports = viewports; helper.vp_state_ci_.scissorCount = 1; helper.vp_state_ci_.pScissors = scissors; helper.vp_state_ci_.pNext = &vsrisci; vsrisci.shadingRateImageEnable = VK_TRUE; vsrisci.viewportCount = 1; }; CreatePipelineHelper::OneshotTest( *this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector<std::string>({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-pDynamicStates-02057"})); } // Create an image without the SRI bit VkImageObj nonSRIimage(m_device); nonSRIimage.Init(256, 256, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(nonSRIimage.initialized()); VkImageView nonSRIview = nonSRIimage.targetView(VK_FORMAT_B8G8R8A8_UNORM); // Test SRI layout on non-SRI image VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = nullptr; img_barrier.srcAccessMask = 0; img_barrier.dstAccessMask = 0; img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV; img_barrier.image = nonSRIimage.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; m_commandBuffer->begin(); // Error trying to convert it to SRI layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-02088"); vkCmdPipelineBarrier(m_commandBuffer->handle(), 
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // succeed converting it to GENERAL img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyNotFound(); // Test vkCmdBindShadingRateImageNV errors auto vkCmdBindShadingRateImageNV = (PFN_vkCmdBindShadingRateImageNV)vkGetDeviceProcAddr(m_device->device(), "vkCmdBindShadingRateImageNV"); // if the view is non-NULL, it must be R8_UINT, USAGE_SRI, image layout must match, layout must be valid m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindShadingRateImageNV-imageView-02060"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindShadingRateImageNV-imageView-02061"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindShadingRateImageNV-imageView-02062"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063"); vkCmdBindShadingRateImageNV(m_commandBuffer->handle(), nonSRIview, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); m_errorMonitor->VerifyFound(); // Test vkCmdSetViewportShadingRatePaletteNV errors auto vkCmdSetViewportShadingRatePaletteNV = (PFN_vkCmdSetViewportShadingRatePaletteNV)vkGetDeviceProcAddr(m_device->device(), "vkCmdSetViewportShadingRatePaletteNV"); VkShadingRatePaletteEntryNV paletteEntries[100] = {}; VkShadingRatePaletteNV palette = {100, paletteEntries}; VkShadingRatePaletteNV palettes[] = {palette, palette}; // errors on firstViewport/viewportCount m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02066"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02067"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02068"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewportShadingRatePaletteNV-viewportCount-02069"); vkCmdSetViewportShadingRatePaletteNV(m_commandBuffer->handle(), 20, 2, palettes); m_errorMonitor->VerifyFound(); // shadingRatePaletteEntryCount must be in range m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071"); vkCmdSetViewportShadingRatePaletteNV(m_commandBuffer->handle(), 0, 1, palettes); m_errorMonitor->VerifyFound(); VkCoarseSampleLocationNV locations[100] = { {0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1}, {0, 1, 1}, // duplicate {1000, 0, 0}, // pixelX too large {0, 1000, 0}, // pixelY too large {0, 0, 1000}, // sample too large }; // Test custom sample orders, both via pipeline state and via dynamic state { VkCoarseSampleOrderCustomNV sampOrdBadShadingRate = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV, 1, 1, locations}; VkCoarseSampleOrderCustomNV sampOrdBadSampleCount = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 3, 1, locations}; VkCoarseSampleOrderCustomNV sampOrdBadSampleLocationCount = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 2, locations}; VkCoarseSampleOrderCustomNV sampOrdDuplicateLocations = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 1 * 2 * 2, 
&locations[1]}; VkCoarseSampleOrderCustomNV sampOrdOutOfRangeLocations = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 1 * 2 * 2, &locations[4]}; VkCoarseSampleOrderCustomNV sampOrdTooLargeSampleLocationCount = { VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV, 4, 64, &locations[8]}; VkCoarseSampleOrderCustomNV sampOrdGood = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 1 * 2 * 2, &locations[0]}; VkPipelineViewportCoarseSampleOrderStateCreateInfoNV csosci = { VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV}; csosci.sampleOrderType = VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV; csosci.customSampleOrderCount = 1; using std::vector; struct TestCase { const VkCoarseSampleOrderCustomNV *order; vector<std::string> vuids; }; vector<TestCase> test_cases = { {&sampOrdBadShadingRate, {"VUID-VkCoarseSampleOrderCustomNV-shadingRate-02073"}}, {&sampOrdBadSampleCount, {"VUID-VkCoarseSampleOrderCustomNV-sampleCount-02074", "VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02075"}}, {&sampOrdBadSampleLocationCount, {"VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02075"}}, {&sampOrdDuplicateLocations, {"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077"}}, {&sampOrdOutOfRangeLocations, {"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077", "VUID-VkCoarseSampleLocationNV-pixelX-02078", "VUID-VkCoarseSampleLocationNV-pixelY-02079", "VUID-VkCoarseSampleLocationNV-sample-02080"}}, {&sampOrdTooLargeSampleLocationCount, {"VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02076", "VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077"}}, {&sampOrdGood, {}}, }; for (const auto &test_case : test_cases) { const auto break_vp = [&](CreatePipelineHelper &helper) { helper.vp_state_ci_.pNext = &csosci; csosci.pCustomSampleOrders = test_case.order; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } // Test vkCmdSetCoarseSampleOrderNV errors auto vkCmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV)vkGetDeviceProcAddr(m_device->device(), "vkCmdSetCoarseSampleOrderNV"); for (const auto &test_case : test_cases) { for (uint32_t i = 0; i < test_case.vuids.size(); ++i) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids[i]); } vkCmdSetCoarseSampleOrderNV(m_commandBuffer->handle(), VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV, 1, test_case.order); if (test_case.vuids.size()) { m_errorMonitor->VerifyFound(); } else { m_errorMonitor->VerifyNotFound(); } } m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetCoarseSampleOrderNV-sampleOrderType-02081"); vkCmdSetCoarseSampleOrderNV(m_commandBuffer->handle(), VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV, 1, &sampOrdGood); m_errorMonitor->VerifyFound(); } m_commandBuffer->end(); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), image_memory, NULL); } TEST_F(VkLayerTest, CornerSampledImageNV) { TEST_DESCRIPTION("Test VK_NV_corner_sampled_image."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 1> 
required_device_extensions = {{VK_NV_CORNER_SAMPLED_IMAGE_EXTENSION_NAME}};
    for (auto device_extension : required_device_extensions) {
        if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
            m_device_extension_names.push_back(device_extension);
        } else {
            printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
            return;
        }
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    // Create a device that enables corner-sampled images
    auto corner_sampled_image_features = lvl_init_struct<VkPhysicalDeviceCornerSampledImageFeaturesNV>();
    auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&corner_sampled_image_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    VkImage image = VK_NULL_HANDLE;
    VkResult result = VK_RESULT_MAX_ENUM;
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_1D;
    image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
    image_create_info.extent.width = 2;
    image_create_info.extent.height = 1;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    image_create_info.queueFamilyIndexCount = 0;
    image_create_info.pQueueFamilyIndices = NULL;
    image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    image_create_info.flags = VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV;

    // image type must be 2D or 3D
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-02050");
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }

    // cube/depth not supported
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.extent.height = 2;
    image_create_info.format = VK_FORMAT_D24_UNORM_S8_UINT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-02051");
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }
    image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;

    // 2D width/height must be > 1
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.extent.height = 1;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-02052");
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }

    // 3D width/height/depth must be > 1
    image_create_info.imageType = VK_IMAGE_TYPE_3D;
    image_create_info.extent.height = 2;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-02053");
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.imageType = VK_IMAGE_TYPE_2D; // Valid # of mip levels image_create_info.extent = {7, 7, 1}; image_create_info.mipLevels = 3; // 3 = ceil(log2(7)) result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyNotFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.extent = {8, 8, 1}; image_create_info.mipLevels = 3; // 3 = ceil(log2(8)) result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyNotFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.extent = {9, 9, 1}; image_create_info.mipLevels = 3; // 4 = ceil(log2(9)) result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyNotFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } // Invalid # of mip levels image_create_info.extent = {8, 8, 1}; image_create_info.mipLevels = 4; // 3 = ceil(log2(8)) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00958"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } } TEST_F(VkLayerTest, MeshShaderNV) { TEST_DESCRIPTION("Test VK_NV_mesh_shader."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 1> required_device_extensions = {{VK_NV_MESH_SHADER_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables mesh_shader auto mesh_shader_features = lvl_init_struct<VkPhysicalDeviceMeshShaderFeaturesNV>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&mesh_shader_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); features2.features.multiDrawIndirect = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); static const char vertShaderText[] = "#version 450\n" "vec2 vertices[3];\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" " gl_PointSize = 1.0f;\n" "}\n"; static const char meshShaderText[] = "#version 450\n" "#extension GL_NV_mesh_shader : require\n" "layout(local_size_x = 1) in;\n" "layout(max_vertices = 
3) out;\n" "layout(max_primitives = 1) out;\n" "layout(triangles) out;\n" "void main() {\n" " gl_MeshVerticesNV[0].gl_Position = vec4(-1.0, -1.0, 0, 1);\n" " gl_MeshVerticesNV[1].gl_Position = vec4( 1.0, -1.0, 0, 1);\n" " gl_MeshVerticesNV[2].gl_Position = vec4( 0.0, 1.0, 0, 1);\n" " gl_PrimitiveIndicesNV[0] = 0;\n" " gl_PrimitiveIndicesNV[1] = 1;\n" " gl_PrimitiveIndicesNV[2] = 2;\n" " gl_PrimitiveCountNV = 1;\n" "}\n"; VkShaderObj vs(m_device, vertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ms(m_device, meshShaderText, VK_SHADER_STAGE_MESH_BIT_NV, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Test pipeline creation { // can't mix mesh with vertex const auto break_vp = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo(), ms.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector<std::string>({"VUID-VkGraphicsPipelineCreateInfo-pStages-02095"})); // vertex or mesh must be present const auto break_vp2 = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {fs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, break_vp2, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector<std::string>({"VUID-VkGraphicsPipelineCreateInfo-stage-02096"})); // vertexinput and inputassembly must be valid when vertex stage is present const auto break_vp3 = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; helper.gp_ci_.pVertexInputState = nullptr; helper.gp_ci_.pInputAssemblyState = nullptr; }; CreatePipelineHelper::OneshotTest(*this, break_vp3, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector<std::string>({"VUID-VkGraphicsPipelineCreateInfo-pStages-02097", "VUID-VkGraphicsPipelineCreateInfo-pStages-02098"})); } PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV = (PFN_vkCmdDrawMeshTasksIndirectNV)vkGetInstanceProcAddr(instance(), "vkCmdDrawMeshTasksIndirectNV"); VkBufferCreateInfo buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; buffer_create_info.size = sizeof(uint32_t); buffer_create_info.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; VkBuffer buffer; VkResult result = vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &buffer); ASSERT_VK_SUCCESS(result); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawMeshTasksIndirectNV-drawCount-02146"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawMeshTasksIndirectNV-drawCount-02147"); vkCmdDrawMeshTasksIndirectNV(m_commandBuffer->handle(), buffer, 0, 2, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); vkDestroyBuffer(m_device->device(), buffer, 0); } TEST_F(VkLayerTest, MeshShaderDisabledNV) { TEST_DESCRIPTION("Test VK_NV_mesh_shader VUs with NV_mesh_shader disabled."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-02107"); vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-02108"); 
vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResetEvent-stageMask-02109"); vkCmdResetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResetEvent-stageMask-02110"); vkCmdResetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-srcStageMask-02111"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-dstStageMask-02113"); vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, 0, nullptr, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-srcStageMask-02112"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-dstStageMask-02114"); vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, 0, nullptr, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-02115"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstStageMask-02117"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, 0, 0, nullptr, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-02116"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstStageMask-02118"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, 0, 0, nullptr, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); VkSemaphoreCreateInfo semaphore_create_info = {}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; VkSemaphore semaphore; ASSERT_VK_SUCCESS(vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore)); VkPipelineStageFlags stage_flags = VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV; VkSubmitInfo submit_info = {}; // Signal the semaphore so the next test can wait on it. 
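    // The wait-submit that follows uses mesh/task stage bits in pWaitDstStageMask and should trigger
    // VUID-VkSubmitInfo-pWaitDstStageMask-02089 and -02090 while the extension is disabled.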
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = nullptr; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = &stage_flags; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubmitInfo-pWaitDstStageMask-02089"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubmitInfo-pWaitDstStageMask-02090"); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkPipelineShaderStageCreateInfo meshStage = {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO}; meshStage = vs.GetStageCreateInfo(); meshStage.stage = VK_SHADER_STAGE_MESH_BIT_NV; VkPipelineShaderStageCreateInfo taskStage = {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO}; taskStage = vs.GetStageCreateInfo(); taskStage.stage = VK_SHADER_STAGE_TASK_BIT_NV; // mesh and task shaders not supported const auto break_vp = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {meshStage, taskStage, vs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest( *this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector<std::string>({"VUID-VkPipelineShaderStageCreateInfo-pName-00707", "VUID-VkPipelineShaderStageCreateInfo-pName-00707", "VUID-VkPipelineShaderStageCreateInfo-stage-02091", "VUID-VkPipelineShaderStageCreateInfo-stage-02092"})); vkDestroyEvent(m_device->device(), event, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); } TEST_F(VkLayerTest, InlineUniformBlockEXT) { TEST_DESCRIPTION("Test VK_EXT_inline_uniform_block."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 1> required_device_extensions = {{VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } // Enable descriptor indexing if supported, but don't require it. bool supportsDescriptorIndexing = DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); if (supportsDescriptorIndexing) { m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); auto descriptor_indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(); void *pNext = supportsDescriptorIndexing ? 
&descriptor_indexing_features : nullptr; // Create a device that enables inline_uniform_block auto inline_uniform_block_features = lvl_init_struct<VkPhysicalDeviceInlineUniformBlockFeaturesEXT>(pNext); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&inline_uniform_block_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); assert(vkGetPhysicalDeviceProperties2KHR != nullptr); // Get the inline uniform block limits auto inline_uniform_props = lvl_init_struct<VkPhysicalDeviceInlineUniformBlockPropertiesEXT>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&inline_uniform_props); vkGetPhysicalDeviceProperties2KHR(gpu(), &prop2); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); VkDescriptorSetLayoutBinding dslb = {}; std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {}; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; VkDescriptorSetLayout ds_layout = {}; // Test too many bindings dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; dslb.descriptorCount = 4; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; uint32_t maxBlocks = std::max(inline_uniform_props.maxPerStageDescriptorInlineUniformBlocks, inline_uniform_props.maxDescriptorSetInlineUniformBlocks); for (uint32_t i = 0; i < 1 + maxBlocks; ++i) { dslb.binding = i; dslb_vec.push_back(dslb); } ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02216"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217"); VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, nullptr); ds_layout = VK_NULL_HANDLE; // Single binding that's too large and is not a multiple of 4 dslb.binding = 0; dslb.descriptorCount = inline_uniform_props.maxInlineUniformBlockSize + 1; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &dslb; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02209"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02210"); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); m_errorMonitor->VerifyFound(); 
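// Clean up the oversized layout before moving on to the pool-size checks.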
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, nullptr); ds_layout = VK_NULL_HANDLE; // Pool size must be a multiple of 4 VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; ds_type_count.descriptorCount = 33; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = 0; ds_pool_ci.maxSets = 2; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolSize-type-02218"); err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); m_errorMonitor->VerifyFound(); if (ds_pool) { vkDestroyDescriptorPool(m_device->handle(), ds_pool, nullptr); ds_pool = VK_NULL_HANDLE; } // Create a valid pool ds_type_count.descriptorCount = 32; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); m_errorMonitor->VerifyNotFound(); // Create two valid sets with 8 bytes each dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; dslb.descriptorCount = 8; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 1; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = &dslb_vec[0]; err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); m_errorMonitor->VerifyNotFound(); VkDescriptorSet descriptor_sets[2]; VkDescriptorSetLayout set_layouts[2] = {ds_layout, ds_layout}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 2; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = set_layouts; err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, descriptor_sets); m_errorMonitor->VerifyNotFound(); // Test invalid VkWriteDescriptorSet parameters (array element and size must be multiple of 4) VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_sets[0]; descriptor_write.dstBinding = 0; descriptor_write.dstArrayElement = 0; descriptor_write.descriptorCount = 3; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; uint32_t dummyData[8] = {}; VkWriteDescriptorSetInlineUniformBlockEXT write_inline_uniform = {}; write_inline_uniform.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT; write_inline_uniform.dataSize = 3; write_inline_uniform.pData = &dummyData[0]; descriptor_write.pNext = &write_inline_uniform; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-02220"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); descriptor_write.dstArrayElement = 1; descriptor_write.descriptorCount = 4; write_inline_uniform.dataSize = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-02219"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); descriptor_write.pNext = nullptr; descriptor_write.dstArrayElement = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-02221"); vkUpdateDescriptorSets(m_device->device(), 
1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); descriptor_write.pNext = &write_inline_uniform; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); // Test invalid VkCopyDescriptorSet parameters (array element and size must be multiple of 4) VkCopyDescriptorSet copy_ds_update = {}; copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = descriptor_sets[0]; copy_ds_update.srcBinding = 0; copy_ds_update.srcArrayElement = 0; copy_ds_update.dstSet = descriptor_sets[1]; copy_ds_update.dstBinding = 0; copy_ds_update.dstArrayElement = 0; copy_ds_update.descriptorCount = 4; copy_ds_update.srcArrayElement = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCopyDescriptorSet-srcBinding-02223"); vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); copy_ds_update.srcArrayElement = 0; copy_ds_update.dstArrayElement = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCopyDescriptorSet-dstBinding-02224"); vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); copy_ds_update.dstArrayElement = 0; copy_ds_update.descriptorCount = 5; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCopyDescriptorSet-srcBinding-02225"); vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); copy_ds_update.descriptorCount = 4; vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyNotFound(); vkDestroyDescriptorPool(m_device->handle(), ds_pool, nullptr); vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, nullptr); } TEST_F(VkLayerTest, FramebufferMixedSamplesNV) { TEST_DESCRIPTION("Verify VK_NV_framebuffer_mixed_samples."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); struct TestCase { VkSampleCountFlagBits color_samples; VkSampleCountFlagBits depth_samples; VkSampleCountFlagBits raster_samples; VkBool32 depth_test; VkBool32 sample_shading; uint32_t table_count; bool positiveTest; std::string vuid; }; std::vector<TestCase> test_cases = { {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_8_BIT, VK_FALSE, VK_FALSE, 4, false, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_8_BIT, VK_FALSE, VK_FALSE, 2, true, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, VK_TRUE, VK_FALSE, 1, false, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_8_BIT, VK_SAMPLE_COUNT_8_BIT, VK_TRUE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_1_BIT, VK_FALSE, 
VK_FALSE, 1, false, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_TRUE, 1, false, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757"}}; for (const auto &test_case : test_cases) { VkAttachmentDescription att[2] = {{}, {}}; att[0].format = VK_FORMAT_R8G8B8A8_UNORM; att[0].samples = test_case.color_samples; att[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; att[1].format = VK_FORMAT_D24_UNORM_S8_UINT; att[1].samples = test_case.depth_samples; att[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkAttachmentReference cr = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference dr = {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription sp = {}; sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; sp.colorAttachmentCount = 1; sp.pColorAttachments = &cr; sp.pResolveAttachments = NULL; sp.pDepthStencilAttachment = &dr; VkRenderPassCreateInfo rpi = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO}; rpi.attachmentCount = 2; rpi.pAttachments = att; rpi.subpassCount = 1; rpi.pSubpasses = &sp; VkRenderPass rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"); VkResult err = vkCreateRenderPass(m_device->device(), &rpi, nullptr, &rp); m_errorMonitor->VerifyNotFound(); ASSERT_VK_SUCCESS(err); VkPipelineDepthStencilStateCreateInfo ds = {VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO}; VkPipelineCoverageModulationStateCreateInfoNV cmi = {VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV}; // Create a dummy modulation table that can be used for the positive // coverageModulationTableCount test. 
std::vector<float> cm_table{}; const auto break_samples = [&cmi, &rp, &ds, &cm_table, &test_case](CreatePipelineHelper &helper) { cm_table.resize(test_case.raster_samples / test_case.color_samples); cmi.flags = 0; cmi.coverageModulationTableEnable = (test_case.table_count > 1); cmi.coverageModulationTableCount = test_case.table_count; cmi.pCoverageModulationTable = cm_table.data(); ds.depthTestEnable = test_case.depth_test; helper.pipe_ms_state_ci_.pNext = &cmi; helper.pipe_ms_state_ci_.rasterizationSamples = test_case.raster_samples; helper.pipe_ms_state_ci_.sampleShadingEnable = test_case.sample_shading; helper.gp_ci_.renderPass = rp; helper.gp_ci_.pDepthStencilState = &ds; }; CreatePipelineHelper::OneshotTest(*this, break_samples, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuid, test_case.positiveTest); vkDestroyRenderPass(m_device->device(), rp, nullptr); } } TEST_F(VkLayerTest, FramebufferMixedSamples) { TEST_DESCRIPTION("Verify that the expected VUIds are hits when VK_NV_framebuffer_mixed_samples is disabled."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); struct TestCase { VkSampleCountFlagBits color_samples; VkSampleCountFlagBits depth_samples; VkSampleCountFlagBits raster_samples; bool positiveTest; }; std::vector<TestCase> test_cases = { {VK_SAMPLE_COUNT_2_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, false}, // Fails vkCreateRenderPass and vkCreateGraphicsPipeline {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, false}, // Fails vkCreateGraphicsPipeline {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, true} // Pass }; for (const auto &test_case : test_cases) { VkAttachmentDescription att[2] = {{}, {}}; att[0].format = VK_FORMAT_R8G8B8A8_UNORM; att[0].samples = test_case.color_samples; att[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; att[1].format = VK_FORMAT_D24_UNORM_S8_UINT; att[1].samples = test_case.depth_samples; att[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkAttachmentReference cr = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference dr = {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription sp = {}; sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; sp.colorAttachmentCount = 1; sp.pColorAttachments = &cr; sp.pResolveAttachments = NULL; sp.pDepthStencilAttachment = &dr; VkRenderPassCreateInfo rpi = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO}; rpi.attachmentCount = 2; rpi.pAttachments = att; rpi.subpassCount = 1; rpi.pSubpasses = &sp; VkRenderPass rp; if (test_case.color_samples == test_case.depth_samples) { m_errorMonitor->ExpectSuccess(); } else { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"); } VkResult err = vkCreateRenderPass(m_device->device(), &rpi, nullptr, &rp); if (test_case.color_samples == test_case.depth_samples) { m_errorMonitor->VerifyNotFound(); } else { m_errorMonitor->VerifyFound(); continue; } ASSERT_VK_SUCCESS(err); VkPipelineDepthStencilStateCreateInfo ds = {VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO}; const auto break_samples = [&rp, &ds, &test_case](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.rasterizationSamples = test_case.raster_samples; helper.gp_ci_.renderPass = rp; helper.gp_ci_.pDepthStencilState = &ds; }; 
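// Positive table entries must build cleanly; negative ones must trigger the listed VUID.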
CreatePipelineHelper::OneshotTest(*this, break_samples, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757", test_case.positiveTest); vkDestroyRenderPass(m_device->device(), rp, nullptr); } } TEST_F(VkLayerTest, FragmentCoverageToColorNV) { TEST_DESCRIPTION("Verify VK_NV_fragment_coverage_to_color."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); struct TestCase { VkFormat format; VkBool32 enabled; uint32_t location; bool positive; }; const std::array<TestCase, 9> test_cases = {{ {VK_FORMAT_R8G8B8A8_UNORM, VK_FALSE, 0, true}, {VK_FORMAT_R8_UINT, VK_TRUE, 1, true}, {VK_FORMAT_R16_UINT, VK_TRUE, 1, true}, {VK_FORMAT_R16_SINT, VK_TRUE, 1, true}, {VK_FORMAT_R32_UINT, VK_TRUE, 1, true}, {VK_FORMAT_R32_SINT, VK_TRUE, 1, true}, {VK_FORMAT_R32_SINT, VK_TRUE, 2, false}, {VK_FORMAT_R8_SINT, VK_TRUE, 3, false}, {VK_FORMAT_R8G8B8A8_UNORM, VK_TRUE, 1, false}, }}; for (const auto &test_case : test_cases) { std::array<VkAttachmentDescription, 2> att = {{{}, {}}}; att[0].format = VK_FORMAT_R8G8B8A8_UNORM; att[0].samples = VK_SAMPLE_COUNT_1_BIT; att[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; att[1].format = VK_FORMAT_R8G8B8A8_UNORM; att[1].samples = VK_SAMPLE_COUNT_1_BIT; att[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[1].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; if (test_case.location < att.size()) { att[test_case.location].format = test_case.format; } const std::array<VkAttachmentReference, 3> cr = {{{0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}}}; VkSubpassDescription sp = {}; sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; sp.colorAttachmentCount = cr.size(); sp.pColorAttachments = cr.data(); VkRenderPassCreateInfo rpi = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO}; rpi.attachmentCount = att.size(); rpi.pAttachments = att.data(); rpi.subpassCount = 1; rpi.pSubpasses = &sp; const std::array<VkPipelineColorBlendAttachmentState, 3> cba = {{{}, {}, {}}}; VkPipelineColorBlendStateCreateInfo cbi = {VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO}; cbi.attachmentCount = cba.size(); cbi.pAttachments = cba.data(); VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpi, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkPipelineCoverageToColorStateCreateInfoNV cci = {VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV}; const auto break_samples = [&cci, &cbi, &rp, &test_case](CreatePipelineHelper &helper) { cci.coverageToColorEnable = test_case.enabled; cci.coverageToColorLocation = test_case.location; helper.pipe_ms_state_ci_.pNext = &cci; helper.gp_ci_.renderPass = rp; helper.gp_ci_.pColorBlendState = &cbi; }; CreatePipelineHelper::OneshotTest(*this, break_samples, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404", test_case.positive); vkDestroyRenderPass(m_device->device(), rp, nullptr); } } TEST_F(VkPositiveLayerTest, RayTracingPipelineNV) { TEST_DESCRIPTION("Test 
VK_NV_ray_tracing."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 2> required_device_extensions = { {VK_NV_RAY_TRACING_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); static const char rayGenShaderText[] = "#version 460 core \n" "#extension GL_NV_ray_tracing : require \n" "layout(set = 0, binding = 0, rgba8) uniform image2D image; \n" "layout(set = 0, binding = 1) uniform accelerationStructureNV as; \n" " \n" "layout(location = 0) rayPayloadNV float payload; \n" " \n" "void main() \n" "{ \n" " vec4 col = vec4(0, 0, 0, 1); \n" " \n" " vec3 origin = vec3(float(gl_LaunchIDNV.x)/float(gl_LaunchSizeNV.x), float(gl_LaunchIDNV.y)/float(gl_LaunchSizeNV.y), " "1.0); \n" " vec3 dir = vec3(0.0, 0.0, -1.0); \n" " \n" " payload = 0.5; \n" " traceNV(as, gl_RayFlagsCullBackFacingTrianglesNV, 0xff, 0, 1, 0, origin, 0.0, dir, 1000.0, 0); \n" " \n" " col.y = payload; \n" " \n" " imageStore(image, ivec2(gl_LaunchIDNV.xy), col); \n" "}\n"; static char const closestHitShaderText[] = "#version 460 core \n" "#extension GL_NV_ray_tracing : require \n" "layout(location = 0) rayPayloadInNV float hitValue; \n" " \n" "void main() { \n" " hitValue = 1.0; \n" "} \n"; static char const missShaderText[] = "#version 460 core \n" "#extension GL_NV_ray_tracing : require \n" "layout(location = 0) rayPayloadInNV float hitValue; \n" " \n" "void main() { \n" " hitValue = 0.0; \n" "} \n"; VkShaderObj rgs(m_device, rayGenShaderText, VK_SHADER_STAGE_RAYGEN_BIT_NV, this); VkShaderObj chs(m_device, closestHitShaderText, VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV, this); VkShaderObj mis(m_device, missShaderText, VK_SHADER_STAGE_MISS_BIT_NV, this); VkPipelineShaderStageCreateInfo rayStages[3]; memset(&rayStages[0], 0, sizeof(rayStages)); rayStages[0] = rgs.GetStageCreateInfo(); rayStages[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; rayStages[1] = chs.GetStageCreateInfo(); rayStages[1].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV; rayStages[2] = mis.GetStageCreateInfo(); rayStages[2].stage = VK_SHADER_STAGE_MISS_BIT_NV; VkRayTracingShaderGroupCreateInfoNV groups[3]; memset(&groups[0], 0, sizeof(groups)); groups[0].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV; groups[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; groups[0].generalShader = 0; groups[0].closestHitShader = VK_SHADER_UNUSED_NV; groups[0].anyHitShader = VK_SHADER_UNUSED_NV; groups[0].intersectionShader = VK_SHADER_UNUSED_NV; groups[1].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV; groups[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; 
groups[1].generalShader = VK_SHADER_UNUSED_NV; groups[1].closestHitShader = 1; groups[1].anyHitShader = VK_SHADER_UNUSED_NV; groups[1].intersectionShader = VK_SHADER_UNUSED_NV; groups[2].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV; groups[2].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; groups[2].generalShader = 2; groups[2].closestHitShader = VK_SHADER_UNUSED_NV; groups[2].anyHitShader = VK_SHADER_UNUSED_NV; groups[2].intersectionShader = VK_SHADER_UNUSED_NV; const uint32_t bindingCount = 2; VkDescriptorSetLayoutBinding binding[bindingCount] = {}; binding[0].binding = 0; binding[0].descriptorCount = 1; binding[0].stageFlags = VK_SHADER_STAGE_RAYGEN_BIT_NV; binding[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; binding[1].binding = 1; binding[1].descriptorCount = 1; binding[1].stageFlags = VK_SHADER_STAGE_RAYGEN_BIT_NV; binding[1].descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV; VkDescriptorSetLayoutCreateInfo descriptorSetEntry = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO}; descriptorSetEntry.bindingCount = bindingCount; descriptorSetEntry.pBindings = binding; VkDescriptorSetLayout descriptorSetLayout; VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &descriptorSetEntry, 0, &descriptorSetLayout); ASSERT_VK_SUCCESS(err); VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = {VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO}; pipelineLayoutCreateInfo.setLayoutCount = 1; pipelineLayoutCreateInfo.pSetLayouts = &descriptorSetLayout; VkPipelineLayout pipelineLayout; err = vkCreatePipelineLayout(m_device->device(), &pipelineLayoutCreateInfo, 0, &pipelineLayout); ASSERT_VK_SUCCESS(err); PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV = (PFN_vkCreateRayTracingPipelinesNV)vkGetInstanceProcAddr(instance(), "vkCreateRayTracingPipelinesNV"); VkRayTracingPipelineCreateInfoNV rayPipelineInfo = {VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV}; rayPipelineInfo.layout = pipelineLayout; rayPipelineInfo.stageCount = 3; rayPipelineInfo.pStages = &rayStages[0]; rayPipelineInfo.groupCount = 3; rayPipelineInfo.pGroups = &groups[0]; VkPipeline rayPipeline; err = vkCreateRayTracingPipelinesNV(m_device->device(), VK_NULL_HANDLE, 1, &rayPipelineInfo, 0, &rayPipeline); ASSERT_VK_SUCCESS(err); vkDestroyPipeline(m_device->device(), rayPipeline, 0); vkDestroyPipelineLayout(m_device->device(), pipelineLayout, 0); vkDestroyDescriptorSetLayout(m_device->device(), descriptorSetLayout, 0); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CreateYCbCrSampler) { TEST_DESCRIPTION("Verify YCbCr sampler creation."); // Test requires API 1.1 or (API 1.0 + SamplerYCbCr extension). 
    // Request API 1.1
    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // In case we don't have API 1.1+, try enabling the extension directly (and its dependencies)
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    VkDevice dev = m_device->device();

    // Verify we have the requested support
    bool ycbcr_support = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) ||
                          (DeviceValidationVersion() >= VK_API_VERSION_1_1));
    if (!ycbcr_support) {
        printf("%s Did not find required device extension %s; test skipped.\n", kSkipPrefix,
               VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
        return;
    }

    VkSamplerYcbcrConversion ycbcr_conv = VK_NULL_HANDLE;
    VkSamplerYcbcrConversionCreateInfo sycci = {};
    sycci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
    sycci.format = VK_FORMAT_UNDEFINED;
    sycci.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
    sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01649");
    vkCreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
    m_errorMonitor->VerifyFound();
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR  // or ifdef ANDROID?
#include "android_ndk_types.h"

TEST_F(VkLayerTest, AndroidHardwareBufferImageCreate) {
    TEST_DESCRIPTION("Verify AndroidHardwareBuffer image create info.");

    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
        // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
        (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
        m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
    } else {
        printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
               VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    VkDevice dev = m_device->device();

    VkImage img = VK_NULL_HANDLE;
    auto reset_img = [&img, dev]() {
        if (VK_NULL_HANDLE != img) vkDestroyImage(dev, img, NULL);
        img = VK_NULL_HANDLE;
    };

    VkImageCreateInfo ici = {};
    ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ici.pNext = nullptr;
    ici.imageType = VK_IMAGE_TYPE_2D;
    ici.arrayLayers = 1;
    ici.extent = {64, 64, 1};
    ici.format = VK_FORMAT_UNDEFINED;
    ici.mipLevels = 1;
    ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    ici.samples = VK_SAMPLE_COUNT_1_BIT;
    ici.tiling = VK_IMAGE_TILING_OPTIMAL;
    ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;

    // undefined format
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-01975");
    vkCreateImage(dev, &ici, NULL, &img);
    m_errorMonitor->VerifyFound();
    reset_img();

    // also undefined format
    VkExternalFormatANDROID efa = {};
    efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
    efa.externalFormat = 0;
    ici.pNext = &efa;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-01975");
    vkCreateImage(dev, &ici, NULL, &img);
    m_errorMonitor->VerifyFound();
    reset_img();

    // undefined format with an unknown external format
    efa.externalFormat = 0xBADC0DE;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkExternalFormatANDROID-externalFormat-01894");
    vkCreateImage(dev, &ici, NULL, &img);
    m_errorMonitor->VerifyFound();
    reset_img();

    AHardwareBuffer *ahb;
    AHardwareBuffer_Desc ahb_desc = {};
    ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
    ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
    ahb_desc.width = 64;
    ahb_desc.height = 64;
    ahb_desc.layers = 1;
    // Allocate an AHardwareBuffer
    AHardwareBuffer_allocate(&ahb_desc, &ahb);

    // Retrieve its properties to make its external format 'known' (AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM)
    VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {};
    ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
    VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
    ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
    ahb_props.pNext = &ahb_fmt_props;
    PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
        (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vkGetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID");
    ASSERT_TRUE(pfn_GetAHBProps != nullptr);
    pfn_GetAHBProps(dev, ahb, &ahb_props);

    // a defined image format with a non-zero external format
    ici.format = VK_FORMAT_R8G8B8A8_UNORM;
    efa.externalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-01974");
    vkCreateImage(dev, &ici, NULL, &img);
    m_errorMonitor->VerifyFound();
    reset_img();
    ici.format = VK_FORMAT_UNDEFINED;

    // external format while MUTABLE
    ici.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-02396");
    vkCreateImage(dev, &ici, NULL, &img);
    m_errorMonitor->VerifyFound();
    reset_img();
    ici.flags = 0;

    // external format while usage other than SAMPLED
    ici.usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-02397");
    vkCreateImage(dev, &ici, NULL, &img);
    m_errorMonitor->VerifyFound();
    reset_img();
    ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;

    // external format while tiling other than OPTIMAL
    ici.tiling = VK_IMAGE_TILING_LINEAR;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-02398");
    vkCreateImage(dev, &ici, NULL, &img);
    m_errorMonitor->VerifyFound();
    reset_img();
    ici.tiling = VK_IMAGE_TILING_OPTIMAL;

    // imageType
    VkExternalMemoryImageCreateInfo emici = {};
    emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
    emici.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
    ici.pNext = &emici;  // remove efa from chain, insert emici
    ici.format = VK_FORMAT_R8G8B8A8_UNORM;
    ici.imageType = VK_IMAGE_TYPE_3D;
    ici.extent = {64, 64, 64};
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-02393");
    vkCreateImage(dev, &ici, NULL, &img);
    m_errorMonitor->VerifyFound();
    reset_img();

    // wrong mipLevels
    ici.imageType = VK_IMAGE_TYPE_2D;
    ici.extent = {64, 64, 1};
    ici.mipLevels = 6;  // should be 7
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-02394");
    vkCreateImage(dev, &ici, NULL, &img);
    m_errorMonitor->VerifyFound();
    reset_img();
}

TEST_F(VkLayerTest, AndroidHardwareBufferFetchUnboundImageInfo) {
    TEST_DESCRIPTION("Verify AndroidHardwareBuffer retrieve image properties while memory is unbound.");

    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
        // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
        (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
        m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
    } else {
        printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
               VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    VkDevice dev = m_device->device();

    VkImage img = VK_NULL_HANDLE;
    auto reset_img = [&img, dev]() {
        if (VK_NULL_HANDLE != img) vkDestroyImage(dev, img, NULL);
        img = VK_NULL_HANDLE;
    };

    VkImageCreateInfo ici = {};
    ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ici.pNext = nullptr;
    ici.imageType = VK_IMAGE_TYPE_2D;
    ici.arrayLayers = 1;
    ici.extent = {64, 64, 1};
    ici.format = VK_FORMAT_R8G8B8A8_UNORM;
    ici.mipLevels = 1;
    ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    ici.samples = VK_SAMPLE_COUNT_1_BIT;
    ici.tiling = VK_IMAGE_TILING_LINEAR;
    ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;

    VkExternalMemoryImageCreateInfo emici = {};
    emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
    emici.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
    ici.pNext = &emici;

    m_errorMonitor->ExpectSuccess();
    vkCreateImage(dev, &ici, NULL, &img);
    m_errorMonitor->VerifyNotFound();

    // attempt to fetch layout from unbound image
    VkImageSubresource sub_rsrc = {};
    sub_rsrc.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    VkSubresourceLayout sub_layout = {};
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-image-01895");
    vkGetImageSubresourceLayout(dev, img, &sub_rsrc, &sub_layout);
    m_errorMonitor->VerifyFound();

    // attempt to get memory reqs from unbound image
    VkImageMemoryRequirementsInfo2 imri = {};
    imri.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
    imri.image = img;
    VkMemoryRequirements2 mem_reqs = {};
    mem_reqs.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryRequirementsInfo2-image-01897");
    vkGetImageMemoryRequirements2(dev, &imri,
&mem_reqs); m_errorMonitor->VerifyFound(); reset_img(); } TEST_F(VkLayerTest, AndroidHardwareBufferMemoryAllocation) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer memory allocation."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); VkImage img = VK_NULL_HANDLE; auto reset_img = [&img, dev]() { if (VK_NULL_HANDLE != img) vkDestroyImage(dev, img, NULL); img = VK_NULL_HANDLE; }; VkDeviceMemory mem_handle = VK_NULL_HANDLE; auto reset_mem = [&mem_handle, dev]() { if (VK_NULL_HANDLE != mem_handle) vkFreeMemory(dev, mem_handle, NULL); mem_handle = VK_NULL_HANDLE; }; PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vkGetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID"); ASSERT_TRUE(pfn_GetAHBProps != nullptr); // AHB structs AHardwareBuffer *ahb = nullptr; AHardwareBuffer_Desc ahb_desc = {}; VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {}; ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID; VkAndroidHardwareBufferPropertiesANDROID ahb_props = {}; ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID; ahb_props.pNext = &ahb_fmt_props; VkImportAndroidHardwareBufferInfoANDROID iahbi = {}; iahbi.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID; // destroy and re-acquire an AHB, and fetch it's properties auto recreate_ahb = [&ahb, &iahbi, &ahb_desc, &ahb_props, dev, pfn_GetAHBProps]() { if (ahb) AHardwareBuffer_release(ahb); ahb = nullptr; AHardwareBuffer_allocate(&ahb_desc, &ahb); pfn_GetAHBProps(dev, ahb, &ahb_props); iahbi.buffer = ahb; }; // Allocate an AHardwareBuffer ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ahb_desc.width = 64; ahb_desc.height = 64; ahb_desc.layers = 1; recreate_ahb(); // Create an image w/ external format VkExternalFormatANDROID efa = {}; efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID; efa.externalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM; VkImageCreateInfo ici = {}; ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ici.pNext = &efa; ici.imageType = VK_IMAGE_TYPE_2D; ici.arrayLayers = 1; ici.extent = {64, 64, 1}; ici.format = VK_FORMAT_UNDEFINED; ici.mipLevels = 1; ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; ici.mipLevels = 1; ici.samples = VK_SAMPLE_COUNT_1_BIT; 
ici.tiling = VK_IMAGE_TILING_OPTIMAL; ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT; VkResult res = vkCreateImage(dev, &ici, NULL, &img); ASSERT_VK_SUCCESS(res); VkMemoryAllocateInfo mai = {}; mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mai.pNext = &iahbi; // Chained import struct mai.allocationSize = ahb_props.allocationSize; mai.memoryTypeIndex = 32; // Set index to match one of the bits in ahb_props for (int i = 0; i < 32; i++) { if (ahb_props.memoryTypeBits & (1 << i)) { mai.memoryTypeIndex = i; break; } } ASSERT_NE(32, mai.memoryTypeIndex); // Import w/ non-dedicated memory allocation // Import requires format AHB_FMT_BLOB and usage AHB_USAGE_GPU_DATA_BUFFER m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02384"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); // Allocation size mismatch ahb_desc.format = AHARDWAREBUFFER_FORMAT_BLOB; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; recreate_ahb(); mai.allocationSize = ahb_props.allocationSize + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-allocationSize-02383"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); mai.allocationSize = ahb_props.allocationSize; reset_mem(); // memoryTypeIndex mismatch mai.memoryTypeIndex++; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); mai.memoryTypeIndex--; reset_mem(); // Insert dedicated image memory allocation to mai chain VkMemoryDedicatedAllocateInfo mdai = {}; mdai.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO; mdai.image = img; mdai.buffer = VK_NULL_HANDLE; mdai.pNext = mai.pNext; mai.pNext = &mdai; // Dedicated allocation with unmatched usage bits ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT; recreate_ahb(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02390"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); // Dedicated allocation with incomplete mip chain reset_img(); ici.mipLevels = 2; vkCreateImage(dev, &ici, NULL, &img); mdai.image = img; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE; recreate_ahb(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02389"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); // Dedicated allocation with mis-matched dimension ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ahb_desc.height = 32; ahb_desc.width = 128; recreate_ahb(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02388"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); // Dedicated allocation with mis-matched VkFormat ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ahb_desc.height = 64; ahb_desc.width = 64; recreate_ahb(); ici.mipLevels = 1; ici.format = VK_FORMAT_B8G8R8A8_UNORM; ici.pNext = NULL; VkImage img2; vkCreateImage(dev, &ici, NULL, &img2); mdai.image = img2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02387"); 
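// img2 was created with VK_FORMAT_B8G8R8A8_UNORM, which does not match the R8G8B8A8 AHardwareBuffer format.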
vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); vkDestroyImage(dev, img2, NULL); mdai.image = img; reset_mem(); // Missing required ahb usage ahb_desc.usage = AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884"); recreate_ahb(); m_errorMonitor->VerifyFound(); // Dedicated allocation with missing usage bits // Setting up this test also triggers a slew of others m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02390"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-allocationSize-02383"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02386"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); // Non-import allocation - replace import struct in chain with export struct VkExportMemoryAllocateInfo emai = {}; emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO; emai.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; mai.pNext = &emai; emai.pNext = &mdai; // still dedicated mdai.pNext = nullptr; // Export with allocation size non-zero ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; recreate_ahb(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-01874"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); AHardwareBuffer_release(ahb); reset_mem(); reset_img(); } TEST_F(VkLayerTest, AndroidHardwareBufferCreateYCbCrSampler) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer YCbCr sampler creation."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); VkSamplerYcbcrConversion ycbcr_conv = VK_NULL_HANDLE; VkSamplerYcbcrConversionCreateInfo sycci = {}; sycci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO; sycci.format = VK_FORMAT_UNDEFINED; sycci.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; 
sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904"); vkCreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv); m_errorMonitor->VerifyFound(); VkExternalFormatANDROID efa = {}; efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID; efa.externalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM; sycci.format = VK_FORMAT_R8G8B8A8_UNORM; sycci.pNext = &efa; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904"); vkCreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, AndroidHardwareBufferPhysDevImageFormatProp2) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer GetPhysicalDeviceImageFormatProperties."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping test\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); if ((m_instance_api_version < VK_API_VERSION_1_1) && !InstanceExtensionEnabled(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { printf("%s %s extension not supported, skipping test\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } VkImageFormatProperties2 ifp = {}; ifp.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2; VkPhysicalDeviceImageFormatInfo2 pdifi = {}; pdifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2; pdifi.format = VK_FORMAT_R8G8B8A8_UNORM; pdifi.tiling = VK_IMAGE_TILING_OPTIMAL; pdifi.type = VK_IMAGE_TYPE_2D; pdifi.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; VkAndroidHardwareBufferUsageANDROID ahbu = {}; ahbu.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID; ahbu.androidHardwareBufferUsage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ifp.pNext = &ahbu; // AHB_usage chained to input without a matching external image format struc chained to output m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868"); vkGetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &pdifi, &ifp); m_errorMonitor->VerifyFound(); // output struct chained, but does not include VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID usage VkPhysicalDeviceExternalImageFormatInfo pdeifi = {}; pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO; pdeifi.handleType = 
VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT; pdifi.pNext = &pdeifi; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868"); vkGetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &pdifi, &ifp); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, AndroidHardwareBufferCreateImageView) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer image view creation."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); // Expect no validation errors during setup m_errorMonitor->ExpectSuccess(); // Allocate an AHB and fetch its properties AHardwareBuffer *ahb = nullptr; AHardwareBuffer_Desc ahb_desc = {}; ahb_desc.format = AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ahb_desc.width = 64; ahb_desc.height = 64; ahb_desc.layers = 1; AHardwareBuffer_allocate(&ahb_desc, &ahb); // Retrieve AHB properties to make it's external format 'known' VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {}; ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID; VkAndroidHardwareBufferPropertiesANDROID ahb_props = {}; ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID; ahb_props.pNext = &ahb_fmt_props; PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vkGetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID"); ASSERT_TRUE(pfn_GetAHBProps != nullptr); pfn_GetAHBProps(dev, ahb, &ahb_props); AHardwareBuffer_release(ahb); // Give image an external format VkExternalFormatANDROID efa = {}; efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID; efa.externalFormat = AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM; // Create the image VkImage img = VK_NULL_HANDLE; VkImageCreateInfo ici = {}; ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ici.pNext = &efa; ici.imageType = VK_IMAGE_TYPE_2D; ici.arrayLayers = 1; ici.extent = {64, 64, 1}; ici.format = VK_FORMAT_UNDEFINED; ici.mipLevels = 1; ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; ici.samples = VK_SAMPLE_COUNT_1_BIT; ici.tiling = VK_IMAGE_TILING_OPTIMAL; ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT; vkCreateImage(dev, &ici, NULL, &img); // Set up memory allocation VkDeviceMemory img_mem = VK_NULL_HANDLE; VkMemoryAllocateInfo mai = {}; mai.sType = 
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mai.allocationSize = 64 * 64 * 4; mai.memoryTypeIndex = 0; vkAllocateMemory(dev, &mai, NULL, &img_mem); // Bind image to memory vkBindImageMemory(dev, img, img_mem, 0); // Create a YCbCr conversion, with different external format, chain to view VkSamplerYcbcrConversion ycbcr_conv = VK_NULL_HANDLE; VkSamplerYcbcrConversionCreateInfo sycci = {}; efa.externalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM; sycci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO; sycci.pNext = &efa; sycci.format = VK_FORMAT_UNDEFINED; sycci.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL; vkCreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv); VkSamplerYcbcrConversionInfo syci = {}; syci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO; syci.conversion = ycbcr_conv; // Create a view VkImageView image_view = VK_NULL_HANDLE; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.pNext = &syci; ivci.image = img; ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_UNDEFINED; ivci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}; auto reset_view = [&image_view, dev]() { if (VK_NULL_HANDLE != image_view) vkDestroyImageView(dev, image_view, NULL); image_view = VK_NULL_HANDLE; }; // Up to this point, no errors expected m_errorMonitor->VerifyNotFound(); // Chained ycbcr conversion has different (external) format than image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-02400"); // Also causes "unsupported format" - should be removed in future spec update m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-None-02273"); vkCreateImageView(dev, &ivci, NULL, &image_view); m_errorMonitor->VerifyFound(); reset_view(); vkDestroySamplerYcbcrConversion(dev, ycbcr_conv, NULL); efa.externalFormat = AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM; vkCreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv); syci.conversion = ycbcr_conv; // View component swizzle not IDENTITY ivci.components.r = VK_COMPONENT_SWIZZLE_B; ivci.components.b = VK_COMPONENT_SWIZZLE_R; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-02401"); // Also causes "unsupported format" - should be removed in future spec update m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-None-02273"); vkCreateImageView(dev, &ivci, NULL, &image_view); m_errorMonitor->VerifyFound(); reset_view(); ivci.components.r = VK_COMPONENT_SWIZZLE_IDENTITY; ivci.components.b = VK_COMPONENT_SWIZZLE_IDENTITY; // View with external format, when format is not UNDEFINED ivci.format = VK_FORMAT_R5G6B5_UNORM_PACK16; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-02399"); // Also causes "view format different from image format" m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01019"); vkCreateImageView(dev, &ivci, NULL, &image_view); m_errorMonitor->VerifyFound(); reset_view(); vkDestroySamplerYcbcrConversion(dev, ycbcr_conv, NULL); vkDestroyImageView(dev, image_view, NULL); vkDestroyImage(dev, img, NULL); vkFreeMemory(dev, img_mem, NULL); } TEST_F(VkLayerTest, AndroidHardwareBufferImportBuffer) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer import as buffer."); SetTargetApiVersion(VK_API_VERSION_1_1); 
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); VkDeviceMemory mem_handle = VK_NULL_HANDLE; auto reset_mem = [&mem_handle, dev]() { if (VK_NULL_HANDLE != mem_handle) vkFreeMemory(dev, mem_handle, NULL); mem_handle = VK_NULL_HANDLE; }; PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vkGetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID"); ASSERT_TRUE(pfn_GetAHBProps != nullptr); // AHB structs AHardwareBuffer *ahb = nullptr; AHardwareBuffer_Desc ahb_desc = {}; VkAndroidHardwareBufferPropertiesANDROID ahb_props = {}; ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID; VkImportAndroidHardwareBufferInfoANDROID iahbi = {}; iahbi.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID; // Allocate an AHardwareBuffer ahb_desc.format = AHARDWAREBUFFER_FORMAT_BLOB; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE; ahb_desc.width = 512; ahb_desc.height = 1; ahb_desc.layers = 1; AHardwareBuffer_allocate(&ahb_desc, &ahb); pfn_GetAHBProps(dev, ahb, &ahb_props); iahbi.buffer = ahb; // Create export and import buffers VkExternalMemoryBufferCreateInfo ext_buf_info = {}; ext_buf_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR; ext_buf_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT; VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.pNext = &ext_buf_info; bci.size = ahb_props.allocationSize; bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VkBuffer buf = VK_NULL_HANDLE; vkCreateBuffer(dev, &bci, NULL, &buf); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(dev, buf, &mem_reqs); // Allocation info VkMemoryAllocateInfo mai = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, mem_reqs, 0); mai.pNext = &iahbi; // Chained import struct // Import as buffer requires format AHB_FMT_BLOB and usage AHB_USAGE_GPU_DATA_BUFFER m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881"); // Also causes "non-dedicated allocation format/usage" error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02384"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); AHardwareBuffer_release(ahb); reset_mem(); vkDestroyBuffer(dev, buf, NULL); } 
TEST_F(VkLayerTest, AndroidHardwareBufferExporttBuffer) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer export memory as AHB."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); VkDeviceMemory mem_handle = VK_NULL_HANDLE; // Allocate device memory, no linked export struct indicating AHB handle type VkMemoryAllocateInfo mai = {}; mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mai.allocationSize = 65536; mai.memoryTypeIndex = 0; vkAllocateMemory(dev, &mai, NULL, &mem_handle); PFN_vkGetMemoryAndroidHardwareBufferANDROID pfn_GetMemAHB = (PFN_vkGetMemoryAndroidHardwareBufferANDROID)vkGetDeviceProcAddr(dev, "vkGetMemoryAndroidHardwareBufferANDROID"); ASSERT_TRUE(pfn_GetMemAHB != nullptr); VkMemoryGetAndroidHardwareBufferInfoANDROID mgahbi = {}; mgahbi.sType = VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID; mgahbi.memory = mem_handle; AHardwareBuffer *ahb = nullptr; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882"); pfn_GetMemAHB(dev, &mgahbi, &ahb); m_errorMonitor->VerifyFound(); if (ahb) AHardwareBuffer_release(ahb); ahb = nullptr; if (VK_NULL_HANDLE != mem_handle) vkFreeMemory(dev, mem_handle, NULL); mem_handle = VK_NULL_HANDLE; // Add an export struct with AHB handle type to allocation info VkExportMemoryAllocateInfo emai = {}; emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO; emai.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; mai.pNext = &emai; // Create an image, do not bind memory VkImage img = VK_NULL_HANDLE; VkImageCreateInfo ici = {}; ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ici.imageType = VK_IMAGE_TYPE_2D; ici.arrayLayers = 1; ici.extent = {128, 128, 1}; ici.format = VK_FORMAT_R8G8B8A8_UNORM; ici.mipLevels = 1; ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; ici.samples = VK_SAMPLE_COUNT_1_BIT; ici.tiling = VK_IMAGE_TILING_OPTIMAL; ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT; vkCreateImage(dev, &ici, NULL, &img); ASSERT_TRUE(VK_NULL_HANDLE != img); // Add image to allocation chain as dedicated info, re-allocate VkMemoryDedicatedAllocateInfo mdai = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO}; mdai.image = img; emai.pNext = &mdai; mai.allocationSize = 0; vkAllocateMemory(dev, &mai, NULL, &mem_handle); mgahbi.memory = mem_handle; 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883"); pfn_GetMemAHB(dev, &mgahbi, &ahb); m_errorMonitor->VerifyFound(); if (ahb) AHardwareBuffer_release(ahb); if (VK_NULL_HANDLE != mem_handle) vkFreeMemory(dev, mem_handle, NULL); vkDestroyImage(dev, img, NULL); } #endif #if defined(ANDROID) && defined(VALIDATION_APK) const char *appTag = "VulkanLayerValidationTests"; static bool initialized = false; static bool active = false; // Convert Intents to argv // Ported from Hologram sample, only difference is flexible key std::vector<std::string> get_args(android_app &app, const char *intent_extra_data_key) { std::vector<std::string> args; JavaVM &vm = *app.activity->vm; JNIEnv *p_env; if (vm.AttachCurrentThread(&p_env, nullptr) != JNI_OK) return args; JNIEnv &env = *p_env; jobject activity = app.activity->clazz; jmethodID get_intent_method = env.GetMethodID(env.GetObjectClass(activity), "getIntent", "()Landroid/content/Intent;"); jobject intent = env.CallObjectMethod(activity, get_intent_method); jmethodID get_string_extra_method = env.GetMethodID(env.GetObjectClass(intent), "getStringExtra", "(Ljava/lang/String;)Ljava/lang/String;"); jvalue get_string_extra_args; get_string_extra_args.l = env.NewStringUTF(intent_extra_data_key); jstring extra_str = static_cast<jstring>(env.CallObjectMethodA(intent, get_string_extra_method, &get_string_extra_args)); std::string args_str; if (extra_str) { const char *extra_utf = env.GetStringUTFChars(extra_str, nullptr); args_str = extra_utf; env.ReleaseStringUTFChars(extra_str, extra_utf); env.DeleteLocalRef(extra_str); } env.DeleteLocalRef(get_string_extra_args.l); env.DeleteLocalRef(intent); vm.DetachCurrentThread(); // split args_str std::stringstream ss(args_str); std::string arg; while (std::getline(ss, arg, ' ')) { if (!arg.empty()) args.push_back(arg); } return args; } void addFullTestCommentIfPresent(const ::testing::TestInfo &test_info, std::string &error_message) { const char *const type_param = test_info.type_param(); const char *const value_param = test_info.value_param(); if (type_param != NULL || value_param != NULL) { error_message.append(", where "); if (type_param != NULL) { error_message.append("TypeParam = ").append(type_param); if (value_param != NULL) error_message.append(" and "); } if (value_param != NULL) { error_message.append("GetParam() = ").append(value_param); } } } // Inspired by https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md class LogcatPrinter : public ::testing::EmptyTestEventListener { // Called before a test starts. virtual void OnTestStart(const ::testing::TestInfo &test_info) { __android_log_print(ANDROID_LOG_INFO, appTag, "[ RUN ] %s.%s", test_info.test_case_name(), test_info.name()); } // Called after a failed assertion or a SUCCEED() invocation. virtual void OnTestPartResult(const ::testing::TestPartResult &result) { // If the test part succeeded, we don't need to do anything. if (result.type() == ::testing::TestPartResult::kSuccess) return; __android_log_print(ANDROID_LOG_INFO, appTag, "%s in %s:%d %s", result.failed() ? "*** Failure" : "Success", result.file_name(), result.line_number(), result.summary()); } // Called after a test ends. 
virtual void OnTestEnd(const ::testing::TestInfo &info) { std::string result; if (info.result()->Passed()) { result.append("[ OK ]"); } else { result.append("[ FAILED ]"); } result.append(info.test_case_name()).append(".").append(info.name()); if (info.result()->Failed()) addFullTestCommentIfPresent(info, result); if (::testing::GTEST_FLAG(print_time)) { std::ostringstream os; os << info.result()->elapsed_time(); result.append(" (").append(os.str()).append(" ms)"); } __android_log_print(ANDROID_LOG_INFO, appTag, "%s", result.c_str()); }; }; static int32_t processInput(struct android_app *app, AInputEvent *event) { return 0; } static void processCommand(struct android_app *app, int32_t cmd) { switch (cmd) { case APP_CMD_INIT_WINDOW: { if (app->window) { initialized = true; } break; } case APP_CMD_GAINED_FOCUS: { active = true; break; } case APP_CMD_LOST_FOCUS: { active = false; break; } } } void android_main(struct android_app *app) { int vulkanSupport = InitVulkan(); if (vulkanSupport == 0) { __android_log_print(ANDROID_LOG_INFO, appTag, "==== FAILED ==== No Vulkan support found"); return; } app->onAppCmd = processCommand; app->onInputEvent = processInput; while (1) { int events; struct android_poll_source *source; while (ALooper_pollAll(active ? 0 : -1, NULL, &events, (void **)&source) >= 0) { if (source) { source->process(app, source); } if (app->destroyRequested != 0) { VkTestFramework::Finish(); return; } } if (initialized && active) { // Use the following key to send arguments to gtest, i.e. // --es args "--gtest_filter=-VkLayerTest.foo" const char key[] = "args"; std::vector<std::string> args = get_args(*app, key); std::string filter = ""; if (args.size() > 0) { __android_log_print(ANDROID_LOG_INFO, appTag, "Intent args = %s", args[0].c_str()); filter += args[0]; } else { __android_log_print(ANDROID_LOG_INFO, appTag, "No Intent args detected"); } int argc = 2; char *argv[] = {(char *)"foo", (char *)filter.c_str()}; __android_log_print(ANDROID_LOG_DEBUG, appTag, "filter = %s", argv[1]); // Route output to files until we can override the gtest output freopen("/sdcard/Android/data/com.example.VulkanLayerValidationTests/files/out.txt", "w", stdout); freopen("/sdcard/Android/data/com.example.VulkanLayerValidationTests/files/err.txt", "w", stderr); ::testing::InitGoogleTest(&argc, argv); ::testing::TestEventListeners &listeners = ::testing::UnitTest::GetInstance()->listeners(); listeners.Append(new LogcatPrinter); VkTestFramework::InitArgs(&argc, argv); ::testing::AddGlobalTestEnvironment(new TestEnvironment); int result = RUN_ALL_TESTS(); if (result != 0) { __android_log_print(ANDROID_LOG_INFO, appTag, "==== Tests FAILED ===="); } else { __android_log_print(ANDROID_LOG_INFO, appTag, "==== Tests PASSED ===="); } VkTestFramework::Finish(); fclose(stdout); fclose(stderr); ANativeActivity_finish(app->activity); return; } } } #endif #if defined(_WIN32) && !defined(NDEBUG) #include <crtdbg.h> #endif int main(int argc, char **argv) { int result; #ifdef ANDROID int vulkanSupport = InitVulkan(); if (vulkanSupport == 0) return 1; #endif #if defined(_WIN32) && !defined(NDEBUG) _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); #endif ::testing::InitGoogleTest(&argc, argv); VkTestFramework::InitArgs(&argc, argv); ::testing::AddGlobalTestEnvironment(new TestEnvironment); result = RUN_ALL_TESTS(); VkTestFramework::Finish(); return result; }
1
9,775
@cnorthrop -- here is a test with a name change. Is this going to affect your internal CI?
KhronosGroup-Vulkan-ValidationLayers
cpp
@@ -24,4 +24,7 @@ final class ChromeDriverCommand { private ChromeDriverCommand() {} static final String LAUNCH_APP = "launchApp"; + static final String SEND_COMMANDS_FOR_DOWNLOAD_CHROME_HEAD_LESS + = "sendCommandForDownloadChromeHeadLess"; + }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.chrome; /** * Constants for the ChromeDriver specific command IDs. */ final class ChromeDriverCommand { private ChromeDriverCommand() {} static final String LAUNCH_APP = "launchApp"; }
1
15,148
Nit: `Headless` is one word, not two, and so doesn't need camelcasing in this way.
SeleniumHQ-selenium
rb
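The rename suggested in the review above would look roughly like the sketch below, a minimal Java illustration assuming the same command-string shape is kept; the exact identifier and value chosen upstream may differ.

```java
// Hypothetical revision of the new ChromeDriverCommand constant, applying the
// reviewer's nit: "Headless" is one word, so it gets a single capital letter.
final class ChromeDriverCommand {
  private ChromeDriverCommand() {}

  static final String LAUNCH_APP = "launchApp";

  // Assumed name/value, not the merged result.
  static final String SEND_COMMAND_FOR_DOWNLOAD_CHROME_HEADLESS =
      "sendCommandForDownloadChromeHeadless";
}
```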
@@ -122,6 +122,10 @@ class core(task.Config): parallel_scheduling = parameter.BoolParameter( default=False, description='Use multiprocessing to do scheduling in parallel.') + parallel_scheduling_processes = parameter.IntParameter( + default=None, + description='The number of processes to use for scheduling in parallel.' + ' The default is the number of available CPUs') assistant = parameter.BoolParameter( default=False, description='Run any task from the scheduler.')
1
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains the bindings for command line integration and dynamic loading of tasks If you don't want to run luigi from the command line. You may use the methods defined in this module to programatically run luigi. """ import logging import logging.config import os import sys import tempfile import signal import warnings from luigi import configuration from luigi import lock from luigi import parameter from luigi import rpc from luigi import scheduler from luigi import task from luigi import worker from luigi import execution_summary from luigi.cmdline_parser import CmdlineParser def setup_interface_logging(conf_file='', level_name='DEBUG'): # use a variable in the function object to determine if it has run before if getattr(setup_interface_logging, "has_run", False): return if conf_file == '': # no log config given, setup default logging level = getattr(logging, level_name, logging.DEBUG) logger = logging.getLogger('luigi-interface') logger.setLevel(level) stream_handler = logging.StreamHandler() stream_handler.setLevel(level) formatter = logging.Formatter('%(levelname)s: %(message)s') stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) else: logging.config.fileConfig(conf_file, disable_existing_loggers=False) setup_interface_logging.has_run = True class core(task.Config): ''' Keeps track of a bunch of environment params. Uses the internal luigi parameter mechanism. The nice thing is that we can instantiate this class and get an object with all the environment variables set. This is arguably a bit of a hack. ''' use_cmdline_section = False local_scheduler = parameter.BoolParameter( default=False, description='Use an in-memory central scheduler. 
Useful for testing.', always_in_help=True) scheduler_host = parameter.Parameter( default='localhost', description='Hostname of machine running remote scheduler', config_path=dict(section='core', name='default-scheduler-host')) scheduler_port = parameter.IntParameter( default=8082, description='Port of remote scheduler api process', config_path=dict(section='core', name='default-scheduler-port')) scheduler_url = parameter.Parameter( default='', description='Full path to remote scheduler', config_path=dict(section='core', name='default-scheduler-url'), ) lock_size = parameter.IntParameter( default=1, description="Maximum number of workers running the same command") no_lock = parameter.BoolParameter( default=False, description='Ignore if similar process is already running') lock_pid_dir = parameter.Parameter( default=os.path.join(tempfile.gettempdir(), 'luigi'), description='Directory to store the pid file') take_lock = parameter.BoolParameter( default=False, description='Signal other processes to stop getting work if already running') workers = parameter.IntParameter( default=1, description='Maximum number of parallel tasks to run') logging_conf_file = parameter.Parameter( default='', description='Configuration file for logging') log_level = parameter.ChoiceParameter( default='DEBUG', choices=['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], description="Default log level to use when logging_conf_file is not set") module = parameter.Parameter( default='', description='Used for dynamic loading of modules', always_in_help=True) parallel_scheduling = parameter.BoolParameter( default=False, description='Use multiprocessing to do scheduling in parallel.') assistant = parameter.BoolParameter( default=False, description='Run any task from the scheduler.') help = parameter.BoolParameter( default=False, description='Show most common flags and all task-specific flags', always_in_help=True) help_all = parameter.BoolParameter( default=False, description='Show all command line flags', always_in_help=True) class _WorkerSchedulerFactory(object): def create_local_scheduler(self): return scheduler.Scheduler(prune_on_get_work=True, record_task_history=False) def create_remote_scheduler(self, url): return rpc.RemoteScheduler(url) def create_worker(self, scheduler, worker_processes, assistant=False): return worker.Worker( scheduler=scheduler, worker_processes=worker_processes, assistant=assistant) def _schedule_and_run(tasks, worker_scheduler_factory=None, override_defaults=None): """ :param tasks: :param worker_scheduler_factory: :param override_defaults: :return: True if all tasks and their dependencies were successfully run (or already completed); False if any error occurred. """ if worker_scheduler_factory is None: worker_scheduler_factory = _WorkerSchedulerFactory() if override_defaults is None: override_defaults = {} env_params = core(**override_defaults) # search for logging configuration path first on the command line, then # in the application config file logging_conf = env_params.logging_conf_file if logging_conf != '' and not os.path.exists(logging_conf): raise Exception( "Error: Unable to locate specified logging configuration file!" 
) if not configuration.get_config().getboolean( 'core', 'no_configure_logging', False): setup_interface_logging(logging_conf, env_params.log_level) kill_signal = signal.SIGUSR1 if env_params.take_lock else None if (not env_params.no_lock and not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size, kill_signal))): raise PidLockAlreadyTakenExit() if env_params.local_scheduler: sch = worker_scheduler_factory.create_local_scheduler() else: if env_params.scheduler_url != '': url = env_params.scheduler_url else: url = 'http://{host}:{port:d}/'.format( host=env_params.scheduler_host, port=env_params.scheduler_port, ) sch = worker_scheduler_factory.create_remote_scheduler(url=url) worker = worker_scheduler_factory.create_worker( scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant) success = True logger = logging.getLogger('luigi-interface') with worker: for t in tasks: success &= worker.add(t, env_params.parallel_scheduling) logger.info('Done scheduling tasks') success &= worker.run() logger.info(execution_summary.summary(worker)) return dict(success=success, worker=worker) class PidLockAlreadyTakenExit(SystemExit): """ The exception thrown by :py:func:`luigi.run`, when the lock file is inaccessible """ pass def run(*args, **kwargs): return _run(*args, **kwargs)['success'] def _run(cmdline_args=None, main_task_cls=None, worker_scheduler_factory=None, use_dynamic_argparse=None, local_scheduler=False): """ Please dont use. Instead use `luigi` binary. Run from cmdline using argparse. :param cmdline_args: :param main_task_cls: :param worker_scheduler_factory: :param use_dynamic_argparse: Deprecated and ignored :param local_scheduler: """ if use_dynamic_argparse is not None: warnings.warn("use_dynamic_argparse is deprecated, don't set it.", DeprecationWarning, stacklevel=2) if cmdline_args is None: cmdline_args = sys.argv[1:] if main_task_cls: cmdline_args.insert(0, main_task_cls.task_family) if local_scheduler: cmdline_args.insert(0, '--local-scheduler') with CmdlineParser.global_instance(cmdline_args) as cp: return _schedule_and_run([cp.get_task_obj()], worker_scheduler_factory) def build(tasks, worker_scheduler_factory=None, **env_params): """ Run internally, bypassing the cmdline parsing. Useful if you have some luigi code that you want to run internally. Example: .. code-block:: python luigi.build([MyTask1(), MyTask2()], local_scheduler=True) One notable difference is that `build` defaults to not using the identical process lock. Otherwise, `build` would only be callable once from each process. :param tasks: :param worker_scheduler_factory: :param env_params: :return: True if there were no scheduling errors, even if tasks may fail. """ if "no_lock" not in env_params: env_params["no_lock"] = True return _schedule_and_run(tasks, worker_scheduler_factory, override_defaults=env_params)['success']
1
17,320
I think this will cause a warning, can you set the default to zero (`0`)?
spotify-luigi
py
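The change the reviewer asks for above, an integer default of `0` instead of `None` with `0` meaning "use all CPUs", might look like the following Python sketch. The config class name `scheduling` and the `resolve_process_count` helper are hypothetical; only `parameter.IntParameter` and `task.Config` come from the record itself.

```python
import multiprocessing

from luigi import parameter, task


class scheduling(task.Config):
    # Hypothetical config class (not luigi's real `core` class) showing the
    # reviewer's suggestion: default to 0 rather than None so IntParameter
    # does not warn about a non-integer default.
    parallel_scheduling_processes = parameter.IntParameter(
        default=0,
        description='The number of processes to use for scheduling in parallel.'
                    ' 0 means use the number of available CPUs.')


def resolve_process_count():
    """Map the configured value to a process count, treating 0 as cpu_count()."""
    return scheduling().parallel_scheduling_processes or multiprocessing.cpu_count()
```

Using `or` here means an explicit `0` falls through to `multiprocessing.cpu_count()`, which matches the documented meaning of the default.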
@@ -2,7 +2,7 @@ package client_connection import ( "errors" - "github.com/mysterium/node/service_discovery/dto" + id "github.com/mysterium/node/identity" ) type State string
1
package client_connection import ( "errors" "github.com/mysterium/node/service_discovery/dto" ) type State string const ( NOT_CONNECTED = State("NOT_CONNECTED") NEGOTIATING = State("NEGOTIATING") CONNECTED = State("CONNECTED") ) var ( ALREADY_CONNECTED = errors.New("already connected") ) type ConnectionStatus struct { State State SessionId string } type Manager interface { Connect(identity dto.Identity, NodeKey string) error Status() ConnectionStatus Disconnect() error Wait() error } type fakeManager struct { errorChannel chan error } func NewManager() *fakeManager { return &fakeManager{make(chan error)} } func (nm *fakeManager) Connect(identity dto.Identity, NodeKey string) error { return nil } func (nm *fakeManager) Status() ConnectionStatus { return ConnectionStatus{NOT_CONNECTED, ""} } func (nm *fakeManager) Disconnect() error { nm.errorChannel <- errors.New("disconnected") return nil } func (nm *fakeManager) Wait() error { return <-nm.errorChannel }
1
9,730
Confusing naming: `Identity` from the "id" package :/ Why do we need an alias here? (and in other imports)
mysteriumnetwork-node
go
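For the import comment above, the alias-free alternative the reviewer seems to be asking about would read roughly as below. This is an illustrative sketch, not the upstream code: the parameter name `consumerID` is an assumption, and `Status()` is omitted so the sketch stays self-contained.

```go
// Sketch of the un-aliased import style: refer to the type as
// identity.Identity via the package's own name instead of aliasing
// the package to "id".
package client_connection

import (
	"github.com/mysterium/node/identity"
)

// Manager mirrors the interface from the record above; Status() is left out
// to avoid pulling in the ConnectionStatus type.
type Manager interface {
	Connect(consumerID identity.Identity, nodeKey string) error
	Disconnect() error
	Wait() error
}
```

The trade-off the review raises is whether the `identity.Identity` stutter is unpleasant enough to justify an alias, or whether the alias itself is the greater source of confusion.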
@@ -355,9 +355,11 @@ def data(readonly=False): "Hide the window decoration when using wayland " "(requires restart)"), - ('show-keyhints', - SettingValue(typ.Bool(), 'true'), - "Show possible keychains based on the current keystring"), + ('keyhint-blacklist', + SettingValue(typ.List(none_ok=True), ''), + "Keychains that shouldn't be shown in the keyhint dialog\n\n" + "Globs are supported, so ';*' will blacklist all keychains" + "starting with ';'. Use '*' to disable keyhints"), readonly=readonly )),
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Configuration data for config.py. Module attributes: FIRST_COMMENT: The initial comment header to place in the config. SECTION_DESC: A dictionary with descriptions for sections. DATA: A global read-only copy of the default config, an OrderedDict of sections. """ import sys import re import collections from qutebrowser.config import configtypes as typ from qutebrowser.config import sections as sect from qutebrowser.config.value import SettingValue from qutebrowser.utils.qtutils import MAXVALS FIRST_COMMENT = r""" # vim: ft=dosini # Configfile for qutebrowser. # # This configfile is parsed by python's configparser in extended # interpolation mode. The format is very INI-like, so there are # categories like [general] with "key = value"-pairs. # # Note that you shouldn't add your own comments, as this file is # regenerated every time the config is saved. # # Interpolation looks like ${value} or ${section:value} and will be # replaced by the respective value. # # Some settings will expand environment variables. Note that, since # interpolation is run first, you will need to escape the $ char as # described below. # # This is the default config, so if you want to remove anything from # here (as opposed to change/add), for example a key binding, set it to # an empty value. # # You will need to escape the following values: # - # at the start of the line (at the first position of the key) (\#) # - $ in a value ($$) """ SECTION_DESC = { 'general': "General/miscellaneous options.", 'ui': "General options related to the user interface.", 'input': "Options related to input modes.", 'network': "Settings related to the network.", 'completion': "Options related to completion and command history.", 'tabs': "Configuration of the tab bar.", 'storage': "Settings related to cache and storage.", 'content': "Loaded plugins/scripts and allowed actions.", 'hints': "Hinting settings.", 'searchengines': ( "Definitions of search engines which can be used via the address " "bar.\n" "The searchengine named `DEFAULT` is used when " "`general -> auto-search` is true and something else than a URL was " "entered to be opened. Other search engines can be used by prepending " "the search engine name to the search term, e.g. " "`:open google qutebrowser`. The string `{}` will be replaced by the " "search term, use `{{` and `}}` for literal `{`/`}` signs."), 'aliases': ( "Aliases for commands.\n" "By default, no aliases are defined. 
Example which adds a new command " "`:qtb` to open qutebrowsers website:\n\n" "`qtb = open http://www.qutebrowser.org/`"), 'colors': ( "Colors used in the UI.\n" "A value can be in one of the following format:\n\n" " * `#RGB`/`#RRGGBB`/`#RRRGGGBBB`/`#RRRRGGGGBBBB`\n" " * A SVG color name as specified in http://www.w3.org/TR/SVG/" "types.html#ColorKeywords[the W3C specification].\n" " * transparent (no color)\n" " * `rgb(r, g, b)` / `rgba(r, g, b, a)` (values 0-255 or " "percentages)\n" " * `hsv(h, s, v)` / `hsva(h, s, v, a)` (values 0-255, hue 0-359)\n" " * A gradient as explained in http://doc.qt.io/qt-5/" "stylesheet-reference.html#list-of-property-types[the Qt " "documentation] under ``Gradient''.\n\n" "A *.system value determines the color system to use for color " "interpolation between similarly-named *.start and *.stop entries, " "regardless of how they are defined in the options. " "Valid values are 'rgb', 'hsv', and 'hsl'.\n\n" "The `hints.*` values are a special case as they're real CSS " "colors, not Qt-CSS colors. There, for a gradient, you need to use " "`-webkit-gradient`, see https://www.webkit.org/blog/175/introducing-" "css-gradients/[the WebKit documentation]."), 'fonts': ( "Fonts used for the UI, with optional style/weight/size.\n\n" " * Style: `normal`/`italic`/`oblique`\n" " * Weight: `normal`, `bold`, `100`..`900`\n" " * Size: _number_ `px`/`pt`"), } DEFAULT_FONT_SIZE = '10pt' if sys.platform == 'darwin' else '8pt' def data(readonly=False): """Get the default config data. Return: A {name: section} OrderedDict. """ return collections.OrderedDict([ ('general', sect.KeyValue( ('ignore-case', SettingValue(typ.IgnoreCase(), 'smart'), "Whether to find text on a page case-insensitively."), ('wrap-search', SettingValue(typ.Bool(), 'true'), "Whether to wrap finding text to the top when arriving at the " "end."), ('startpage', SettingValue(typ.List(), 'https://duckduckgo.com'), "The default page(s) to open at the start, separated by commas."), ('default-page', SettingValue(typ.FuzzyUrl(), '${startpage}'), "The page to open if :open -t/-b/-w is used without URL. 
Use " "`about:blank` for a blank page."), ('auto-search', SettingValue(typ.AutoSearch(), 'naive'), "Whether to start a search when something else than a URL is " "entered."), ('auto-save-config', SettingValue(typ.Bool(), 'true'), "Whether to save the config automatically on quit."), ('auto-save-interval', SettingValue(typ.Int(minval=0), '15000'), "How often (in milliseconds) to auto-save config/cookies/etc."), ('editor', SettingValue(typ.ShellCommand(placeholder=True), 'gvim -f "{}"'), "The editor (and arguments) to use for the `open-editor` " "command.\n\n" "The arguments get split like in a shell, so you can use `\"` or " "`'` to quote them.\n" "`{}` gets replaced by the filename of the file to be edited."), ('editor-encoding', SettingValue(typ.Encoding(), 'utf-8'), "Encoding to use for editor."), ('private-browsing', SettingValue(typ.Bool(), 'false'), "Do not record visited pages in the history or store web page " "icons."), ('developer-extras', SettingValue(typ.Bool(), 'false'), "Enable extra tools for Web developers.\n\n" "This needs to be enabled for `:inspector` to work and also adds " "an _Inspect_ entry to the context menu."), ('print-element-backgrounds', SettingValue(typ.Bool(), 'true'), "Whether the background color and images are also drawn when the " "page is printed."), ('xss-auditing', SettingValue(typ.Bool(), 'false'), "Whether load requests should be monitored for cross-site " "scripting attempts.\n\n" "Suspicious scripts will be blocked and reported in the " "inspector's JavaScript console. Enabling this feature might " "have an impact on performance."), ('site-specific-quirks', SettingValue(typ.Bool(), 'true'), "Enable workarounds for broken sites."), ('default-encoding', SettingValue(typ.String(none_ok=True), ''), "Default encoding to use for websites.\n\n" "The encoding must be a string describing an encoding such as " "_utf-8_, _iso-8859-1_, etc. 
If left empty a default value will " "be used."), ('new-instance-open-target', SettingValue(typ.String( valid_values=typ.ValidValues( ('tab', "Open a new tab in the existing " "window and activate the window."), ('tab-bg', "Open a new background tab in the " "existing window and activate the " "window."), ('tab-silent', "Open a new tab in the existing " "window without activating " "the window."), ('tab-bg-silent', "Open a new background tab " "in the existing window " "without activating the " "window."), ('window', "Open in a new window.") )), 'tab'), "How to open links in an existing instance if a new one is " "launched."), ('log-javascript-console', SettingValue(typ.String( valid_values=typ.ValidValues( ('none', "Don't log messages."), ('debug', "Log messages with debug level."), ('info', "Log messages with info level.") )), 'debug'), "How to log javascript console messages."), ('save-session', SettingValue(typ.Bool(), 'false'), "Whether to always save the open pages."), ('session-default-name', SettingValue(typ.SessionName(none_ok=True), ''), "The name of the session to save by default, or empty for the " "last loaded session."), ('url-incdec-segments', SettingValue( typ.FlagList(valid_values=typ.ValidValues( 'host', 'path', 'query', 'anchor')), 'path,query'), "The URL segments where `:navigate increment/decrement` will " "search for a number."), readonly=readonly )), ('ui', sect.KeyValue( ('zoom-levels', SettingValue(typ.PercList(minval=0), '25%,33%,50%,67%,75%,90%,100%,110%,125%,150%,175%,' '200%,250%,300%,400%,500%'), "The available zoom levels, separated by commas."), ('default-zoom', SettingValue(typ.Perc(), '100%'), "The default zoom level."), ('downloads-position', SettingValue(typ.VerticalPosition(), 'top'), "Where to show the downloaded files."), ('message-timeout', SettingValue(typ.Int(), '2000'), "Time (in ms) to show messages in the statusbar for."), ('message-unfocused', SettingValue(typ.Bool(), 'false'), "Whether to show messages in unfocused windows."), ('confirm-quit', SettingValue(typ.ConfirmQuit(), 'never'), "Whether to confirm quitting the application."), ('display-statusbar-messages', SettingValue(typ.Bool(), 'false'), "Whether to display javascript statusbar messages."), ('zoom-text-only', SettingValue(typ.Bool(), 'false'), "Whether the zoom factor on a frame applies only to the text or " "to all content."), ('frame-flattening', SettingValue(typ.Bool(), 'false'), "Whether to expand each subframe to its contents.\n\n" "This will flatten all the frames to become one scrollable " "page."), ('user-stylesheet', SettingValue(typ.UserStyleSheet(none_ok=True), '::-webkit-scrollbar { width: 0px; height: 0px; }'), "User stylesheet to use (absolute filename, filename relative to " "the config directory or CSS string). Will expand environment " "variables."), ('css-media-type', SettingValue(typ.String(none_ok=True), ''), "Set the CSS media type."), ('smooth-scrolling', SettingValue(typ.Bool(), 'false'), "Whether to enable smooth scrolling for webpages."), ('remove-finished-downloads', SettingValue(typ.Int(minval=-1), '-1'), "Number of milliseconds to wait before removing finished " "downloads. 
Will not be removed if value is -1."), ('hide-statusbar', SettingValue(typ.Bool(), 'false'), "Whether to hide the statusbar unless a message is shown."), ('statusbar-padding', SettingValue(typ.Padding(), '1,1,0,0'), "Padding for statusbar (top, bottom, left, right)."), ('window-title-format', SettingValue(typ.FormatString(fields=['perc', 'perc_raw', 'title', 'title_sep', 'id', 'scroll_pos']), '{perc}{title}{title_sep}qutebrowser'), "The format to use for the window title. The following " "placeholders are defined:\n\n" "* `{perc}`: The percentage as a string like `[10%]`.\n" "* `{perc_raw}`: The raw percentage, e.g. `10`\n" "* `{title}`: The title of the current web page\n" "* `{title_sep}`: The string ` - ` if a title is set, empty " "otherwise.\n" "* `{id}`: The internal window ID of this window.\n" "* `{scroll_pos}`: The page scroll position."), ('hide-mouse-cursor', SettingValue(typ.Bool(), 'false'), "Whether to hide the mouse cursor."), ('modal-js-dialog', SettingValue(typ.Bool(), 'false'), "Use standard JavaScript modal dialog for alert() and confirm()"), ('hide-wayland-decoration', SettingValue(typ.Bool(), 'false'), "Hide the window decoration when using wayland " "(requires restart)"), ('show-keyhints', SettingValue(typ.Bool(), 'true'), "Show possible keychains based on the current keystring"), readonly=readonly )), ('network', sect.KeyValue( ('do-not-track', SettingValue(typ.Bool(), 'true'), "Value to send in the `DNT` header."), ('accept-language', SettingValue(typ.String(none_ok=True), 'en-US,en'), "Value to send in the `accept-language` header."), ('referer-header', SettingValue(typ.String( valid_values=typ.ValidValues( ('always', "Always send."), ('never', "Never send; this is not recommended," " as some sites may break."), ('same-domain', "Only send for the same domain." " This will still protect your privacy, but" " shouldn't break any sites.") )), 'same-domain'), "Send the Referer header"), ('user-agent', SettingValue(typ.UserAgent(none_ok=True), ''), "User agent to send. Empty to send the default."), ('proxy', SettingValue(typ.Proxy(), 'system'), "The proxy to use.\n\n" "In addition to the listed values, you can use a `socks://...` " "or `http://...` URL."), ('proxy-dns-requests', SettingValue(typ.Bool(), 'true'), "Whether to send DNS requests over the configured proxy."), ('ssl-strict', SettingValue(typ.BoolAsk(), 'ask'), "Whether to validate SSL handshakes."), ('dns-prefetch', SettingValue(typ.Bool(), 'true'), "Whether to try to pre-fetch DNS entries to speed up browsing."), ('custom-headers', SettingValue(typ.HeaderDict(none_ok=True), ''), "Set custom headers for qutebrowser HTTP requests."), readonly=readonly )), ('completion', sect.KeyValue( ('auto-open', SettingValue(typ.Bool(), 'true'), "Automatically open completion when typing."), ('download-path-suggestion', SettingValue( typ.String(valid_values=typ.ValidValues( ('path', "Show only the download path."), ('filename', "Show only download filename."), ('both', "Show download path and filename."))), 'path'), "What to display in the download filename input."), ('timestamp-format', SettingValue(typ.TimestampTemplate(none_ok=True), '%Y-%m-%d'), "How to format timestamps (e.g. 
for history)"), ('show', SettingValue(typ.Bool(), 'true'), "Whether to show the autocompletion window."), ('height', SettingValue(typ.PercOrInt(minperc=0, maxperc=100, minint=1), '50%'), "The height of the completion, in px or as percentage of the " "window."), ('cmd-history-max-items', SettingValue(typ.Int(minval=-1), '100'), "How many commands to save in the command history.\n\n" "0: no history / -1: unlimited"), ('web-history-max-items', SettingValue(typ.Int(minval=-1), '1000'), "How many URLs to show in the web history.\n\n" "0: no history / -1: unlimited"), ('quick-complete', SettingValue(typ.Bool(), 'true'), "Whether to move on to the next part when there's only one " "possible completion left."), ('shrink', SettingValue(typ.Bool(), 'false'), "Whether to shrink the completion to be smaller than the " "configured size if there are no scrollbars."), ('scrollbar-width', SettingValue(typ.Int(minval=0), '12'), "Width of the scrollbar in the completion window (in px)."), ('scrollbar-padding', SettingValue(typ.Int(minval=0), '2'), "Padding of scrollbar handle in completion window (in px)."), readonly=readonly )), ('input', sect.KeyValue( ('timeout', SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '500'), "Timeout for ambiguous key bindings.\n\n" "If the current input forms both a complete match and a partial " "match, the complete match will be executed after this time."), ('partial-timeout', SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '5000'), "Timeout for partially typed key bindings.\n\n" "If the current input forms only partial matches, the keystring " "will be cleared after this time."), ('insert-mode-on-plugins', SettingValue(typ.Bool(), 'false'), "Whether to switch to insert mode when clicking flash and other " "plugins."), ('auto-leave-insert-mode', SettingValue(typ.Bool(), 'true'), "Whether to leave insert mode if a non-editable element is " "clicked."), ('auto-insert-mode', SettingValue(typ.Bool(), 'false'), "Whether to automatically enter insert mode if an editable " "element is focused after page load."), ('forward-unbound-keys', SettingValue(typ.String( valid_values=typ.ValidValues( ('all', "Forward all unbound keys."), ('auto', "Forward unbound non-alphanumeric " "keys."), ('none', "Don't forward any keys.") )), 'auto'), "Whether to forward unbound keys to the webview in normal mode."), ('spatial-navigation', SettingValue(typ.Bool(), 'false'), "Enables or disables the Spatial Navigation feature.\n\n" "Spatial navigation consists in the ability to navigate between " "focusable elements in a Web page, such as hyperlinks and form " "controls, by using Left, Right, Up and Down arrow keys. For " "example, if a user presses the Right key, heuristics determine " "whether there is an element he might be trying to reach towards " "the right and which element he probably wants."), ('links-included-in-focus-chain', SettingValue(typ.Bool(), 'true'), "Whether hyperlinks should be included in the keyboard focus " "chain."), ('rocker-gestures', SettingValue(typ.Bool(), 'false'), "Whether to enable Opera-like mouse rocker gestures. 
This " "disables the context menu."), ('mouse-zoom-divider', SettingValue(typ.Int(minval=1), '512'), "How much to divide the mouse wheel movements to translate them " "into zoom increments."), readonly=readonly )), ('tabs', sect.KeyValue( ('background-tabs', SettingValue(typ.Bool(), 'false'), "Whether to open new tabs (middleclick/ctrl+click) in " "background."), ('select-on-remove', SettingValue(typ.SelectOnRemove(), 'right'), "Which tab to select when the focused tab is removed."), ('new-tab-position', SettingValue(typ.NewTabPosition(), 'right'), "How new tabs are positioned."), ('new-tab-position-explicit', SettingValue(typ.NewTabPosition(), 'last'), "How new tabs opened explicitly are positioned."), ('last-close', SettingValue(typ.String( valid_values=typ.ValidValues( ('ignore', "Don't do anything."), ('blank', "Load a blank page."), ('startpage', "Load the start page."), ('default-page', "Load the default page."), ('close', "Close the window.") )), 'ignore'), "Behavior when the last tab is closed."), ('show', SettingValue( typ.String(valid_values=typ.ValidValues( ('always', "Always show the tab bar."), ('never', "Always hide the tab bar."), ('multiple', "Hide the tab bar if only one tab " "is open."), ('switching', "Show the tab bar when switching " "tabs.") )), 'always'), "When to show the tab bar"), ('show-switching-delay', SettingValue(typ.Int(), '800'), "Time to show the tab bar before hiding it when tabs->show is " "set to 'switching'."), ('wrap', SettingValue(typ.Bool(), 'true'), "Whether to wrap when changing tabs."), ('movable', SettingValue(typ.Bool(), 'true'), "Whether tabs should be movable."), ('close-mouse-button', SettingValue(typ.String( valid_values=typ.ValidValues( ('right', "Close tabs on right-click."), ('middle', "Close tabs on middle-click."), ('none', "Don't close tabs using the mouse.") )), 'middle'), "On which mouse button to close tabs."), ('position', SettingValue(typ.Position(), 'top'), "The position of the tab bar."), ('show-favicons', SettingValue(typ.Bool(), 'true'), "Whether to show favicons in the tab bar."), ('width', SettingValue(typ.PercOrInt(minperc=0, maxperc=100, minint=1), '20%'), "The width of the tab bar if it's vertical, in px or as " "percentage of the window."), ('indicator-width', SettingValue(typ.Int(minval=0), '3'), "Width of the progress indicator (0 to disable)."), ('tabs-are-windows', SettingValue(typ.Bool(), 'false'), "Whether to open windows instead of tabs."), ('title-format', SettingValue(typ.FormatString( fields=['perc', 'perc_raw', 'title', 'title_sep', 'index', 'id', 'scroll_pos']), '{index}: {title}'), "The format to use for the tab title. The following placeholders " "are defined:\n\n" "* `{perc}`: The percentage as a string like `[10%]`.\n" "* `{perc_raw}`: The raw percentage, e.g. 
`10`\n" "* `{title}`: The title of the current web page\n" "* `{title_sep}`: The string ` - ` if a title is set, empty " "otherwise.\n" "* `{index}`: The index of this tab.\n" "* `{id}`: The internal tab ID of this tab.\n" "* `{scroll_pos}`: The page scroll position."), ('title-alignment', SettingValue(typ.TextAlignment(), 'left'), "Alignment of the text inside of tabs"), ('mousewheel-tab-switching', SettingValue(typ.Bool(), 'true'), "Switch between tabs using the mouse wheel."), ('padding', SettingValue(typ.Padding(), '0,0,5,5'), "Padding for tabs (top, bottom, left, right)."), ('indicator-padding', SettingValue(typ.Padding(), '2,2,0,4'), "Padding for indicators (top, bottom, left, right)."), readonly=readonly )), ('storage', sect.KeyValue( ('download-directory', SettingValue(typ.Directory(none_ok=True), ''), "The directory to save downloads to. An empty value selects a " "sensible os-specific default. Will expand environment " "variables."), ('prompt-download-directory', SettingValue(typ.Bool(), 'true'), "Whether to prompt the user for the download location.\n" "If set to false, 'download-directory' will be used."), ('remember-download-directory', SettingValue(typ.Bool(), 'true'), "Whether to remember the last used download directory."), ('maximum-pages-in-cache', SettingValue( typ.Int(none_ok=True, minval=0, maxval=MAXVALS['int']), ''), "The maximum number of pages to hold in the global memory page " "cache.\n\n" "The Page Cache allows for a nicer user experience when " "navigating forth or back to pages in the forward/back history, " "by pausing and resuming up to _n_ pages.\n\n" "For more information about the feature, please refer to: " "http://webkit.org/blog/427/webkit-page-cache-i-the-basics/"), ('object-cache-capacities', SettingValue( typ.WebKitBytesList(length=3, maxsize=MAXVALS['int'], none_ok=True), ''), "The capacities for the global memory cache for dead objects " "such as stylesheets or scripts. Syntax: cacheMinDeadCapacity, " "cacheMaxDead, totalCapacity.\n\n" "The _cacheMinDeadCapacity_ specifies the minimum number of " "bytes that dead objects should consume when the cache is under " "pressure.\n\n" "_cacheMaxDead_ is the maximum number of bytes that dead objects " "should consume when the cache is *not* under pressure.\n\n" "_totalCapacity_ specifies the maximum number of bytes " "that the cache should consume *overall*."), ('offline-storage-default-quota', SettingValue(typ.WebKitBytes(maxsize=MAXVALS['int64'], none_ok=True), ''), "Default quota for new offline storage databases."), ('offline-web-application-cache-quota', SettingValue(typ.WebKitBytes(maxsize=MAXVALS['int64'], none_ok=True), ''), "Quota for the offline web application cache."), ('offline-storage-database', SettingValue(typ.Bool(), 'true'), "Whether support for the HTML 5 offline storage feature is " "enabled."), ('offline-web-application-storage', SettingValue(typ.Bool(), 'true'), "Whether support for the HTML 5 web application cache feature is " "enabled.\n\n" "An application cache acts like an HTTP cache in some sense. 
For " "documents that use the application cache via JavaScript, the " "loader engine will first ask the application cache for the " "contents, before hitting the network.\n\n" "The feature is described in details at: " "http://dev.w3.org/html5/spec/Overview.html#appcache"), ('local-storage', SettingValue(typ.Bool(), 'true'), "Whether support for the HTML 5 local storage feature is " "enabled."), ('cache-size', SettingValue(typ.Int(minval=0, maxval=MAXVALS['int64']), '52428800'), "Size of the HTTP network cache."), readonly=readonly )), ('content', sect.KeyValue( ('allow-images', SettingValue(typ.Bool(), 'true'), "Whether images are automatically loaded in web pages."), ('allow-javascript', SettingValue(typ.Bool(), 'true'), "Enables or disables the running of JavaScript programs."), ('allow-plugins', SettingValue(typ.Bool(), 'false'), "Enables or disables plugins in Web pages.\n\n" 'Qt plugins with a mimetype such as "application/x-qt-plugin" ' "are not affected by this setting."), ('webgl', SettingValue(typ.Bool(), 'false'), "Enables or disables WebGL."), ('css-regions', SettingValue(typ.Bool(), 'true'), "Enable or disable support for CSS regions."), ('hyperlink-auditing', SettingValue(typ.Bool(), 'false'), "Enable or disable hyperlink auditing (<a ping>)."), ('geolocation', SettingValue(typ.BoolAsk(), 'ask'), "Allow websites to request geolocations."), ('notifications', SettingValue(typ.BoolAsk(), 'ask'), "Allow websites to show notifications."), #('allow-java', # SettingValue(typ.Bool(), 'true'), # "Enables or disables Java applets. Currently Java applets are " # "not supported"), ('javascript-can-open-windows', SettingValue(typ.Bool(), 'false'), "Whether JavaScript programs can open new windows."), ('javascript-can-close-windows', SettingValue(typ.Bool(), 'false'), "Whether JavaScript programs can close windows."), ('javascript-can-access-clipboard', SettingValue(typ.Bool(), 'false'), "Whether JavaScript programs can read or write to the " "clipboard."), ('ignore-javascript-prompt', SettingValue(typ.Bool(), 'false'), "Whether all javascript prompts should be ignored."), ('ignore-javascript-alert', SettingValue(typ.Bool(), 'false'), "Whether all javascript alerts should be ignored."), ('local-content-can-access-remote-urls', SettingValue(typ.Bool(), 'false'), "Whether locally loaded documents are allowed to access remote " "urls."), ('local-content-can-access-file-urls', SettingValue(typ.Bool(), 'true'), "Whether locally loaded documents are allowed to access other " "local urls."), ('cookies-accept', SettingValue(typ.String( valid_values=typ.ValidValues( ('all', "Accept all cookies."), ('no-3rdparty', "Accept cookies from the same" " origin only."), ('no-unknown-3rdparty', "Accept cookies from " "the same origin only, unless a cookie is " "already set for the domain."), ('never', "Don't accept cookies at all.") )), 'no-3rdparty'), "Control which cookies to accept."), ('cookies-store', SettingValue(typ.Bool(), 'true'), "Whether to store cookies."), ('host-block-lists', SettingValue( typ.UrlList(none_ok=True), 'http://www.malwaredomainlist.com/hostslist/hosts.txt,' 'http://someonewhocares.org/hosts/hosts,' 'http://winhelp2002.mvps.org/hosts.zip,' 'http://malwaredomains.lehigh.edu/files/justdomains.zip,' 'http://pgl.yoyo.org/adservers/serverlist.php?' 
'hostformat=hosts&mimetype=plaintext'), "List of URLs of lists which contain hosts to block.\n\n" "The file can be in one of the following formats:\n\n" "- An '/etc/hosts'-like file\n" "- One host per line\n" "- A zip-file of any of the above, with either only one file, or " "a file named 'hosts' (with any extension)."), ('host-blocking-enabled', SettingValue(typ.Bool(), 'true'), "Whether host blocking is enabled."), ('host-blocking-whitelist', SettingValue(typ.List(none_ok=True), 'piwik.org'), "List of domains that should always be loaded, despite being " "ad-blocked.\n\n" "Domains may contain * and ? wildcards and are otherwise " "required to exactly match the requested domain.\n\n" "Local domains are always exempt from hostblocking."), ('enable-pdfjs', SettingValue(typ.Bool(), 'false'), "Enable pdf.js to view PDF files in the browser.\n\n" "Note that the files can still be downloaded by clicking" " the download button in the pdf.js viewer."), readonly=readonly )), ('hints', sect.KeyValue( ('border', SettingValue(typ.String(), '1px solid #E3BE23'), "CSS border value for hints."), ('opacity', SettingValue(typ.Float(minval=0.0, maxval=1.0), '0.7'), "Opacity for hints."), ('mode', SettingValue(typ.String( valid_values=typ.ValidValues( ('number', "Use numeric hints."), ('letter', "Use the chars in the hints -> " "chars setting."), ('word', "Use hints words based on the html " "elements and the extra words."), )), 'letter'), "Mode to use for hints."), ('chars', SettingValue(typ.UniqueCharString(minlen=2, completions=[ ('asdfghjkl', "Home row"), ('dhtnaoeu', "Home row (Dvorak)"), ('abcdefghijklmnopqrstuvwxyz', "All letters"), ]), 'asdfghjkl'), "Chars used for hint strings."), ('min-chars', SettingValue(typ.Int(minval=1), '1'), "Minimum number of chars used for hint strings."), ('scatter', SettingValue(typ.Bool(), 'true'), "Whether to scatter hint key chains (like Vimium) or not (like " "dwb)."), ('uppercase', SettingValue(typ.Bool(), 'false'), "Make chars in hint strings uppercase."), ('dictionary', SettingValue(typ.File(required=False), '/usr/share/dict/words'), "The dictionary file to be used by the word hints."), ('auto-follow', SettingValue(typ.Bool(), 'true'), "Follow a hint immediately when the hint text is completely " "matched."), ('next-regexes', SettingValue(typ.RegexList(flags=re.IGNORECASE), r'\bnext\b,\bmore\b,\bnewer\b,\b[>→≫]\b,\b(>>|»)\b,' r'\bcontinue\b'), "A comma-separated list of regexes to use for 'next' links."), ('prev-regexes', SettingValue(typ.RegexList(flags=re.IGNORECASE), r'\bprev(ious)?\b,\bback\b,\bolder\b,\b[<←≪]\b,' r'\b(<<|«)\b'), "A comma-separated list of regexes to use for 'prev' links."), readonly=readonly )), ('searchengines', sect.ValueList( typ.SearchEngineName(), typ.SearchEngineUrl(), ('DEFAULT', 'https://duckduckgo.com/?q={}'), readonly=readonly )), ('aliases', sect.ValueList( typ.String(forbidden=' '), typ.Command(), readonly=readonly )), ('colors', sect.KeyValue( ('completion.fg', SettingValue(typ.QtColor(), 'white'), "Text color of the completion widget."), ('completion.bg', SettingValue(typ.QssColor(), '#333333'), "Background color of the completion widget."), ('completion.alternate-bg', SettingValue(typ.QssColor(), '#444444'), "Alternating background color of the completion widget."), ('completion.category.fg', SettingValue(typ.QtColor(), 'white'), "Foreground color of completion widget category headers."), ('completion.category.bg', SettingValue(typ.QssColor(), 'qlineargradient(x1:0, y1:0, x2:0, ' 'y2:1, stop:0 #888888, stop:1 #505050)'), 
"Background color of the completion widget category headers."), ('completion.category.border.top', SettingValue(typ.QssColor(), 'black'), "Top border color of the completion widget category headers."), ('completion.category.border.bottom', SettingValue(typ.QssColor(), '${completion.category.border.top}'), "Bottom border color of the completion widget category headers."), ('completion.item.selected.fg', SettingValue(typ.QtColor(), 'black'), "Foreground color of the selected completion item."), ('completion.item.selected.bg', SettingValue(typ.QssColor(), '#e8c000'), "Background color of the selected completion item."), ('completion.item.selected.border.top', SettingValue(typ.QssColor(), '#bbbb00'), "Top border color of the completion widget category headers."), ('completion.item.selected.border.bottom', SettingValue( typ.QssColor(), '${completion.item.selected.border.top}'), "Bottom border color of the selected completion item."), ('completion.match.fg', SettingValue(typ.QssColor(), '#ff4444'), "Foreground color of the matched text in the completion."), ('completion.scrollbar.fg', SettingValue(typ.QssColor(), '${completion.fg}'), "Color of the scrollbar handle in completion view."), ('completion.scrollbar.bg', SettingValue(typ.QssColor(), '${completion.bg}'), "Color of the scrollbar in completion view"), ('statusbar.fg', SettingValue(typ.QssColor(), 'white'), "Foreground color of the statusbar."), ('statusbar.bg', SettingValue(typ.QssColor(), 'black'), "Background color of the statusbar."), ('statusbar.fg.error', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar if there was an error."), ('statusbar.bg.error', SettingValue(typ.QssColor(), 'red'), "Background color of the statusbar if there was an error."), ('statusbar.fg.warning', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar if there is a warning."), ('statusbar.bg.warning', SettingValue(typ.QssColor(), 'darkorange'), "Background color of the statusbar if there is a warning."), ('statusbar.fg.prompt', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar if there is a prompt."), ('statusbar.bg.prompt', SettingValue(typ.QssColor(), 'darkblue'), "Background color of the statusbar if there is a prompt."), ('statusbar.fg.insert', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar in insert mode."), ('statusbar.bg.insert', SettingValue(typ.QssColor(), 'darkgreen'), "Background color of the statusbar in insert mode."), ('statusbar.fg.command', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar in command mode."), ('statusbar.bg.command', SettingValue(typ.QssColor(), '${statusbar.bg}'), "Background color of the statusbar in command mode."), ('statusbar.fg.caret', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar in caret mode."), ('statusbar.bg.caret', SettingValue(typ.QssColor(), 'purple'), "Background color of the statusbar in caret mode."), ('statusbar.fg.caret-selection', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar in caret mode with a " "selection"), ('statusbar.bg.caret-selection', SettingValue(typ.QssColor(), '#a12dff'), "Background color of the statusbar in caret mode with a " "selection"), ('statusbar.progress.bg', SettingValue(typ.QssColor(), 'white'), "Background color of the progress bar."), ('statusbar.url.fg', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Default foreground color of the URL 
in the statusbar."), ('statusbar.url.fg.success', SettingValue(typ.QssColor(), 'white'), "Foreground color of the URL in the statusbar on successful " "load (http)."), ('statusbar.url.fg.success.https', SettingValue(typ.QssColor(), 'lime'), "Foreground color of the URL in the statusbar on successful " "load (https)."), ('statusbar.url.fg.error', SettingValue(typ.QssColor(), 'orange'), "Foreground color of the URL in the statusbar on error."), ('statusbar.url.fg.warn', SettingValue(typ.QssColor(), 'yellow'), "Foreground color of the URL in the statusbar when there's a " "warning."), ('statusbar.url.fg.hover', SettingValue(typ.QssColor(), 'aqua'), "Foreground color of the URL in the statusbar for hovered " "links."), ('tabs.fg.odd', SettingValue(typ.QtColor(), 'white'), "Foreground color of unselected odd tabs."), ('tabs.bg.odd', SettingValue(typ.QtColor(), 'grey'), "Background color of unselected odd tabs."), ('tabs.fg.even', SettingValue(typ.QtColor(), 'white'), "Foreground color of unselected even tabs."), ('tabs.bg.even', SettingValue(typ.QtColor(), 'darkgrey'), "Background color of unselected even tabs."), ('tabs.fg.selected.odd', SettingValue(typ.QtColor(), 'white'), "Foreground color of selected odd tabs."), ('tabs.bg.selected.odd', SettingValue(typ.QtColor(), 'black'), "Background color of selected odd tabs."), ('tabs.fg.selected.even', SettingValue(typ.QtColor(), '${tabs.fg.selected.odd}'), "Foreground color of selected even tabs."), ('tabs.bg.selected.even', SettingValue(typ.QtColor(), '${tabs.bg.selected.odd}'), "Background color of selected even tabs."), ('tabs.bg.bar', SettingValue(typ.QtColor(), '#555555'), "Background color of the tab bar."), ('tabs.indicator.start', SettingValue(typ.QtColor(), '#0000aa'), "Color gradient start for the tab indicator."), ('tabs.indicator.stop', SettingValue(typ.QtColor(), '#00aa00'), "Color gradient end for the tab indicator."), ('tabs.indicator.error', SettingValue(typ.QtColor(), '#ff0000'), "Color for the tab indicator on errors.."), ('tabs.indicator.system', SettingValue(typ.ColorSystem(), 'rgb'), "Color gradient interpolation system for the tab indicator."), ('hints.fg', SettingValue(typ.CssColor(), 'black'), "Font color for hints."), ('hints.bg', SettingValue( typ.CssColor(), '-webkit-gradient(linear, left top, ' 'left bottom, color-stop(0%,#FFF785), ' 'color-stop(100%,#FFC542))'), "Background color for hints."), ('hints.fg.match', SettingValue(typ.CssColor(), 'green'), "Font color for the matched part of hints."), ('downloads.bg.bar', SettingValue(typ.QssColor(), 'black'), "Background color for the download bar."), ('downloads.fg.start', SettingValue(typ.QtColor(), 'white'), "Color gradient start for download text."), ('downloads.bg.start', SettingValue(typ.QtColor(), '#0000aa'), "Color gradient start for download backgrounds."), ('downloads.fg.stop', SettingValue(typ.QtColor(), '${downloads.fg.start}'), "Color gradient end for download text."), ('downloads.bg.stop', SettingValue(typ.QtColor(), '#00aa00'), "Color gradient stop for download backgrounds."), ('downloads.fg.system', SettingValue(typ.ColorSystem(), 'rgb'), "Color gradient interpolation system for download text."), ('downloads.bg.system', SettingValue(typ.ColorSystem(), 'rgb'), "Color gradient interpolation system for download backgrounds."), ('downloads.fg.error', SettingValue(typ.QtColor(), 'white'), "Foreground color for downloads with errors."), ('downloads.bg.error', SettingValue(typ.QtColor(), 'red'), "Background color for downloads with errors."), ('webpage.bg', 
SettingValue(typ.QtColor(none_ok=True), 'white'), "Background color for webpages if unset (or empty to use the " "theme's color)"), ('keyhint.fg', SettingValue(typ.QssColor(), '#FFFFFF'), "Text color for the keyhint widget."), ('keyhint.fg.suffix', SettingValue(typ.CssColor(), '#FFFF00'), "Highlight color for keys to complete the current keychain"), ('keyhint.bg', SettingValue(typ.QssColor(), 'rgba(0, 0, 0, 80%)'), "Background color of the keyhint widget."), readonly=readonly )), ('fonts', sect.KeyValue( ('_monospace', SettingValue(typ.Font(), 'Terminus, Monospace, ' '"DejaVu Sans Mono", Monaco, ' '"Bitstream Vera Sans Mono", "Andale Mono", ' '"Courier New", Courier, "Liberation Mono", ' 'monospace, Fixed, Consolas, Terminal'), "Default monospace fonts."), ('completion', SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used in the completion widget."), ('tabbar', SettingValue(typ.QtFont(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used in the tab bar."), ('statusbar', SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used in the statusbar."), ('downloads', SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used for the downloadbar."), ('hints', SettingValue(typ.Font(), 'bold 13px Monospace'), "Font used for the hints."), ('debug-console', SettingValue(typ.QtFont(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used for the debugging console."), ('web-family-standard', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for standard fonts."), ('web-family-fixed', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for fixed fonts."), ('web-family-serif', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for serif fonts."), ('web-family-sans-serif', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for sans-serif fonts."), ('web-family-cursive', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for cursive fonts."), ('web-family-fantasy', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for fantasy fonts."), ('web-size-minimum', SettingValue( typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''), "The hard minimum font size."), ('web-size-minimum-logical', SettingValue( typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''), "The minimum logical font size that is applied when zooming " "out."), ('web-size-default', SettingValue( typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''), "The default font size for regular text."), ('web-size-default-fixed', SettingValue( typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''), "The default font size for fixed-pitch text."), ('keyhint', SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used in the keyhint widget."), readonly=readonly )), ]) DATA = data(readonly=True) KEY_FIRST_COMMENT = """ # vim: ft=conf # # In this config file, qutebrowser's key bindings are configured. # The format looks like this: # # [keymode] # # command # keychain # keychain2 # ... # # All blank lines and lines starting with '#' are ignored. # Inline-comments are not permitted. # # keymode is a comma separated list of modes in which the key binding should be # active. If keymode starts with !, the key binding is active in all modes # except the listed modes. # # For special keys (can't be part of a keychain), enclose them in `<`...`>`. 
# For modifiers, you can use either `-` or `+` as delimiters, and these names: # # * Control: `Control`, `Ctrl` # * Meta: `Meta`, `Windows`, `Mod4` # * Alt: `Alt`, `Mod1` # * Shift: `Shift` # # For simple keys (no `<>`-signs), a capital letter means the key is pressed # with Shift. For special keys (with `<>`-signs), you need to explicitly add # `Shift-` to match a key pressed with shift. You can bind multiple commands # by separating them with `;;`. # # Note that default keybindings are always bound, and need to be explicitly # unbound if you wish to remove them: # # <unbound> # keychain # keychain2 # ... """ KEY_SECTION_DESC = { 'all': "Keybindings active in all modes.", 'normal': "Keybindings for normal mode.", 'insert': ( "Keybindings for insert mode.\n" "Since normal keypresses are passed through, only special keys are " "supported in this mode.\n" "Useful hidden commands to map in this section:\n\n" " * `open-editor`: Open a texteditor with the focused field.\n" " * `paste-primary`: Paste primary selection at cursor position."), 'hint': ( "Keybindings for hint mode.\n" "Since normal keypresses are passed through, only special keys are " "supported in this mode.\n" "Useful hidden commands to map in this section:\n\n" " * `follow-hint`: Follow the currently selected hint."), 'passthrough': ( "Keybindings for passthrough mode.\n" "Since normal keypresses are passed through, only special keys are " "supported in this mode."), 'command': ( "Keybindings for command mode.\n" "Since normal keypresses are passed through, only special keys are " "supported in this mode.\n" "Useful hidden commands to map in this section:\n\n" " * `command-history-prev`: Switch to previous command in history.\n" " * `command-history-next`: Switch to next command in history.\n" " * `completion-item-prev`: Select previous item in completion.\n" " * `completion-item-next`: Select next item in completion.\n" " * `command-accept`: Execute the command currently in the " "commandline."), 'prompt': ( "Keybindings for prompts in the status line.\n" "You can bind normal keys in this mode, but they will be only active " "when a yes/no-prompt is asked. For other prompt modes, you can only " "bind special keys.\n" "Useful hidden commands to map in this section:\n\n" " * `prompt-accept`: Confirm the entered value.\n" " * `prompt-yes`: Answer yes to a yes/no question.\n" " * `prompt-no`: Answer no to a yes/no question."), 'caret': ( ""), } # Keys which are similar to Return and should be bound by default where Return # is bound. 
RETURN_KEYS = ['<Return>', '<Ctrl-M>', '<Ctrl-J>', '<Shift-Return>', '<Enter>', '<Shift-Enter>'] KEY_DATA = collections.OrderedDict([ ('!normal', collections.OrderedDict([ ('clear-keychain ;; leave-mode', ['<Escape>', '<Ctrl-[>']), ])), ('normal', collections.OrderedDict([ ('clear-keychain ;; search', ['<Escape>']), ('set-cmd-text -s :open', ['o']), ('set-cmd-text :open {url:pretty}', ['go']), ('set-cmd-text -s :open -t', ['O']), ('set-cmd-text :open -t {url:pretty}', ['gO']), ('set-cmd-text -s :open -b', ['xo']), ('set-cmd-text :open -b {url:pretty}', ['xO']), ('set-cmd-text -s :open -w', ['wo']), ('set-cmd-text :open -w {url:pretty}', ['wO']), ('open -t', ['ga', '<Ctrl-T>']), ('open -w', ['<Ctrl-N>']), ('tab-close', ['d', '<Ctrl-W>']), ('tab-close -o', ['D']), ('tab-only', ['co']), ('tab-focus', ['T']), ('tab-move', ['gm']), ('tab-move -', ['gl']), ('tab-move +', ['gr']), ('tab-focus', ['J', '<Ctrl-PgDown>']), ('tab-prev', ['K', '<Ctrl-PgUp>']), ('tab-clone', ['gC']), ('reload', ['r', '<F5>']), ('reload -f', ['R', '<Ctrl-F5>']), ('back', ['H']), ('back -t', ['th']), ('back -w', ['wh']), ('forward', ['L']), ('forward -t', ['tl']), ('forward -w', ['wl']), ('fullscreen', ['<F11>']), ('hint', ['f']), ('hint all tab', ['F']), ('hint all window', ['wf']), ('hint all tab-bg', [';b']), ('hint all tab-fg', [';f']), ('hint all hover', [';h']), ('hint images', [';i']), ('hint images tab', [';I']), ('hint images tab-bg', ['.i']), ('hint links fill ":open {hint-url}"', [';o']), ('hint links fill ":open -t {hint-url}"', [';O']), ('hint links fill ":open -b {hint-url}"', ['.o']), ('hint links yank', [';y']), ('hint links yank-primary', [';Y']), ('hint --rapid links tab-bg', [';r']), ('hint --rapid links window', [';R']), ('hint links download', [';d']), ('scroll left', ['h']), ('scroll down', ['j']), ('scroll up', ['k']), ('scroll right', ['l']), ('undo', ['u', '<Ctrl-Shift-T>']), ('scroll-perc 0', ['gg']), ('scroll-perc', ['G']), ('search-next', ['n']), ('search-prev', ['N']), ('enter-mode insert', ['i']), ('enter-mode caret', ['v']), ('enter-mode set_mark', ['`']), ('enter-mode jump_mark', ["'"]), ('yank', ['yy']), ('yank -s', ['yY']), ('yank -t', ['yt']), ('yank -ts', ['yT']), ('yank -d', ['yd']), ('yank -ds', ['yD']), ('yank -p', ['yp']), ('yank -ps', ['yP']), ('paste', ['pp']), ('paste -s', ['pP']), ('paste -t', ['Pp']), ('paste -ts', ['PP']), ('paste -w', ['wp']), ('paste -ws', ['wP']), ('quickmark-save', ['m']), ('set-cmd-text -s :quickmark-load', ['b']), ('set-cmd-text -s :quickmark-load -t', ['B']), ('set-cmd-text -s :quickmark-load -w', ['wb']), ('bookmark-add', ['M']), ('set-cmd-text -s :bookmark-load', ['gb']), ('set-cmd-text -s :bookmark-load -t', ['gB']), ('set-cmd-text -s :bookmark-load -w', ['wB']), ('save', ['sf']), ('set-cmd-text -s :set', ['ss']), ('set-cmd-text -s :set -t', ['sl']), ('set-cmd-text -s :set keybind', ['sk']), ('zoom-out', ['-']), ('zoom-in', ['+']), ('zoom', ['=']), ('navigate prev', ['[[']), ('navigate next', [']]']), ('navigate prev -t', ['{{']), ('navigate next -t', ['}}']), ('navigate up', ['gu']), ('navigate up -t', ['gU']), ('navigate increment', ['<Ctrl-A>']), ('navigate decrement', ['<Ctrl-X>']), ('inspector', ['wi']), ('download', ['gd']), ('download-cancel', ['ad']), ('download-clear', ['cd']), ('view-source', ['gf']), ('set-cmd-text -s :buffer', ['gt']), ('tab-focus last', ['<Ctrl-Tab>']), ('enter-mode passthrough', ['<Ctrl-V>']), ('quit', ['<Ctrl-Q>']), ('scroll-page 0 1', ['<Ctrl-F>']), ('scroll-page 0 -1', ['<Ctrl-B>']), ('scroll-page 0 0.5', 
['<Ctrl-D>']), ('scroll-page 0 -0.5', ['<Ctrl-U>']), ('tab-focus 1', ['<Alt-1>']), ('tab-focus 2', ['<Alt-2>']), ('tab-focus 3', ['<Alt-3>']), ('tab-focus 4', ['<Alt-4>']), ('tab-focus 5', ['<Alt-5>']), ('tab-focus 6', ['<Alt-6>']), ('tab-focus 7', ['<Alt-7>']), ('tab-focus 8', ['<Alt-8>']), ('tab-focus 9', ['<Alt-9>']), ('home', ['<Ctrl-h>']), ('stop', ['<Ctrl-s>']), ('print', ['<Ctrl-Alt-p>']), ('open qute:settings', ['Ss']), ('follow-selected', RETURN_KEYS), ('follow-selected -t', ['<Ctrl-Return>', '<Ctrl-Enter>']), ])), ('insert', collections.OrderedDict([ ('open-editor', ['<Ctrl-E>']), ('paste-primary', ['<Shift-Ins>']), ])), ('hint', collections.OrderedDict([ ('follow-hint', RETURN_KEYS), ('hint --rapid links tab-bg', ['<Ctrl-R>']), ('hint links', ['<Ctrl-F>']), ('hint all tab-bg', ['<Ctrl-B>']), ])), ('passthrough', {}), ('command', collections.OrderedDict([ ('command-history-prev', ['<Ctrl-P>']), ('command-history-next', ['<Ctrl-N>']), ('completion-item-prev', ['<Shift-Tab>', '<Up>']), ('completion-item-next', ['<Tab>', '<Down>']), ('completion-item-del', ['<Ctrl-D>']), ('command-accept', RETURN_KEYS), ])), ('prompt', collections.OrderedDict([ ('prompt-accept', RETURN_KEYS), ('prompt-yes', ['y']), ('prompt-no', ['n']), ])), ('command,prompt', collections.OrderedDict([ ('rl-backward-char', ['<Ctrl-B>']), ('rl-forward-char', ['<Ctrl-F>']), ('rl-backward-word', ['<Alt-B>']), ('rl-forward-word', ['<Alt-F>']), ('rl-beginning-of-line', ['<Ctrl-A>']), ('rl-end-of-line', ['<Ctrl-E>']), ('rl-unix-line-discard', ['<Ctrl-U>']), ('rl-kill-line', ['<Ctrl-K>']), ('rl-kill-word', ['<Alt-D>']), ('rl-unix-word-rubout', ['<Ctrl-W>', '<Alt-Backspace>']), ('rl-yank', ['<Ctrl-Y>']), ('rl-delete-char', ['<Ctrl-?>']), ('rl-backward-delete-char', ['<Ctrl-H>']), ])), ('caret', collections.OrderedDict([ ('toggle-selection', ['v', '<Space>']), ('drop-selection', ['<Ctrl-Space>']), ('enter-mode normal', ['c']), ('move-to-next-line', ['j']), ('move-to-prev-line', ['k']), ('move-to-next-char', ['l']), ('move-to-prev-char', ['h']), ('move-to-end-of-word', ['e']), ('move-to-next-word', ['w']), ('move-to-prev-word', ['b']), ('move-to-start-of-next-block', [']']), ('move-to-start-of-prev-block', ['[']), ('move-to-end-of-next-block', ['}']), ('move-to-end-of-prev-block', ['{']), ('move-to-start-of-line', ['0']), ('move-to-end-of-line', ['$']), ('move-to-start-of-document', ['gg']), ('move-to-end-of-document', ['G']), ('yank-selected -p', ['Y']), ('yank-selected', ['y'] + RETURN_KEYS), ('scroll left', ['H']), ('scroll down', ['J']), ('scroll up', ['K']), ('scroll right', ['L']), ])), ]) # A list of (regex, replacement) tuples of changed key commands. 
CHANGED_KEY_COMMANDS = [ (re.compile(r'^open -([twb]) about:blank$'), r'open -\1'), (re.compile(r'^download-page$'), r'download'), (re.compile(r'^cancel-download$'), r'download-cancel'), (re.compile(r"""^search (''|"")$"""), r'clear-keychain ;; search'), (re.compile(r'^search$'), r'clear-keychain ;; search'), (re.compile(r"""^set-cmd-text ['"](.*) ['"]$"""), r'set-cmd-text -s \1'), (re.compile(r"""^set-cmd-text ['"](.*)['"]$"""), r'set-cmd-text \1'), (re.compile(r"^hint links rapid$"), r'hint --rapid links tab-bg'), (re.compile(r"^hint links rapid-win$"), r'hint --rapid links window'), (re.compile(r'^scroll -50 0$'), r'scroll left'), (re.compile(r'^scroll 0 50$'), r'scroll down'), (re.compile(r'^scroll 0 -50$'), r'scroll up'), (re.compile(r'^scroll 50 0$'), r'scroll right'), (re.compile(r'^scroll ([-\d]+ [-\d]+)$'), r'scroll-px \1'), (re.compile(r'^search *;; *clear-keychain$'), r'clear-keychain ;; search'), (re.compile(r'^leave-mode$'), r'clear-keychain ;; leave-mode'), (re.compile(r'^download-remove --all$'), r'download-clear'), ]
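The file above closes with CHANGED_KEY_COMMANDS, a list of (regex, replacement) tuples describing key-binding commands whose names changed between releases. As a minimal sketch of how such a table could be applied when migrating an old key config, the migrate_command helper and the trimmed-down sample table below are illustrations only, not qutebrowser's actual migration code:

import re

# Hypothetical helper: rewrite an outdated command using a table of
# (compiled regex, replacement) tuples like CHANGED_KEY_COMMANDS above.
def migrate_command(command, changed_commands):
    for pattern, repl in changed_commands:
        new_command = pattern.sub(repl, command)
        if new_command != command:
            return new_command
    return command

# A few entries copied from the table above, for demonstration.
SAMPLE_CHANGED_COMMANDS = [
    (re.compile(r'^download-page$'), r'download'),
    (re.compile(r'^cancel-download$'), r'download-cancel'),
    (re.compile(r'^open -([twb]) about:blank$'), r'open -\1'),
]

assert migrate_command('download-page', SAMPLE_CHANGED_COMMANDS) == 'download'
assert migrate_command('open -t about:blank', SAMPLE_CHANGED_COMMANDS) == 'open -t'
assert migrate_command('scroll left', SAMPLE_CHANGED_COMMANDS) == 'scroll left'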
1
14,806
It'd be nice to fix up existing configs with the change - that'd mean adding the option to `RENAMED_OPTIONS` in `config.py` and adding something like `_get_value_transformer({'true': '', 'false': '*'})` to `CHANGED_OPTIONS`. I think I never tried adding an option to both, but it should work.
qutebrowser-qutebrowser
py
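The review comment a few lines above suggests wiring the renamed option into qutebrowser's config-migration tables. A rough sketch of what such entries could look like, assuming RENAMED_OPTIONS maps (section, old option) keys to new option names and CHANGED_OPTIONS maps (section, option) keys to value-transformer callables; the option names and the stand-in _get_value_transformer below are placeholders for illustration, not the actual code from config.py:

# Stand-in for the _get_value_transformer helper mentioned in the comment:
# build a callable that maps old config values to new ones (the real
# helper in config.py may behave differently for unknown values).
def _get_value_transformer(mapping):
    def transformer(value):
        return mapping.get(value, value)
    return transformer


# Hypothetical migration entries following the suggestion: rename the
# option, then translate its old boolean values to the new format.
RENAMED_OPTIONS = {
    ('content', 'old-option'): 'new-option',
}

CHANGED_OPTIONS = {
    ('content', 'new-option'):
        _get_value_transformer({'true': '', 'false': '*'}),
}

print(CHANGED_OPTIONS[('content', 'new-option')]('true'))   # -> ''
print(CHANGED_OPTIONS[('content', 'new-option')]('false'))  # -> '*'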
@@ -150,7 +150,7 @@ module.exports = { TestCase.assertEqual(obj.arrayCol1[-1], undefined); TestCase.assertEqual(obj.arrayCol1['foo'], undefined); - for (let field of Object.keys(prim)) { + for (let field of Object.keys(Object.getPrototypeOf(prim))) { TestCase.assertEqual(prim[field][2], undefined); TestCase.assertEqual(prim[field][-1], undefined); TestCase.assertEqual(prim[field]['foo'], undefined);
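The diff above switches the test loop from the instance's own keys to the keys of its prototype, presumably because the object's list properties are exposed as enumerable accessors on the prototype rather than as own properties, which would leave Object.keys(prim) empty. A small plain-JavaScript illustration of that difference, using a made-up FakePrimitiveArrays constructor rather than Realm's actual implementation:

// Illustration only: schema-style properties defined as enumerable
// accessors on the prototype (an assumption about how the bound object
// exposes them, not Realm's code).
function FakePrimitiveArrays() {}
for (const name of ['bool', 'int', 'optBool', 'optInt']) {
    Object.defineProperty(FakePrimitiveArrays.prototype, name, {
        enumerable: true,
        get() { return []; },  // stand-in for a Realm.List
    });
}

const prim = new FakePrimitiveArrays();

// Own enumerable keys of the instance: none, so the loop in the old
// version of the test would never execute its body.
console.log(Object.keys(prim));                         // []

// Keys of the prototype, as iterated after the patch.
console.log(Object.keys(Object.getPrototypeOf(prim)));  // ['bool', 'int', 'optBool', 'optInt']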
1
//////////////////////////////////////////////////////////////////////////// // // Copyright 2016 Realm Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////// 'use strict'; const Realm = require('realm'); let TestCase = require('./asserts'); let schemas = require('./schemas'); const DATA1 = new Uint8Array([0x01]); const DATA2 = new Uint8Array([0x02]); const DATA3 = new Uint8Array([0x03]); const DATE1 = new Date(1); const DATE2 = new Date(2); const DATE3 = new Date(3); module.exports = { testListConstructor: function() { const realm = new Realm({schema: [schemas.PersonObject, schemas.PersonList]}); realm.write(() => { let obj = realm.create('PersonList', {list: []}); TestCase.assertInstanceOf(obj.list, Realm.List); TestCase.assertInstanceOf(obj.list, Realm.Collection); }); TestCase.assertThrowsContaining(() => new Realm.List(), 'constructor'); TestCase.assertInstanceOf(Realm.List, Function); }, testListType: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject, schemas.PrimitiveArrays]}); let obj, prim; realm.write(() => { obj = realm.create('LinkTypesObject', {}); prim = realm.create('PrimitiveArrays', {}); }); TestCase.assertEqual(obj.arrayCol.type, 'object'); TestCase.assertEqual(obj.arrayCol1.type, 'object'); TestCase.assertEqual(prim.bool.type, 'bool'); TestCase.assertEqual(prim.int.type, 'int'); TestCase.assertEqual(prim.float.type, 'float'); TestCase.assertEqual(prim.double.type, 'double'); TestCase.assertEqual(prim.string.type, 'string'); TestCase.assertEqual(prim.date.type, 'date'); TestCase.assertEqual(prim.optBool.type, 'bool'); TestCase.assertEqual(prim.optInt.type, 'int'); TestCase.assertEqual(prim.optFloat.type, 'float'); TestCase.assertEqual(prim.optDouble.type, 'double'); TestCase.assertEqual(prim.optString.type, 'string'); TestCase.assertEqual(prim.optDate.type, 'date'); TestCase.assertFalse(prim.bool.optional); TestCase.assertFalse(prim.int.optional); TestCase.assertFalse(prim.float.optional); TestCase.assertFalse(prim.double.optional); TestCase.assertFalse(prim.string.optional); TestCase.assertFalse(prim.date.optional); TestCase.assertTrue(prim.optBool.optional); TestCase.assertTrue(prim.optInt.optional); TestCase.assertTrue(prim.optFloat.optional); TestCase.assertTrue(prim.optDouble.optional); TestCase.assertTrue(prim.optString.optional); TestCase.assertTrue(prim.optDate.optional); }, testListLength: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject]}); let array; realm.write(() => { let obj = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [{doubleCol: 3}], }); array = obj.arrayCol; TestCase.assertEqual(array.length, 1); obj.arrayCol = []; TestCase.assertEqual(array.length, 0); obj.arrayCol = [{doubleCol: 1}, {doubleCol: 2}]; TestCase.assertEqual(array.length, 2); TestCase.assertThrowsContaining(() => array.length = 0, "Cannot assign to read only property 'length'"); }); 
TestCase.assertEqual(array.length, 2); }, testListSubscriptGetters: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject, schemas.PrimitiveArrays]}); let obj, prim; realm.write(() => { obj = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [{doubleCol: 3}, {doubleCol: 4}], arrayCol1: [{doubleCol: 5}, {doubleCol: 6}], }); prim = realm.create('PrimitiveArrays', { bool: [true, false], int: [1, 2], float: [1.1, 2.2], double: [1.11, 2.22], string: ['a', 'b'], date: [new Date(1), new Date(2)], data: [DATA1, DATA2], optBool: [true, null], optInt: [1, null], optFloat: [1.1, null], optDouble: [1.11, null], optString: ['a', null], optDate: [new Date(1), null], optData: [DATA1, null], }); }); TestCase.assertEqual(obj.arrayCol[0].doubleCol, 3); TestCase.assertEqual(obj.arrayCol[1].doubleCol, 4); TestCase.assertEqual(obj.arrayCol[2], undefined); TestCase.assertEqual(obj.arrayCol[-1], undefined); TestCase.assertEqual(obj.arrayCol['foo'], undefined); TestCase.assertEqual(obj.arrayCol1[0].doubleCol, 5); TestCase.assertEqual(obj.arrayCol1[1].doubleCol, 6); TestCase.assertEqual(obj.arrayCol1[2], undefined); TestCase.assertEqual(obj.arrayCol1[-1], undefined); TestCase.assertEqual(obj.arrayCol1['foo'], undefined); for (let field of Object.keys(prim)) { TestCase.assertEqual(prim[field][2], undefined); TestCase.assertEqual(prim[field][-1], undefined); TestCase.assertEqual(prim[field]['foo'], undefined); if (field.includes('opt')) { TestCase.assertEqual(prim[field][1], null); } } TestCase.assertSimilar('bool', prim.bool[0], true); TestCase.assertSimilar('bool', prim.bool[1], false); TestCase.assertSimilar('int', prim.int[0], 1); TestCase.assertSimilar('int', prim.int[1], 2); TestCase.assertSimilar('float', prim.float[0], 1.1); TestCase.assertSimilar('float', prim.float[1], 2.2); TestCase.assertSimilar('double', prim.double[0], 1.11); TestCase.assertSimilar('double', prim.double[1], 2.22); TestCase.assertSimilar('string', prim.string[0], 'a'); TestCase.assertSimilar('string', prim.string[1], 'b'); TestCase.assertSimilar('data', prim.data[0], DATA1); TestCase.assertSimilar('data', prim.data[1], DATA2); TestCase.assertSimilar('date', prim.date[0], new Date(1)); TestCase.assertSimilar('date', prim.date[1], new Date(2)); TestCase.assertSimilar('bool', prim.optBool[0], true); TestCase.assertSimilar('int', prim.optInt[0], 1); TestCase.assertSimilar('float', prim.optFloat[0], 1.1); TestCase.assertSimilar('double', prim.optDouble[0], 1.11); TestCase.assertSimilar('string', prim.optString[0], 'a'); TestCase.assertSimilar('data', prim.optData[0], DATA1); TestCase.assertSimilar('date', prim.optDate[0], new Date(1)); }, testListSubscriptSetters: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject, schemas.PrimitiveArrays]}); let array; realm.write(() => { let obj = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [{doubleCol: 3}, {doubleCol: 4}], }); let prim = realm.create('PrimitiveArrays', {}); array = obj.arrayCol; array[0] = {doubleCol: 5}; array[1] = {doubleCol: 6}; TestCase.assertEqual(array[0].doubleCol, 5); TestCase.assertEqual(array[1].doubleCol, 6); array[0] = obj.objectCol; array[1] = obj.objectCol1; TestCase.assertEqual(array[0].doubleCol, 1); TestCase.assertEqual(array[1].doubleCol, 2); TestCase.assertThrowsContaining(() => array[0] = null, "JS value must be of type 'object', got (null)"); TestCase.assertThrowsContaining(() => array[0] = {}, 
"Missing value for property 'TestObject.doubleCol'"); TestCase.assertThrowsContaining(() => array[0] = {foo: 'bar'}, "Missing value for property 'TestObject.doubleCol'"); TestCase.assertThrowsContaining(() => array[0] = prim, "Object of type (PrimitiveArrays) does not match List type (TestObject)"); TestCase.assertThrowsContaining(() => array[0] = array, "Missing value for property 'TestObject.doubleCol'"); TestCase.assertThrowsContaining(() => array[2] = {doubleCol: 1}, "Requested index 2 greater than max 1"); TestCase.assertThrowsContaining(() => array[-1] = {doubleCol: 1}, "Index -1 cannot be less than zero."); array['foo'] = 'bar'; TestCase.assertEqual(array.foo, 'bar'); function testAssign(name, v1, v2) { prim[name].push(v1); TestCase.assertSimilar(prim[name].type, prim[name][0], v1, undefined, 1); prim[name][0] = v2; TestCase.assertSimilar(prim[name].type, prim[name][0], v2, undefined, 1); } testAssign('bool', true, false); testAssign('int', 1, 2); testAssign('float', 1.1, 2.2); testAssign('double', 1.1, 2.2); testAssign('string', 'a', 'b'); testAssign('data', DATA1, DATA2); testAssign('date', DATE1, DATE2); function testAssignNull(name, expected) { TestCase.assertThrowsContaining(() => prim[name][0] = null, `Property must be of type '${expected}', got (null)`, undefined, 1); } testAssignNull('bool', 'bool'); testAssignNull('int', 'int'); testAssignNull('float', 'float'); testAssignNull('double', 'double'); testAssignNull('string', 'string'); testAssignNull('data', 'data'); testAssignNull('date', 'date'); testAssign('optBool', true, null); testAssign('optInt', 1, null); testAssign('optFloat', 1.1, null); testAssign('optDouble', 1.1, null); testAssign('optString', 'a', null); testAssign('optData', DATA1, null); testAssign('optDate', DATE1, null); }); TestCase.assertThrowsContaining(() => array[0] = {doubleCol: 1}, "Cannot modify managed objects outside of a write transaction."); array['foo'] = 'baz'; TestCase.assertEqual(array.foo, 'baz'); }, testListAssignment: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject, schemas.PersonList, schemas.PersonObject, schemas.PrimitiveArrays]}); let obj, prim; realm.write(() => { obj = realm.create('LinkTypesObject', {}); prim = realm.create('PrimitiveArrays', {}); let person = realm.create('PersonObject', {name: 'a', age: 2.0}); let personList = realm.create('PersonList', {list: [person]}).list; TestCase.assertThrowsContaining(() => obj.arrayCol = [0], "JS value must be of type 'object', got (0)"); TestCase.assertThrowsContaining(() => obj.arrayCol = [null], "JS value must be of type 'object', got (null)"); TestCase.assertThrowsContaining(() => obj.arrayCol = [person], "Object of type (PersonObject) does not match List type (TestObject)"); TestCase.assertThrowsContaining(() => obj.arrayCol = personList, "LinkTypesObject.arrayCol must be of type 'TestObject[]', got 'object' ("); obj.arrayCol = [realm.create('TestObject', {doubleCol: 1.0})] TestCase.assertEqual(obj.arrayCol[0].doubleCol, 1.0); obj.arrayCol = obj.arrayCol; // eslint-disable-line no-self-assign TestCase.assertEqual(obj.arrayCol[0].doubleCol, 1.0); TestCase.assertThrowsContaining(() => prim.bool = [person], "PrimitiveArrays.bool must be of type 'boolean[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.int = [person], "PrimitiveArrays.int must be of type 'number[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.float = [person], "PrimitiveArrays.float must be of type 'number[]', got 'object' 
([PersonObject{"); TestCase.assertThrowsContaining(() => prim.double = [person], "PrimitiveArrays.double must be of type 'number[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.string = [person], "PrimitiveArrays.string must be of type 'string[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.data = [person], "PrimitiveArrays.data must be of type 'binary[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.date = [person], "PrimitiveArrays.date must be of type 'date[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.optBool = [person], "PrimitiveArrays.optBool must be of type 'boolean?[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.optInt = [person], "PrimitiveArrays.optInt must be of type 'number?[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.optFloat = [person], "PrimitiveArrays.optFloat must be of type 'number?[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.optDouble = [person], "PrimitiveArrays.optDouble must be of type 'number?[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.optString = [person], "PrimitiveArrays.optString must be of type 'string?[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.optData = [person], "PrimitiveArrays.optData must be of type 'binary?[]', got 'object' ([PersonObject{"); TestCase.assertThrowsContaining(() => prim.optDate = [person], "PrimitiveArrays.optDate must be of type 'date?[]', got 'object' ([PersonObject{"); function testAssign(name, value) { prim[name] = [value]; TestCase.assertSimilar(prim[name].type, prim[name][0], value, undefined, 1); } testAssign('bool', true); testAssign('int', 1); testAssign('float', 1.1); testAssign('double', 1.1); testAssign('string', 'a'); testAssign('data', DATA1); testAssign('date', DATE1); function testAssignNull(name, expected) { TestCase.assertThrowsContaining(() => prim[name] = [null], `PrimitiveArrays.${name} must be of type '${expected}[]', got 'object' ([null])`, undefined, 1); TestCase.assertEqual(prim[name].length, 1, "List should not have been cleared by invalid assignment", 1); } testAssignNull('bool', 'boolean'); testAssignNull('int', 'number'); testAssignNull('float', 'number'); testAssignNull('double', 'number'); testAssignNull('string', 'string'); testAssignNull('data', 'binary'); testAssignNull('date', 'date'); testAssign('optBool', true); testAssign('optInt', 1); testAssign('optFloat', 1.1); testAssign('optDouble', 1.1); testAssign('optString', 'a'); testAssign('optData', DATA1); testAssign('optDate', DATE1); testAssign('optBool', null); testAssign('optInt', null); testAssign('optFloat', null); testAssign('optDouble', null); testAssign('optString', null); testAssign('optData', null); testAssign('optDate', null); }); TestCase.assertThrowsContaining(() => obj.arrayCol = [], "Cannot modify managed objects outside of a write transaction."); TestCase.assertThrowsContaining(() => prim.bool = [], "Cannot modify managed objects outside of a write transaction."); }, testListEnumerate: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject]}); let obj; realm.write(() => { obj = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [], }); }); for (const index in obj.arrayCol) { TestCase.assertTrue(false, "No objects should have been enumerated: " + index); } realm.write(() 
=> { obj.arrayCol = [{doubleCol: 0}, {doubleCol: 1}]; }); TestCase.assertEqual(obj.arrayCol.length, 2); let count = 0; let keys = Object.keys(obj.arrayCol); for (const index in obj.arrayCol) { TestCase.assertEqual(count++, +index); TestCase.assertEqual(keys[index], index); } TestCase.assertEqual(count, 2); TestCase.assertEqual(keys.length, 2); }, testListPush: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject]}); let array; realm.write(() => { let obj = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [{doubleCol: 3}], }); array = obj.arrayCol; TestCase.assertEqual(array.length, 1); TestCase.assertEqual(array.push({doubleCol: 4}), 2); TestCase.assertEqual(array.length, 2); TestCase.assertEqual(array[1].doubleCol, 4); TestCase.assertEqual(array.push(obj.objectCol, obj.objectCol1), 4); TestCase.assertEqual(array.length, 4); TestCase.assertEqual(array[2].doubleCol, 1); TestCase.assertEqual(array[3].doubleCol, 2); TestCase.assertEqual(array.push(), 4); TestCase.assertEqual(array.length, 4); }); TestCase.assertEqual(array.length, 4); TestCase.assertThrowsContaining(() => { array.push([1]); }, "Cannot modify managed objects outside of a write transaction."); }, testListPop: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject]}); let array; realm.write(() => { let obj = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [{doubleCol: 3}, {doubleCol: 4}], }); array = obj.arrayCol; TestCase.assertEqual(array.pop().doubleCol, 4); TestCase.assertEqual(array.pop().doubleCol, 3); TestCase.assertEqual(array.length, 0); TestCase.assertEqual(array.pop(), undefined); TestCase.assertThrowsContaining(() => array.pop(1), 'Invalid argument'); }); TestCase.assertThrowsContaining(() => array.pop(), "Cannot modify managed objects outside of a write transaction."); }, testListUnshift: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject]}); let array; realm.write(() => { let obj = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [{doubleCol: 3}], }); array = obj.arrayCol; TestCase.assertEqual(array.length, 1); TestCase.assertEqual(array.unshift({doubleCol: 5}), 2); TestCase.assertEqual(array.length, 2); TestCase.assertEqual(array[0].doubleCol, 5); TestCase.assertEqual(array.unshift(obj.objectCol, obj.objectCol1), 4); TestCase.assertEqual(array.length, 4); TestCase.assertEqual(array[0].doubleCol, 1); TestCase.assertEqual(array[1].doubleCol, 2); TestCase.assertEqual(array.unshift(), 4); TestCase.assertEqual(array.length, 4); }); TestCase.assertEqual(array.length, 4); TestCase.assertThrowsContaining(() => array.unshift({doubleCol: 1}), 'Cannot modify managed objects outside of a write transaction.'); }, testListShift: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject]}); let array; realm.write(() => { let obj = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [{doubleCol: 3}, {doubleCol: 4}], }); array = obj.arrayCol; TestCase.assertEqual(array.shift().doubleCol, 3); TestCase.assertEqual(array.shift().doubleCol, 4); TestCase.assertEqual(array.length, 0); TestCase.assertEqual(array.shift(), undefined); TestCase.assertThrowsContaining(() => array.shift(1), 'Invalid argument'); }); TestCase.assertThrowsContaining(() => { array.shift(); }, "Cannot modify managed objects outside of a write 
transaction."); }, testListSplice: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject]}); let array; realm.write(() => { let obj = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [{doubleCol: 3}, {doubleCol: 4}], }); array = obj.arrayCol; let removed; removed = array.splice(0, 0, obj.objectCol, obj.objectCol1); TestCase.assertEqual(removed.length, 0); TestCase.assertEqual(array.length, 4); TestCase.assertEqual(array[0].doubleCol, 1); TestCase.assertEqual(array[1].doubleCol, 2); removed = array.splice(2, 2, {doubleCol: 5}, {doubleCol: 6}); TestCase.assertEqual(removed.length, 2); TestCase.assertEqual(removed[0].doubleCol, 3); TestCase.assertEqual(removed[1].doubleCol, 4); TestCase.assertEqual(array.length, 4); TestCase.assertEqual(array[2].doubleCol, 5); TestCase.assertEqual(array[3].doubleCol, 6); removed = array.splice(2, 2); TestCase.assertEqual(removed.length, 2); TestCase.assertEqual(removed[0].doubleCol, 5); TestCase.assertEqual(removed[1].doubleCol, 6); TestCase.assertEqual(array.length, 2); TestCase.assertEqual(array[0].doubleCol, 1); TestCase.assertEqual(array[1].doubleCol, 2); removed = array.splice(-1, 1); TestCase.assertEqual(removed.length, 1); TestCase.assertEqual(removed[0].doubleCol, 2); TestCase.assertEqual(array.length, 1); TestCase.assertEqual(array[0].doubleCol, 1); removed = array.splice(0, 2); TestCase.assertEqual(removed.length, 1); TestCase.assertEqual(removed[0].doubleCol, 1); TestCase.assertEqual(array.length, 0); removed = array.splice('0', '0', obj.objectCol); TestCase.assertEqual(removed.length, 0); TestCase.assertEqual(array.length, 1); removed = array.splice(1); TestCase.assertEqual(removed.length, 0); TestCase.assertEqual(array.length, 1); removed = array.splice(0); TestCase.assertEqual(removed.length, 1); TestCase.assertEqual(array.length, 0); TestCase.assertThrowsContaining(() => { array.splice('cat', 1); }, "Value 'cat' not convertible to a number"); TestCase.assertThrowsContaining(() => { array.splice(0, 0, 0); }, "JS value must be of type 'object', got (0)"); }); TestCase.assertThrowsContaining(() => { array.splice(0, 0, {doubleCol: 1}); }, "Cannot modify managed objects outside of a write transaction"); }, testListDeletions: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject]}); let object; let array; realm.write(() => { object = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [{doubleCol: 3}, {doubleCol: 4}], }); array = object.arrayCol; }); try { realm.write(() => { realm.delete(array[0]); TestCase.assertEqual(array.length, 1); TestCase.assertEqual(array[0].doubleCol, 4); // This should cancel the transaction and cause the list to be reset. 
throw new Error('Transaction FAIL'); }); } catch (e) {} TestCase.assertEqual(array.length, 2); TestCase.assertEqual(array[0].doubleCol, 3); realm.write(() => { realm.delete(object); }); TestCase.assertThrowsContaining(() => array[0], 'invalidated'); }, testLiveUpdatingResults: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject]}); let objects = realm.objects('TestObject'); let array; realm.write(() => { let obj = realm.create('LinkTypesObject', { objectCol: {doubleCol: 1}, objectCol1: {doubleCol: 2}, arrayCol: [{doubleCol: 3}, {doubleCol: 4}], }); array = obj.arrayCol; }); TestCase.assertEqual(array.length, 2); TestCase.assertEqual(objects.length, 4); try { realm.write(() => { array.push({doubleCol: 5}); TestCase.assertEqual(objects.length, 5); array.unshift({doubleCol: 2}); TestCase.assertEqual(objects.length, 6); array.splice(0, 0, {doubleCol: 1}); TestCase.assertEqual(objects.length, 7); array.push(objects[0], objects[1]); TestCase.assertEqual(objects.length, 7); // This should cancel the transaction and cause the list and results to be reset. throw new Error('Transaction FAIL'); }); } catch (e) {} TestCase.assertEqual(array.length, 2); TestCase.assertEqual(objects.length, 4); }, testListSnapshot: function() { const realm = new Realm({schema: [schemas.LinkTypes, schemas.TestObject]}); let objects = realm.objects('TestObject'); let array; realm.write(() => { let obj = realm.create('LinkTypesObject', [[1], [2], [[3], [4]], [[5], [6]]]); array = obj.arrayCol; }); let objectsCopy = objects.snapshot(); let arrayCopy = array.snapshot(); TestCase.assertEqual(objectsCopy.length, 6); TestCase.assertEqual(arrayCopy.length, 2); realm.write(() => { array.push([5]); TestCase.assertEqual(objectsCopy.length, 6); TestCase.assertEqual(arrayCopy.length, 2); TestCase.assertEqual(objectsCopy.snapshot().length, 6); TestCase.assertEqual(arrayCopy.snapshot().length, 2); TestCase.assertEqual(objects.snapshot().length, 7); TestCase.assertEqual(array.snapshot().length, 3); realm.delete(array[0]); TestCase.assertEqual(objectsCopy.length, 6); TestCase.assertEqual(arrayCopy.length, 2); TestCase.assertEqual(arrayCopy[0], null); realm.deleteAll(); TestCase.assertEqual(objectsCopy.length, 6); TestCase.assertEqual(arrayCopy.length, 2); TestCase.assertEqual(arrayCopy[1], null); }); }, testListFiltered: function() { const realm = new Realm({schema: [schemas.PersonObject, schemas.PersonList]}); let list; realm.write(() => { let object = realm.create('PersonList', {list: [ {name: 'Ari', age: 10}, {name: 'Tim', age: 11}, {name: 'Bjarne', age: 12}, {name: 'Alex', age: 12, married: true} ]}); realm.create('PersonObject', {name: 'NotInList', age: 10}); list = object.list; }); TestCase.assertEqual(list.filtered("truepredicate").length, 4); TestCase.assertEqual(list.filtered('age = 11')[0].name, 'Tim'); TestCase.assertEqual(list.filtered('age = 12').length, 2); TestCase.assertEqual(list.filtered('age > 10 && age < 13').length, 3); TestCase.assertEqual(list.filtered('age > 10').filtered('age < 13').length, 3); }, testListSorted: function() { const schema = [ {name: 'Target', properties: {value: 'int'}}, {name: 'Mid', properties: {value: 'int', link: 'Target'}}, {name: 'List', properties: {list: {type: 'list', objectType: 'Mid'}}}, schemas.PrimitiveArrays ]; const realm = new Realm({schema: schema}); let list, prim; realm.write(() => { list = realm.create('List', {list: [ {value: 3, link: {value: 1}}, {value: 1, link: {value: 3}}, {value: 2, link: {value: 2}}, ]}).list; realm.create('List', {list: 
[ {value: 4, link: {value: 4}}, ]}); prim = realm.create('PrimitiveArrays', { bool: [true, false], int: [3, 1, 2], float: [3, 1, 2], double: [3, 1, 2], string: ['c', 'a', 'b'], data: [DATA3, DATA1, DATA2], date: [DATE3, DATE1, DATE2], optBool: [true, false, null], optInt: [3, 1, 2, null], optFloat: [3, 1, 2, null], optDouble: [3, 1, 2, null], optString: ['c', 'a', 'b', null], optData: [DATA3, DATA1, DATA2, null], optDate: [DATE3, DATE1, DATE2, null], }); }); const values = (results) => results.map((o) => o.value); // TestCase.assertThrowsContaining(() => list.sorted()); TestCase.assertThrowsContaining(() => list.sorted('nonexistent property'), "Cannot sort on key path 'nonexistent property': property 'Mid.nonexistent property' does not exist."); TestCase.assertThrowsContaining(() => list.sorted('link'), "Cannot sort on key path 'link': property 'Mid.link' of type 'object' cannot be the final property in the key path."); TestCase.assertArraysEqual(values(list.sorted([])), [3, 1, 2]); TestCase.assertArraysEqual(values(list.sorted('value')), [1, 2, 3]); TestCase.assertArraysEqual(values(list.sorted('value', false)), [1, 2, 3]); TestCase.assertArraysEqual(values(list.sorted('value', true)), [3, 2, 1]); TestCase.assertArraysEqual(values(list.sorted(['value'])), [1, 2, 3]); TestCase.assertArraysEqual(values(list.sorted([['value', false]])), [1, 2, 3]); TestCase.assertArraysEqual(values(list.sorted([['value', true]])), [3, 2, 1]); TestCase.assertArraysEqual(values(list.sorted('link.value')), [3, 2, 1]); TestCase.assertArraysEqual(values(list.sorted('link.value', false)), [3, 2, 1]); TestCase.assertArraysEqual(values(list.sorted('link.value', true)), [1, 2, 3]); TestCase.assertArraysEqual(values(list.sorted(['link.value'])), [3, 2, 1]); TestCase.assertArraysEqual(values(list.sorted([['link.value', false]])), [3, 2, 1]); TestCase.assertArraysEqual(values(list.sorted([['link.value', true]])), [1, 2, 3]); TestCase.assertThrowsContaining(() => prim.int.sorted('value', true), "Cannot sort on key path 'value': arrays of 'int' can only be sorted on 'self'"); TestCase.assertThrowsContaining(() => prim.int.sorted('!ARRAY_VALUE', true), "Cannot sort on key path '!ARRAY_VALUE': arrays of 'int' can only be sorted on 'self'"); TestCase.assertArraysEqual(prim.int.sorted([]), [3, 1, 2]); TestCase.assertArraysEqual(prim.int.sorted(), [1, 2, 3]); TestCase.assertArraysEqual(prim.int.sorted(false), [1, 2, 3]); TestCase.assertArraysEqual(prim.int.sorted(true), [3, 2, 1]); TestCase.assertArraysEqual(prim.optInt.sorted([]), [3, 1, 2, null]); TestCase.assertArraysEqual(prim.optInt.sorted(), [null, 1, 2, 3]); TestCase.assertArraysEqual(prim.optInt.sorted(false), [null, 1, 2, 3]); TestCase.assertArraysEqual(prim.optInt.sorted(true), [3, 2, 1, null]); TestCase.assertArraysEqual(prim.bool.sorted(), [false, true]); TestCase.assertArraysEqual(prim.float.sorted(), [1, 2, 3]); TestCase.assertArraysEqual(prim.double.sorted(), [1, 2, 3]); TestCase.assertArraysEqual(prim.string.sorted(), ['a', 'b', 'c']); TestCase.assertArraysEqual(prim.data.sorted(), [DATA1, DATA2, DATA3]); TestCase.assertArraysEqual(prim.date.sorted(), [DATE1, DATE2, DATE3]); TestCase.assertArraysEqual(prim.optBool.sorted(), [null, false, true]); TestCase.assertArraysEqual(prim.optFloat.sorted(), [null, 1, 2, 3]); TestCase.assertArraysEqual(prim.optDouble.sorted(), [null, 1, 2, 3]); TestCase.assertArraysEqual(prim.optString.sorted(), [null, 'a', 'b', 'c']); TestCase.assertArraysEqual(prim.optData.sorted(), [null, DATA1, DATA2, DATA3]); 
TestCase.assertArraysEqual(prim.optDate.sorted(), [null, DATE1, DATE2, DATE3]); }, testArrayMethods: function() { const realm = new Realm({schema: [schemas.PersonObject, schemas.PersonList, schemas.PrimitiveArrays]}); let object, prim; realm.write(() => { object = realm.create('PersonList', {list: [ {name: 'Ari', age: 10}, {name: 'Tim', age: 11}, {name: 'Bjarne', age: 12}, ]}); prim = realm.create('PrimitiveArrays', {int: [10, 11, 12]}); }); for (const list of [object.list, realm.objects('PersonObject')]) { TestCase.assertEqual(list.slice().length, 3); TestCase.assertEqual(list.slice(-1).length, 1); TestCase.assertEqual(list.slice(-1)[0].age, 12); TestCase.assertEqual(list.slice(1, 3).length, 2); TestCase.assertEqual(list.slice(1, 3)[1].age, 12); // A Node 6 regression in v8 causes an error when converting our objects to strings: // TypeError: Cannot convert a Symbol value to a string if (!TestCase.isNode6()) { TestCase.assertEqual(list.join(' '), 'Ari Tim Bjarne'); } let count = 0; list.forEach((p, i) => { TestCase.assertEqual(p.name, list[i].name); count++; }); TestCase.assertEqual(count, list.length); TestCase.assertArraysEqual(list.map(p => p.age), [10, 11, 12]); TestCase.assertTrue(list.some(p => p.age > 10)); TestCase.assertTrue(list.every(p => p.age > 0)); let person = list.find(p => p.name == 'Tim'); TestCase.assertEqual(person.name, 'Tim'); let index = list.findIndex(p => p.name == 'Tim'); TestCase.assertEqual(index, 1); TestCase.assertEqual(list.indexOf(list[index]), index); TestCase.assertEqual(list.reduce((n, p) => n + p.age, 0), 33); TestCase.assertEqual(list.reduceRight((n, p) => n + p.age, 0), 33); // eslint-disable-next-line no-undef let iteratorMethodNames = ['entries', 'keys', 'values']; iteratorMethodNames.push(Symbol.iterator); iteratorMethodNames.forEach(methodName => { let iterator = list[methodName](); let count = 0; let result; // This iterator should itself be iterable. 
// TestCase.assertEqual(iterator[iteratorSymbol](), iterator); TestCase.assertEqual(iterator[Symbol.iterator](), iterator); while ((result = iterator.next()) && !result.done) { let value = result.value; switch (methodName) { case 'entries': TestCase.assertEqual(value.length, 2); TestCase.assertEqual(value[0], count); TestCase.assertEqual(value[1].name, list[count].name); break; case 'keys': TestCase.assertEqual(value, count); break; default: TestCase.assertEqual(value.name, list[count].name); break; } count++; } TestCase.assertEqual(result.done, true); TestCase.assertEqual(result.value, undefined); TestCase.assertEqual(count, list.length); }); } const list = prim.int; TestCase.assertEqual(list.slice().length, 3); TestCase.assertEqual(list.slice(-1).length, 1); TestCase.assertEqual(list.slice(-1)[0], 12); TestCase.assertEqual(list.slice(1, 3).length, 2); TestCase.assertEqual(list.slice(1, 3)[1], 12); TestCase.assertEqual(list.join(' '), '10 11 12'); let count = 0; list.forEach((v, i) => { TestCase.assertEqual(v, i + 10); count++; }); TestCase.assertEqual(count, list.length); TestCase.assertArraysEqual(list.map(p => p + 1), [11, 12, 13]); TestCase.assertTrue(list.some(p => p > 10)); TestCase.assertTrue(list.every(p => p > 0)); let value = list.find(p => p == 11); TestCase.assertEqual(value, 11) let index = list.findIndex(p => p == 11); TestCase.assertEqual(index, 1); TestCase.assertEqual(list.indexOf(list[index]), index); TestCase.assertEqual(list.reduce((n, p) => n + p, 0), 33); TestCase.assertEqual(list.reduceRight((n, p) => n + p, 0), 33); // eslint-disable-next-line no-undef let iteratorMethodNames = ['entries', 'keys', 'values']; iteratorMethodNames.push(Symbol.iterator); iteratorMethodNames.forEach(methodName => { let iterator = list[methodName](); let count = 0; let result; // This iterator should itself be iterable. 
// TestCase.assertEqual(iterator[iteratorSymbol](), iterator); TestCase.assertEqual(iterator[Symbol.iterator](), iterator); while ((result = iterator.next()) && !result.done) { let value = result.value; switch (methodName) { case 'entries': TestCase.assertEqual(value.length, 2); TestCase.assertEqual(value[0], count); TestCase.assertEqual(value[1], list[count]); break; case 'keys': TestCase.assertEqual(value, count); break; default: TestCase.assertEqual(value.name, list[count].name); break; } count++; } TestCase.assertEqual(result.done, true); TestCase.assertEqual(result.value, undefined); TestCase.assertEqual(count, list.length); }); }, testPagenation: function() { const realm = new Realm({schema: [schemas.StringOnly]}); realm.write(() => { for (let i = 0; i < 10; i++) { realm.create(schemas.StringOnly.name, { stringCol: `${i}` }); } }); let objects = realm.objects(schemas.StringOnly.name); let page1 = objects.slice(0, 5); let page2 = objects.slice(5, 10); TestCase.assertEqual(page1.length, 5); TestCase.assertEqual(page2.length, 5); for (let i = 0; i < 5; i++) { TestCase.assertEqual(page1[i]['stringCol'], `${i}`); TestCase.assertEqual(page2[i]['stringCol'], `${i + 5}`); } realm.close(); }, testIsValid: function() { const realm = new Realm({schema: [schemas.PersonObject, schemas.PersonList]}); let object; let list; realm.write(() => { object = realm.create('PersonList', {list: [ {name: 'Ari', age: 10}, {name: 'Tim', age: 11}, {name: 'Bjarne', age: 12}, ]}); list = object.list; TestCase.assertEqual(list.isValid(), true); realm.delete(object); }); TestCase.assertEqual(list.isValid(), false); TestCase.assertThrowsContaining(() => list.length, 'invalidated'); }, testIsEmpty: function () { const realm = new Realm({ schema: [schemas.PersonObject, schemas.PersonList] }); let object; realm.write(() => { object = realm.create('PersonList', { list: [ ] }); }); TestCase.assertTrue(object.list.isEmpty()); realm.write(() => { object.list = [ { name: 'Bob', age: 42 }, { name: 'Alice', age: 42 } ] }); TestCase.assertFalse(object.list.isEmpty()); realm.close(); }, testListAggregateFunctions: function() { const NullableBasicTypesList = { name: 'NullableBasicTypesList', properties: { list: 'NullableBasicTypesObject[]', } }; const realm = new Realm({schema: [schemas.NullableBasicTypes, NullableBasicTypesList]}); const N = 50; const list = []; for (let i = 0; i < N; i++) { list.push({ intCol: i+1, floatCol: i+1, doubleCol: i+1, dateCol: new Date(i+1) }); } let object; realm.write(() => { object = realm.create('NullableBasicTypesList', {list: list}); }); TestCase.assertEqual(object.list.length, N); // int, float & double columns support all aggregate functions ['intCol', 'floatCol', 'doubleCol'].forEach(colName => { TestCase.assertEqual(object.list.min(colName), 1); TestCase.assertEqual(object.list.max(colName), N); TestCase.assertEqual(object.list.sum(colName), N*(N+1)/2); TestCase.assertEqual(object.list.avg(colName), (N+1)/2); }); // date columns support only 'min' & 'max' TestCase.assertEqual(object.list.min('dateCol').getTime(), new Date(1).getTime()); TestCase.assertEqual(object.list.max('dateCol').getTime(), new Date(N).getTime()); }, testListAggregateFunctionsWithNullColumnValues: function() { const NullableBasicTypesList = { name: 'NullableBasicTypesList', properties: { list: 'NullableBasicTypesObject[]', } }; const realm = new Realm({schema: [schemas.NullableBasicTypes, NullableBasicTypesList]}); const N = 50; const M = 10; const list = []; for (let i = 0; i < N; i++) { list.push({ intCol: i+1, 
floatCol: i+1, doubleCol: i+1, dateCol: new Date(i+1) }); } for (let j = 0; j < M; j++) { list.push({}); } let object, objectEmptyList; realm.write(() => { object = realm.create('NullableBasicTypesList', {list: list}); objectEmptyList = realm.create('NullableBasicTypesList', {list: []}); }); TestCase.assertEqual(object.list.length, N + M); // int, float & double columns support all aggregate functions // the M null valued objects should be ignored ['intCol', 'floatCol', 'doubleCol'].forEach(colName => { TestCase.assertEqual(object.list.min(colName), 1); TestCase.assertEqual(object.list.max(colName), N); TestCase.assertEqual(object.list.sum(colName), N*(N+1)/2); TestCase.assertEqual(object.list.avg(colName), (N+1)/2); }); // date columns support only 'min' & 'max' TestCase.assertEqual(object.list.min('dateCol').getTime(), new Date(1).getTime()); TestCase.assertEqual(object.list.max('dateCol').getTime(), new Date(N).getTime()); // call aggregate functions on empty list TestCase.assertEqual(objectEmptyList.list.length, 0); ['intCol', 'floatCol', 'doubleCol'].forEach(colName => { TestCase.assertUndefined(objectEmptyList.list.min(colName)); TestCase.assertUndefined(objectEmptyList.list.max(colName)); TestCase.assertEqual(objectEmptyList.list.sum(colName), 0); TestCase.assertUndefined(objectEmptyList.list.avg(colName)); }); TestCase.assertUndefined(objectEmptyList.list.min('dateCol')); TestCase.assertUndefined(objectEmptyList.list.max('dateCol')); }, testPrimitiveListAggregateFunctions: function() { const realm = new Realm({schema: [schemas.PrimitiveArrays]}); let object; realm.write(() => { object = realm.create('PrimitiveArrays', { int: [1, 2, 3], float: [1.1, 2.2, 3.3], double: [1.11, 2.22, 3.33], date: [DATE1, DATE2, DATE3], optInt: [1, null, 2], optFloat: [1.1, null, 3.3], optDouble: [1.11, null, 3.33], optDate: [DATE1, null, DATE3] }); }); for (let prop of ['int', 'float', 'double', 'date', 'optInt', 'optFloat', 'optDouble', 'optDate']) { const list = object[prop]; TestCase.assertSimilar(list.type, list.min(), list[0]); TestCase.assertSimilar(list.type, list.max(), list[2]); if (list.type === 'date') { TestCase.assertThrowsContaining(() => list.sum(), "Cannot sum 'date' array: operation not supported") TestCase.assertThrowsContaining(() => list.avg(), "Cannot average 'date' array: operation not supported") continue; } const sum = list[0] + list[1] + list[2]; const avg = sum / (list[1] === null ? 
2 : 3); TestCase.assertSimilar(list.type, list.sum(), sum); TestCase.assertSimilar(list.type, list.avg(), avg); } TestCase.assertThrowsContaining(() => object.bool.min(), "Cannot min 'bool' array: operation not supported") TestCase.assertThrowsContaining(() => object.int.min("foo"), "Invalid arguments: at most 0 expected, but 1 supplied") }, testPrimitiveListFunctions: function () { const realm = new Realm({schema: [schemas.PrimitiveArrays]}); realm.write(() => { realm.create('PrimitiveArrays', { int: [1, 2, 3], float: [1.1, 2.2, 3.3], double: [1.11, 2.22, 3.33], date: [DATE1, DATE2, DATE3], string: ['1', '2', '3'], }); }); let objects = realm.objects('PrimitiveArrays'); TestCase.assertEqual(objects.length, 1); TestCase.assertEqual(objects[0]['string'].length, 3); TestCase.assertEqual(objects[0]['string'].join(','), '1,2,3'); realm.close(); }, testListAggregateFunctionsUnsupported: function() { const NullableBasicTypesList = { name: 'NullableBasicTypesList', properties: { list: {type: 'list', objectType: 'NullableBasicTypesObject'}, } }; const realm = new Realm({schema: [schemas.NullableBasicTypes, NullableBasicTypesList]}); const N = 5; var list = []; for (let i = 0; i < N; i++) { list.push({ intCol: i+1, floatCol: i+1, doubleCol: i+1, dateCol: new Date(i+1) }); } let object; realm.write(() => { object = realm.create('NullableBasicTypesList', {list: list}); }); TestCase.assertEqual(object.list.length, N); // bool, string & data columns don't support 'min' ['bool', 'string', 'data'].forEach(colName => { TestCase.assertThrowsContaining(() => object.list.min(colName + 'Col'), `Cannot min property '${colName}Col': operation not supported for '${colName}' properties`); }); // bool, string & data columns don't support 'max' ['bool', 'string', 'data'].forEach(colName => { TestCase.assertThrowsContaining(() => object.list.max(colName + 'Col'), `Cannot max property '${colName}Col': operation not supported for '${colName}' properties`); }); // bool, string, date & data columns don't support 'avg' ['bool', 'string', 'date', 'data'].forEach(colName => { TestCase.assertThrowsContaining(() => object.list.avg(colName + 'Col'), `Cannot average property '${colName}Col': operation not supported for '${colName}' properties`); }); // bool, string, date & data columns don't support 'sum' ['bool', 'string', 'date', 'data'].forEach(colName => { TestCase.assertThrowsContaining(() => object.list.sum(colName + 'Col'), `Cannot sum property '${colName}Col': operation not supported for '${colName}' properties`); }); }, testListAggregateFunctionsWrongProperty: function() { const realm = new Realm({schema: [schemas.PersonObject, schemas.PersonList]}); let object; realm.write(() => { object = realm.create('PersonList', {list: [ {name: 'Ari', age: 10}, {name: 'Tim', age: 11}, {name: 'Bjarne', age: 12}, ]}); }); TestCase.assertThrowsContaining(() => object.list.min('foo'), "Property 'foo' does not exist on object 'PersonObject'"); TestCase.assertThrowsContaining(() => object.list.max('foo'), "Property 'foo' does not exist on object 'PersonObject'"); TestCase.assertThrowsContaining(() => object.list.sum('foo'), "Property 'foo' does not exist on object 'PersonObject'"); TestCase.assertThrowsContaining(() => object.list.avg('foo'), "Property 'foo' does not exist on object 'PersonObject'"); TestCase.assertThrowsContaining(() => object.list.min(), "JS value must be of type 'string', got (undefined)"); TestCase.assertThrowsContaining(() => object.list.max(), "JS value must be of type 'string', got (undefined)"); 
TestCase.assertThrowsContaining(() => object.list.sum(), "JS value must be of type 'string', got (undefined)"); TestCase.assertThrowsContaining(() => object.list.avg(), "JS value must be of type 'string', got (undefined)"); }, testListNested: function() { const realm = new Realm({schema: [schemas.ParentObject, schemas.NameObject]}); realm.write(() => { realm.create('ParentObject', { id: 1, name: [ { family: 'Larsen', given: ['Hans', 'Jørgen'], prefix: [] }, { family: 'Hansen', given: ['Ib'], prefix: [] } ] }); realm.create('ParentObject', { id: 2, name: [ {family: 'Petersen', given: ['Gurli', 'Margrete'], prefix: [] } ] }); }); let objects = realm.objects('ParentObject'); TestCase.assertEqual(objects.length, 2); TestCase.assertEqual(objects[0].name.length, 2); TestCase.assertEqual(objects[0].name[0].given.length, 2); TestCase.assertEqual(objects[0].name[0].prefix.length, 0); TestCase.assertEqual(objects[0].name[0].given[0], 'Hans'); TestCase.assertEqual(objects[0].name[0].given[1], 'Jørgen') TestCase.assertEqual(objects[0].name[1].given.length, 1); TestCase.assertEqual(objects[0].name[1].given[0], 'Ib'); TestCase.assertEqual(objects[0].name[1].prefix.length, 0); TestCase.assertEqual(objects[1].name.length, 1); TestCase.assertEqual(objects[1].name[0].given.length, 2); TestCase.assertEqual(objects[1].name[0].prefix.length, 0); TestCase.assertEqual(objects[1].name[0].given[0], 'Gurli'); TestCase.assertEqual(objects[1].name[0].given[1], 'Margrete'); }, testListNestedFromJSON: function() { let json = '{"id":1, "name": [{ "family": "Larsen", "given": ["Hans", "Jørgen"], "prefix": [] }, { "family": "Hansen", "given": ["Ib"], "prefix": [] }] }'; let parent = JSON.parse(json); const realm = new Realm({schema: [schemas.ParentObject, schemas.NameObject]}); realm.write(() => { realm.create('ParentObject', parent); }); let objects = realm.objects('ParentObject'); TestCase.assertEqual(objects.length, 1); TestCase.assertEqual(objects[0].name.length, 2); TestCase.assertEqual(objects[0].name[0].given.length, 2); TestCase.assertEqual(objects[0].name[0].prefix.length, 0); TestCase.assertEqual(objects[0].name[0].given[0], 'Hans'); TestCase.assertEqual(objects[0].name[0].given[1], 'Jørgen'); TestCase.assertEqual(objects[0].name[1].given.length, 1); TestCase.assertEqual(objects[0].name[1].prefix.length, 0); TestCase.assertEqual(objects[0].name[1].given[0], 'Ib'); }, testMultipleLists: function() { const realm = new Realm({schema: [schemas.MultiListObject]}); realm.write(() => { realm.create('MultiListObject', { id: 0, list1: ["Hello"], list2: ["World"] }); realm.create('MultiListObject', { id: 1, list1: ["Foo"], list2: ["Bar"] }); }); let objects = realm.objects('MultiListObject'); TestCase.assertEqual(objects.length, 2); TestCase.assertEqual(objects[0].id, 0); TestCase.assertEqual(objects[0].list1.length, 1); TestCase.assertEqual(objects[0].list1[0], "Hello"); TestCase.assertEqual(objects[0].list2.length, 1); TestCase.assertEqual(objects[0].list2[0], "World"); TestCase.assertEqual(objects[1].id, 1); TestCase.assertEqual(objects[1].list1.length, 1); TestCase.assertEqual(objects[1].list1[0], "Foo"); TestCase.assertEqual(objects[1].list2.length, 1); TestCase.assertEqual(objects[1].list2[0], "Bar"); }, testGetAndApplySchema: function() { const realm1 = new Realm({ schema: [schemas.NameObject], _cache: false, }); realm1.write(() => { realm1.create(schemas.NameObject.name, { family: 'Smith', given: [ 'Bob', 'Ted']}); }) const schema = realm1.schema; realm1.close(); const realm2 = new Realm({ schema: schema, _cache: 
false, }); let names = realm2.objects(schemas.NameObject.name); TestCase.assertEqual(names.length, 1); TestCase.assertEqual(names[0]['family'], 'Smith'); TestCase.assertEqual(names[0]['given'].length, 2); realm2.close(); }, };
1
18,212
Perhaps use the new `.keys()` method here instead?
realm-realm-js
js
@@ -79,9 +79,10 @@ func (b *builder) kubernetesDiff( } } - result, err := provider.DiffList(oldManifests, newManifests, + result, err := provider.DiffList( + oldManifests, + newManifests, diff.WithEquateEmpty(), - diff.WithIgnoreAddingMapKeys(), diff.WithCompareNumberAndNumericString(), ) if err != nil {
1
// Copyright 2021 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package planpreview import ( "bytes" "context" "fmt" "io" "go.uber.org/zap" provider "github.com/pipe-cd/pipe/pkg/app/piped/cloudprovider/kubernetes" "github.com/pipe-cd/pipe/pkg/app/piped/deploysource" "github.com/pipe-cd/pipe/pkg/cache" "github.com/pipe-cd/pipe/pkg/config" "github.com/pipe-cd/pipe/pkg/diff" "github.com/pipe-cd/pipe/pkg/model" ) func (b *builder) kubernetesDiff( ctx context.Context, app *model.Application, cmd model.Command_BuildPlanPreview, lastSuccessfulCommit string, buf *bytes.Buffer, ) (string, error) { var oldManifests, newManifests []provider.Manifest var err error repoCfg := config.PipedRepository{ RepoID: b.repoCfg.RepoID, Remote: b.repoCfg.Remote, Branch: cmd.HeadBranch, } targetDSP := deploysource.NewProvider( b.workingDir, repoCfg, "target", cmd.HeadCommit, b.gitClient, app.GitPath, b.secretDecrypter, ) newManifests, err = loadKubernetesManifests(ctx, *app, cmd.HeadCommit, targetDSP, b.appManifestsCache, b.logger) if err != nil { fmt.Fprintf(buf, "failed to load kubernetes manifests at the head commit (%v)\n", err) return "", err } if lastSuccessfulCommit != "" { runningDSP := deploysource.NewProvider( b.workingDir, repoCfg, "running", lastSuccessfulCommit, b.gitClient, app.GitPath, b.secretDecrypter, ) oldManifests, err = loadKubernetesManifests(ctx, *app, lastSuccessfulCommit, runningDSP, b.appManifestsCache, b.logger) if err != nil { fmt.Fprintf(buf, "failed to load kubernetes manifests at the running commit (%v)\n", err) return "", err } } result, err := provider.DiffList(oldManifests, newManifests, diff.WithEquateEmpty(), diff.WithIgnoreAddingMapKeys(), diff.WithCompareNumberAndNumericString(), ) if err != nil { fmt.Fprintf(buf, "failed to compare manifests (%v)\n", err) return "", err } if result.NoChange() { fmt.Fprintln(buf, "No changes were detected") return "No changes were detected", nil } summary := fmt.Sprintf("%d added manifests, %d changed manifests, %d deleted manifests", len(result.Adds), len(result.Changes), len(result.Deletes)) fmt.Fprintf(buf, "--- Last Deploy\n+++ Head Commit\n\n%s\n", result.DiffString()) return summary, nil } func loadKubernetesManifests(ctx context.Context, app model.Application, commit string, dsp deploysource.Provider, manifestsCache cache.Cache, logger *zap.Logger) (manifests []provider.Manifest, err error) { cache := provider.AppManifestsCache{ AppID: app.Id, Cache: manifestsCache, Logger: logger, } manifests, ok := cache.Get(commit) if ok { return manifests, nil } // When the manifests were not in the cache we have to load them. 
ds, err := dsp.Get(ctx, io.Discard) if err != nil { return nil, err } deployCfg := ds.DeploymentConfig.KubernetesDeploymentSpec if deployCfg == nil { return nil, fmt.Errorf("malformed deployment configuration file") } loader := provider.NewManifestLoader( app.Name, ds.AppDir, ds.RepoDir, app.GitPath.ConfigFilename, deployCfg.Input, logger, ) manifests, err = loader.LoadManifests(ctx) if err != nil { return nil, err } cache.Put(commit, manifests) return manifests, nil }
1
17,895
We don't need this option because plan-preview is comparing between 2 commits, not using the live state.
pipe-cd-pipe
go
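The removal of `diff.WithIgnoreAddingMapKeys()` in the patch above follows from the comment: that option makes sense when comparing rendered manifests against a live object whose controllers add their own fields, but plan-preview diffs the manifests of two commits, where a newly added key is a real change. A minimal, self-contained Go sketch of what "ignore added map keys" semantics would hide in that setting; it uses plain maps, not the pipecd `diff` package:

package main

import "fmt"

// addedKeys lists the keys that exist only in the new (head-commit) manifest.
// With an "ignore added map keys" option, exactly these would be dropped from
// the diff output.
func addedKeys(oldM, newM map[string]string) []string {
	var added []string
	for k := range newM {
		if _, ok := oldM[k]; !ok {
			added = append(added, k)
		}
	}
	return added
}

func main() {
	oldM := map[string]string{"replicas": "2"}
	newM := map[string]string{"replicas": "2", "strategy": "RollingUpdate"}

	// plan-preview should report "strategy" as an addition between the two commits.
	fmt.Println("keys added in the head commit:", addedKeys(oldM, newM))
}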
@@ -37,6 +37,11 @@ module Beaker # @param [Proc] block The actions to be performed in this step. def step step_name, &block logger.notify "\n * #{step_name}\n" + if @options + @options[:current_test_info] ||= {} + @options[:current_test_info][:step] ||= {} + @options[:current_test_info][:step][:name] = step_name + end yield if block_given? end
1
require 'beaker/dsl/assertions' module Beaker module DSL # These are simple structural elements necessary for writing # understandable tests and ensuring cleanup actions happen. If using a # third party test runner they are unnecessary. # # To include this in your own test runner a method #logger should be # available to yield a logger that implements # {Beaker::Logger}'s interface. As well as a method # #teardown_procs that yields an array. # # @example Structuring a test case. # test_name 'Look at me testing things!' do # teardown do # ...clean up actions... # end # # step 'Prepare the things' do # ...setup steps... # end # # step 'Test the things' do # ...tests... # end # # step 'Expect this to fail' do # expect_failure('expected to fail due to PE-1234') do # assert_equal(400, response.code, 'bad response code from API call') # end # end # module Structure # Provides a method to help structure tests into coherent steps. # @param [String] step_name The name of the step to be logged. # @param [Proc] block The actions to be performed in this step. def step step_name, &block logger.notify "\n * #{step_name}\n" yield if block_given? end # Provides a method to name tests. # # @param [String] my_name The name of the test to be logged. # @param [Proc] block The actions to be performed during this test. # def test_name my_name, &block logger.notify "\n#{my_name}\n" yield if block_given? end # Declare a teardown process that will be called after a test case is # complete. # # @param block [Proc] block of code to execute during teardown # @example Always remove /etc/puppet/modules # teardown do # on(master, puppet_resource('file', '/etc/puppet/modules', # 'ensure=absent', 'purge=true')) # end def teardown &block @teardown_procs << block end # Wrap an assert that is supposed to fail due to a product bug, an # undelivered feature, or some similar situation. # # This converts failing asserts into passing asserts (so we can continue to # run the test even though there are underlying product bugs), and converts # passing asserts into failing asserts (so we know when the underlying product # bug has been fixed). # # Pass an assert as a code block, and pass an explanatory message as a # parameter. The assert's logic will be inverted (so passes turn into fails # and fails turn into passes). # # @example Typical usage # expect_failure('expected to fail due to PE-1234') do # assert_equal(400, response.code, 'bad response code from API call') # end # # @example Output when a product bug would normally cause the assert to fail # Warning: An assertion was expected to fail, and did. # This is probably due to a known product bug, and is probably not a problem. # Additional info: 'expected to fail due to PE-6995' # Failed assertion: 'bad response code from API call. # <400> expected but was <409>.' # # @example Output when the product bug has been fixed # <RuntimeError: An assertion was expected to fail, but passed. # This is probably because a product bug was fixed, and "expect_failure()" # needs to be removed from this assert. 
# Additional info: 'expected to fail due to PE-6996'> # # @param [String] explanation A description of why this assert is expected to # fail # @param block [Proc] block of code is expected to either raise an # {Beaker::Assertions} or else return a value that # will be ignored # @raise [RuntimeError] if the code block passed to this method does not raise # a {Beaker::Assertions} (i.e., if the assert # passes) # @author Chris Cowell-Shah (<tt>[email protected]</tt>) def expect_failure(explanation, &block) begin yield if block_given? # code block should contain an assert that you expect to fail rescue Beaker::DSL::Assertions, Minitest::Assertion => failed_assertion # Yay! The assert in the code block failed, as expected. # Swallow the failure so the test passes. logger.notify 'An assertion was expected to fail, and did. ' + 'This is probably due to a known product bug, ' + 'and is probably not a problem. ' + "Additional info: '#{explanation}' " + "Failed assertion: '#{failed_assertion}'" return end # Uh-oh! The assert in the code block unexpectedly passed. fail('An assertion was expected to fail, but passed. ' + 'This is probably because a product bug was fixed, and ' + '"expect_failure()" needs to be removed from this test. ' + "Additional info: '#{explanation}'") end # Limit the hosts a test case is run against # @note This will modify the {Beaker::TestCase#hosts} member # in place unless an array of hosts is passed into it and # {Beaker::TestCase#logger} yielding an object that responds # like {Beaker::Logger#warn}, as well as # {Beaker::DSL::Outcomes#skip_test}, and optionally # {Beaker::TestCase#hosts}. # # @param [Symbol] type The type of confinement to do. Valid parameters # are *:to* to confine the hosts to only those that # match *criteria* or *:except* to confine the test # case to only those hosts that do not match # criteria. # @param [Hash{Symbol,String=>String,Regexp,Array<String,Regexp>}] # criteria Specify the criteria with which a host should be # considered for inclusion or exclusion. The key is any attribute # of the host that will be yielded by {Beaker::Host#[]}. # The value can be any string/regex or array of strings/regexp. # The values are compared using [Enumerable#any?] so that if one # value of an array matches the host is considered a match for that # criteria. # @param [Array<Host>] host_array This creatively named parameter is # an optional array of hosts to confine to. If not passed in, this # method will modify {Beaker::TestCase#hosts} in place. # @param [Proc] block Addition checks to determine suitability of hosts # for confinement. Each host that is still valid after checking # *criteria* is then passed in turn into this block. The block # should return true if the host matches this additional criteria. # # @example Basic usage to confine to debian OSes. # confine :to, :platform => 'debian' # # @example Confining to anything but Windows and Solaris # confine :except, :platform => ['windows', 'solaris'] # # @example Using additional block to confine to Solaris global zone. # confine :to, :platform => 'solaris' do |solaris| # on( solaris, 'zonename' ) =~ /global/ # end # # @return [Array<Host>] Returns an array of hosts that are still valid # targets for this tests case. # @raise [SkipTest] Raises skip test if there are no valid hosts for # this test case after confinement. 
def confine(type, criteria, host_array = nil, &block) hosts_to_modify = host_array || hosts case type when :except hosts_to_modify = hosts_to_modify - select_hosts(criteria, hosts_to_modify, &block) when :to hosts_to_modify = select_hosts(criteria, hosts_to_modify, &block) else raise "Unknown option #{type}" end if hosts_to_modify.empty? logger.warn "No suitable hosts with: #{criteria.inspect}" skip_test 'No suitable hosts found' end self.hosts = hosts_to_modify hosts_to_modify end # Ensures that host restrictions as specifid by type, criteria and # host_array are confined to activity within the passed block. # TestCase#hosts is reset after block has executed. # # @see #confine def confine_block(type, criteria, host_array = nil, &block) begin original_hosts = self.hosts.dup confine(type, criteria, host_array) yield ensure self.hosts = original_hosts end end #Return a set of hosts that meet the given criteria # @param [Hash{Symbol,String=>String,Regexp,Array<String,Regexp>}] # criteria Specify the criteria with which a host should be # considered for inclusion. The key is any attribute # of the host that will be yielded by {Beaker::Host#[]}. # The value can be any string/regex or array of strings/regexp. # The values are compared using [Enumerable#any?] so that if one # value of an array matches the host is considered a match for that # criteria. # @param [Array<Host>] host_array This creatively named parameter is # an optional array of hosts to confine to. If not passed in, this # method will modify {Beaker::TestCase#hosts} in place. # @param [Proc] block Addition checks to determine suitability of hosts # for selection. Each host that is still valid after checking # *criteria* is then passed in turn into this block. The block # should return true if the host matches this additional criteria. # # @return [Array<Host>] Returns an array of hosts that meet the provided criteria def select_hosts(criteria, host_array = nil, &block) hosts_to_select_from = host_array || hosts criteria.each_pair do |property, value| hosts_to_select_from = hosts_to_select_from.select do |host| inspect_host host, property, value end end if block_given? hosts_to_select_from = hosts_to_select_from.select do |host| yield host end end hosts_to_select_from end # @!visibility private def inspect_host(host, property, one_or_more_values) values = Array(one_or_more_values) return values.any? do |value| true_false = false case value when String true_false = host[property.to_s].include? value when Regexp true_false = host[property.to_s] =~ value end true_false end end end end end
1
9,714
I'm not sure about putting this in the options. I would rather see it living in an object and then being queried through a class - a more object-oriented, Ruby solution.
voxpupuli-beaker
rb
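The comment above asks for the current step name to live on a dedicated object queried through a class rather than in the `@options` hash. A rough sketch of that shape, written in Go rather than the project's Ruby; `CurrentTestInfo` and `Runner` are invented names standing in for whatever Beaker would actually use:

package main

import "fmt"

// CurrentTestInfo holds the metadata a reporter might query, instead of
// burying it under nested hash keys.
type CurrentTestInfo struct {
	StepName string
}

func (c *CurrentTestInfo) SetStep(name string) { c.StepName = name }

// Runner is a stand-in for the test runner / DSL host.
type Runner struct {
	Info CurrentTestInfo
}

// Step records the step name on the info object and then runs the block,
// mirroring the structure of Beaker's step helper.
func (r *Runner) Step(name string, body func()) {
	r.Info.SetStep(name)
	fmt.Printf("\n  * %s\n", name)
	if body != nil {
		body()
	}
}

func main() {
	r := &Runner{}
	r.Step("Prepare the things", func() { fmt.Println("    ...setup...") })
	fmt.Println("current step:", r.Info.StepName)
}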
@@ -237,7 +237,8 @@ func (card *DRICard) readPowerConsumption() (float64, error) { return 0, fmt.Errorf("cannot find average power consumption param in the amdgpu_pm_info") } -func (card *DRICard) Metrics() (*DRICardMetrics, error) { +// Status returns hardware stats for the card +func (card *DRICard) Status() (*DRICardMetrics, error) { if len(card.HwmonPath) == 0 { return nil, fmt.Errorf("metrics interface is not available for %s", card.Name) }
1
package gpu import ( "bufio" "errors" "fmt" "io/ioutil" "os" "path" "regexp" "strconv" "strings" ) const ( pciSlotName = "PCI_SLOT_NAME" ) var ( devDRICardNameRe = regexp.MustCompile(`^(card)([\d]+)`) fullDevicePathRe = regexp.MustCompile(`(\/dev\/[a-z]+)(\d+)`) errBadNameFormat = errors.New("bad name format") ) type DRICard struct { Num int Name string Path string Devices []string DeviceID uint64 VendorID uint64 Major uint64 Minor uint64 PCIBusID string Memory uint64 HwmonPath string } type DRICardMetrics struct { Temperature float64 Fan float64 Power float64 } func NewDRICard(num int, name, path string) (DRICard, error) { c := DRICard{ Num: num, Name: name, Path: path, } err := c.collectRelatedDevices() if err != nil { return DRICard{}, err } err = c.collectDeviceVendorIDs() if err != nil { return DRICard{}, err } err = c.collectPCIBusID() if err != nil { return DRICard{}, err } c.collectHwmonPath() return c, nil } // collectRelatedDevices collects related control and render Devices for card func (card *DRICard) collectRelatedDevices() error { major, minor, err := deviceNumber(card.Path) if err != nil { return err } // query /sys for related DRI Devices sysDevPath := fmt.Sprintf("/sys/dev/char/%d:%d/device/drm/", major, minor) card.Major = major card.Minor = minor fi, err := ioutil.ReadDir(sysDevPath) if err != nil { return err } var devices []string // add found Devices as part of the DRI for _, f := range fi { if f.IsDir() { devices = append(devices, path.Join("/dev/dri/", f.Name())) } } card.Devices = devices return nil } // collectDeviceVendorIDs read vendor and device IDs from /sys and parses its values func (card *DRICard) collectDeviceVendorIDs() error { vendorIDFile := fmt.Sprintf("/sys/class/drm/%s/device/vendor", card.Name) deviceIDFile := fmt.Sprintf("/sys/class/drm/%s/device/device", card.Name) vendorID, err := readSysClassValue(vendorIDFile) if err != nil { return err } deviceID, err := readSysClassValue(deviceIDFile) if err != nil { return err } card.DeviceID = deviceID card.VendorID = vendorID return nil } func (card *DRICard) collectPCIBusID() error { p := fmt.Sprintf("/sys/class/drm/%s/device/uevent", card.Name) f, err := os.Open(p) if err != nil { return err } defer f.Close() scanner := bufio.NewScanner(f) scanner.Split(bufio.ScanLines) for scanner.Scan() { line := scanner.Text() id, ok := parsePCISlotName(line) if ok { card.PCIBusID = id return nil } } return errors.New("cannot find PCI slot name into the file") } func (card *DRICard) collectHwmonPath() { p := fmt.Sprintf("/sys/dev/char/%d:%d/device/hwmon", card.Major, card.Minor) dir, err := ioutil.ReadDir(p) if err != nil { // if we can't find such directory - just skip it, // hwmon dir may be not present for on-board card. 
return } for _, fd := range dir { if fd.IsDir() && strings.HasPrefix(fd.Name(), "hwmon") { card.HwmonPath = fmt.Sprintf("%s/%s", p, fd.Name()) break } } } func (card *DRICard) readFanSpeed() (float64, error) { rawPwm1, err := ioutil.ReadFile(card.HwmonPath + "/pwm1") if err != nil { return 0, fmt.Errorf("failed to read pwm1 data: %v", err) } rawPwmMax, err := ioutil.ReadFile(card.HwmonPath + "/pwm1_max") if err != nil { return 0, fmt.Errorf("failed to read pwm1_max data: %v", err) } pwm1, err := strconv.ParseFloat(strings.TrimSpace(string(rawPwm1)), 64) if err != nil { return 0, err } pwmMax, err := strconv.ParseFloat(strings.TrimSpace(string(rawPwmMax)), 64) if err != nil { return 0, err } speedPercent := pwm1 / pwmMax * 100 return speedPercent, nil } func (card *DRICard) readTemperature() (float64, error) { rawTemp, err := ioutil.ReadFile(card.HwmonPath + "/temp1_input") if err != nil { return 0, err } temp, err := strconv.ParseFloat(strings.TrimSpace(string(rawTemp)), 64) if err != nil { return 0, err } return temp / 1000, nil } func (card *DRICard) readPowerConsumption() (float64, error) { p := fmt.Sprintf("/sys/kernel/debug/dri/%d/amdgpu_pm_info", card.Num) raw, err := ioutil.ReadFile(p) if err != nil { if strings.Contains(err.Error(), "permission denied") { // ignore permission error, it allows us to run worker as regular user return 0, nil } return 0, err } scanner := bufio.NewScanner(strings.NewReader(string(raw))) for scanner.Scan() { line := scanner.Text() if strings.Contains(line, "average GPU") { f := strings.Fields(line) if len(f) == 0 { return 0, fmt.Errorf("cannot extract power info from amdgpu_pm_info") } power, err := strconv.ParseFloat(strings.TrimSpace(f[0]), 64) if err != nil { return 0, err } return power, nil } } return 0, fmt.Errorf("cannot find average power consumption param in the amdgpu_pm_info") } func (card *DRICard) Metrics() (*DRICardMetrics, error) { if len(card.HwmonPath) == 0 { return nil, fmt.Errorf("metrics interface is not available for %s", card.Name) } fan, err := card.readFanSpeed() if err != nil { return nil, err } temp, err := card.readTemperature() if err != nil { return nil, err } power, err := card.readPowerConsumption() if err != nil { return nil, err } m := &DRICardMetrics{ Fan: fan, Temperature: temp, Power: power, } return m, nil } // readSysClassValue reads value from /sys/class/xxx file and // try to parse it as integer func readSysClassValue(f string) (uint64, error) { raw, err := ioutil.ReadFile(f) if err != nil { return 0, err } return parseSysClassValue(raw) } // parseSysClassValue parses data that was read from /sys/class/xxx as uint64 value func parseSysClassValue(v []byte) (uint64, error) { hexWithEOL := strings.Replace(string(v), "0x", "", 1) hex := strings.TrimSpace(hexWithEOL) return strconv.ParseUint(hex, 16, 64) } // parsePCISlotName receive value from the uevent file // and return PCI slot name if any func parsePCISlotName(s string) (string, bool) { if strings.Contains(s, pciSlotName) { parts := strings.Split(s, "=") if len(parts) == 2 { return parts[1], true } } return "", false } // CollectDRICardDevices traverses overs /dev/dri and collect card Devices // which can be bound into the container func CollectDRICardDevices() ([]DRICard, error) { var cards []DRICard ls, err := ioutil.ReadDir("/dev/dri") if err != nil { return cards, err } for _, ff := range ls { c, err := newCardDeviceByName(ff.Name()) if err != nil { if err == errBadNameFormat { continue } return []DRICard{}, err } cards = append(cards, c) } return cards, nil } 
// newCardDeviceByName returns DRI device by given name. func newCardDeviceByName(s string) (DRICard, error) { m := devDRICardNameRe.FindStringSubmatch(s) match := m != nil && len(m) == 3 if !match { return DRICard{}, errBadNameFormat } // do not check error because regexp matches by this group by numeric value v, _ := strconv.ParseInt(m[2], 10, 64) return NewDRICard(int(v), m[0], path.Join("/dev/dri", m[0])) } // newCardByDevicePath returns related DRI card device for // given NVidia device path. Ex: /dev/nvidia1 -> /dev/dri/card1. func newCardByDevicePath(s string) (DRICard, error) { m := fullDevicePathRe.FindStringSubmatch(s) if len(m) != 3 { return DRICard{}, fmt.Errorf("cannot extract device index from %s", s) } name := fmt.Sprintf("card%s", m[2]) card, err := newCardDeviceByName(name) if err != nil { return DRICard{}, err } return card, nil }
1
7,905
So `Status` returns metrics?
sonm-io-core
go
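The question above is about naming: the patch renames `Metrics()` to `Status()` but it still returns a `*DRICardMetrics`. A small hypothetical Go sketch of the alternative where the returned type follows the method name; the struct fields mirror the record's code, and the fixed return values exist only to keep the example runnable:

package main

import "fmt"

// DRICardStatus is a hypothetical rename of DRICardMetrics so that the type
// matches a method called Status.
type DRICardStatus struct {
	Temperature float64 // degrees Celsius
	Fan         float64 // percent of max PWM
	Power       float64 // watts
}

type DRICard struct {
	Name string
}

// Status returns a point-in-time hardware status snapshot for the card.
// The real method reads hwmon files; this sketch returns fixed values.
func (card *DRICard) Status() (*DRICardStatus, error) {
	return &DRICardStatus{Temperature: 62, Fan: 45, Power: 110}, nil
}

func main() {
	card := &DRICard{Name: "card0"}
	st, err := card.Status()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s: %.0fC, fan %.0f%%, %.0fW\n", card.Name, st.Temperature, st.Fan, st.Power)
}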
@@ -3,6 +3,8 @@ class OrganizationsController < ApplicationController helper RatingsHelper before_action :find_organization + before_action :set_outside_committers, only: :outside_committers + before_action :organization_context, only: :outside_committers def outside_projects @outside_projects = @organization.outside_projects((params[:page] || 1), 20)
1
class OrganizationsController < ApplicationController helper ProjectsHelper helper RatingsHelper before_action :find_organization def outside_projects @outside_projects = @organization.outside_projects((params[:page] || 1), 20) end private def find_organization @organization = Organization.from_param(params[:id]).first! rescue ActiveRecord::RecordNotFound raise ParamRecordNotFound end end
1
7,003
Can we make a generic method, say `set_infographic_view`, that checks the 4 values (outside_committers, portfolio_projects, outside_projects, affiliated_committers) against params[:action] and params[:view] and then triggers the respective method dynamically, since all four methods take the same standard argument(s)? What do you think?
blackducksoftware-ohloh-ui
rb
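The suggestion above boils down to one dispatch point keyed on `params[:action]` instead of four near-identical before actions. A toy sketch of that dispatch shape in Go (the project itself is Rails, so every name here, such as `setInfographicView` and the handler table, is purely illustrative):

package main

import "fmt"

type view struct{ title string }

// infographicSetters maps an action name to the setup it needs, standing in
// for the four separate controller methods.
var infographicSetters = map[string]func(page int) view{
	"outside_committers":    func(page int) view { return view{fmt.Sprintf("outside committers, page %d", page)} },
	"portfolio_projects":    func(page int) view { return view{fmt.Sprintf("portfolio projects, page %d", page)} },
	"outside_projects":      func(page int) view { return view{fmt.Sprintf("outside projects, page %d", page)} },
	"affiliated_committers": func(page int) view { return view{fmt.Sprintf("affiliated committers, page %d", page)} },
}

// setInfographicView is the single generic entry point the comment proposes.
func setInfographicView(action string, page int) (view, bool) {
	setter, ok := infographicSetters[action]
	if !ok {
		return view{}, false
	}
	return setter(page), true
}

func main() {
	if v, ok := setInfographicView("outside_projects", 1); ok {
		fmt.Println(v.title)
	}
}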
@@ -56,7 +56,15 @@ public final class IcebergDateObjectInspectorHive3 extends AbstractPrimitiveJava @Override public Object copyObject(Object o) { - return o == null ? null : new Date((Date) o); + if (o == null) { + return null; + } + + if (o instanceof Date) { + return new Date((Date) o); + } else { + return o; + } } }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.mr.hive.serde.objectinspector; import java.time.LocalDate; import org.apache.hadoop.hive.common.type.Date; import org.apache.hadoop.hive.serde2.io.DateWritableV2; import org.apache.hadoop.hive.serde2.objectinspector.primitive.AbstractPrimitiveJavaObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.iceberg.util.DateTimeUtil; public final class IcebergDateObjectInspectorHive3 extends AbstractPrimitiveJavaObjectInspector implements DateObjectInspector { private static final IcebergDateObjectInspectorHive3 INSTANCE = new IcebergDateObjectInspectorHive3(); public static IcebergDateObjectInspectorHive3 get() { return INSTANCE; } private IcebergDateObjectInspectorHive3() { super(TypeInfoFactory.dateTypeInfo); } @Override public Date getPrimitiveJavaObject(Object o) { if (o == null) { return null; } LocalDate date = (LocalDate) o; return Date.ofEpochDay(DateTimeUtil.daysFromDate(date)); } @Override public DateWritableV2 getPrimitiveWritableObject(Object o) { return o == null ? null : new DateWritableV2(DateTimeUtil.daysFromDate((LocalDate) o)); } @Override public Object copyObject(Object o) { return o == null ? null : new Date((Date) o); } }
1
27,557
Why would this be called to copy a non-Date object?
apache-iceberg
java
@@ -77,13 +77,13 @@ func (msg *Boolmsg) Serialize() []byte { native.PutUint16(buf[0:2], uint16(msg.Len())) native.PutUint16(buf[2:4], msg.Type) if msg.Value { - buf[4] = 1 + native.PutUint32(buf[4:8], uint32(1)) } else { - buf[4] = 0 + native.PutUint32(buf[4:8], uint32(0)) } return buf } func (msg *Boolmsg) Len() int { - return unix.NLA_HDRLEN + 1 + return unix.NLA_HDRLEN + 4 // alignment }
1
// +build linux package libcontainer import ( "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) // list of known message types we want to send to bootstrap program // The number is randomly chosen to not conflict with known netlink types const ( InitMsg uint16 = 62000 CloneFlagsAttr uint16 = 27281 NsPathsAttr uint16 = 27282 UidmapAttr uint16 = 27283 GidmapAttr uint16 = 27284 SetgroupAttr uint16 = 27285 OomScoreAdjAttr uint16 = 27286 RootlessAttr uint16 = 27287 UidmapPathAttr uint16 = 27288 GidmapPathAttr uint16 = 27289 ) type Int32msg struct { Type uint16 Value uint32 } // Serialize serializes the message. // Int32msg has the following representation // | nlattr len | nlattr type | // | uint32 value | func (msg *Int32msg) Serialize() []byte { buf := make([]byte, msg.Len()) native := nl.NativeEndian() native.PutUint16(buf[0:2], uint16(msg.Len())) native.PutUint16(buf[2:4], msg.Type) native.PutUint32(buf[4:8], msg.Value) return buf } func (msg *Int32msg) Len() int { return unix.NLA_HDRLEN + 4 } // Bytemsg has the following representation // | nlattr len | nlattr type | // | value | pad | type Bytemsg struct { Type uint16 Value []byte } func (msg *Bytemsg) Serialize() []byte { l := msg.Len() buf := make([]byte, (l+unix.NLA_ALIGNTO-1) & ^(unix.NLA_ALIGNTO-1)) native := nl.NativeEndian() native.PutUint16(buf[0:2], uint16(l)) native.PutUint16(buf[2:4], msg.Type) copy(buf[4:], msg.Value) return buf } func (msg *Bytemsg) Len() int { return unix.NLA_HDRLEN + len(msg.Value) + 1 // null-terminated } type Boolmsg struct { Type uint16 Value bool } func (msg *Boolmsg) Serialize() []byte { buf := make([]byte, msg.Len()) native := nl.NativeEndian() native.PutUint16(buf[0:2], uint16(msg.Len())) native.PutUint16(buf[2:4], msg.Type) if msg.Value { buf[4] = 1 } else { buf[4] = 0 } return buf } func (msg *Boolmsg) Len() int { return unix.NLA_HDRLEN + 1 }
1
15,905
This alignment issue had not appeared before because multiple `Boolmsg`s had never been serialized at once.
opencontainers-runc
go
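The comment above is about netlink attribute alignment: every attribute must start on a 4-byte boundary, so a `Boolmsg` that reports its length as NLA_HDRLEN + 1 = 5 bytes is only safe while it is the last attribute written. A self-contained sketch with the constants spelled out (assuming the usual Linux values NLA_HDRLEN = 4 and NLA_ALIGNTO = 4):

package main

import "fmt"

const (
	nlaHdrLen  = 4 // nlattr header: 2-byte length + 2-byte type
	nlaAlignTo = 4 // attributes must start on 4-byte boundaries
)

// align rounds a length up to the next attribute boundary.
func align(n int) int { return (n + nlaAlignTo - 1) &^ (nlaAlignTo - 1) }

func main() {
	oldLen := nlaHdrLen + 1 // 1-byte bool payload, as before the patch
	newLen := nlaHdrLen + 4 // payload widened to a 4-byte value, as in the patch

	fmt.Printf("old Boolmsg: len=%d, next attribute must start at %d\n", oldLen, align(oldLen))
	fmt.Printf("new Boolmsg: len=%d, next attribute must start at %d\n", newLen, align(newLen))
	// With the old 5-byte form, an attribute appended directly after the buffer
	// would land at offset 5 instead of 8; serializing only one Boolmsg per
	// message is why the misalignment never showed up before.
}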
@@ -71,7 +71,7 @@ func DefaultConfig() Config { PauseContainerTag: DefaultPauseContainerTag, AWSVPCBlockInstanceMetdata: false, ContainerMetadataEnabled: false, - TaskCPUMemLimit: DefaultEnabled, + TaskCPUMemLimit: BooleanDefaultTrue{Value: NotSet}, CgroupPath: defaultCgroupPath, TaskMetadataSteadyStateRate: DefaultTaskMetadataSteadyStateRate, TaskMetadataBurstRate: DefaultTaskMetadataBurstRate,
1
// +build !windows // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package config import ( "fmt" "os" "time" "github.com/aws/amazon-ecs-agent/agent/dockerclient" "github.com/aws/amazon-ecs-agent/agent/utils" ) const ( // AgentCredentialsAddress is used to serve the credentials for tasks. AgentCredentialsAddress = "" // this is left blank right now for net=bridge // defaultAuditLogFile specifies the default audit log filename defaultCredentialsAuditLogFile = "/log/audit.log" // DefaultTaskCgroupPrefix is default cgroup prefix for ECS tasks DefaultTaskCgroupPrefix = "/ecs" // Default cgroup memory system root path, this is the default used if the // path has not been configured through ECS_CGROUP_PATH defaultCgroupPath = "/sys/fs/cgroup" // defaultContainerStartTimeout specifies the value for container start timeout duration defaultContainerStartTimeout = 3 * time.Minute // minimumContainerStartTimeout specifies the minimum value for starting a container minimumContainerStartTimeout = 45 * time.Second // default docker inactivity time is extra time needed on container extraction defaultImagePullInactivityTimeout = 1 * time.Minute ) // DefaultConfig returns the default configuration for Linux func DefaultConfig() Config { return Config{ DockerEndpoint: "unix:///var/run/docker.sock", ReservedPorts: []uint16{SSHPort, DockerReservedPort, DockerReservedSSLPort, AgentIntrospectionPort, AgentCredentialsPort}, ReservedPortsUDP: []uint16{}, DataDir: "/data/", DataDirOnHost: "/var/lib/ecs", DisableMetrics: false, ReservedMemory: 0, AvailableLoggingDrivers: []dockerclient.LoggingDriver{dockerclient.JSONFileDriver, dockerclient.NoneDriver}, TaskCleanupWaitDuration: DefaultTaskCleanupWaitDuration, DockerStopTimeout: defaultDockerStopTimeout, ContainerStartTimeout: defaultContainerStartTimeout, CredentialsAuditLogFile: defaultCredentialsAuditLogFile, CredentialsAuditLogDisabled: false, ImageCleanupDisabled: false, MinimumImageDeletionAge: DefaultImageDeletionAge, NonECSMinimumImageDeletionAge: DefaultNonECSImageDeletionAge, ImageCleanupInterval: DefaultImageCleanupTimeInterval, ImagePullInactivityTimeout: defaultImagePullInactivityTimeout, NumImagesToDeletePerCycle: DefaultNumImagesToDeletePerCycle, NumNonECSContainersToDeletePerCycle: DefaultNumNonECSContainersToDeletePerCycle, CNIPluginsPath: defaultCNIPluginsPath, PauseContainerTarballPath: pauseContainerTarballPath, PauseContainerImageName: DefaultPauseContainerImageName, PauseContainerTag: DefaultPauseContainerTag, AWSVPCBlockInstanceMetdata: false, ContainerMetadataEnabled: false, TaskCPUMemLimit: DefaultEnabled, CgroupPath: defaultCgroupPath, TaskMetadataSteadyStateRate: DefaultTaskMetadataSteadyStateRate, TaskMetadataBurstRate: DefaultTaskMetadataBurstRate, SharedVolumeMatchFullConfig: false, // only requiring shared volumes to match on name, which is default docker behavior ContainerInstancePropagateTagsFrom: ContainerInstancePropagateTagsFromNoneType, PrometheusMetricsEnabled: false, PollMetrics: false, PollingMetricsWaitDuration: 
DefaultPollingMetricsWaitDuration, NvidiaRuntime: DefaultNvidiaRuntime, CgroupCPUPeriod: defaultCgroupCPUPeriod, GMSACapable: false, } } func (cfg *Config) platformOverrides() { cfg.PrometheusMetricsEnabled = utils.ParseBool(os.Getenv("ECS_ENABLE_PROMETHEUS_METRICS"), false) if cfg.PrometheusMetricsEnabled { cfg.ReservedPorts = append(cfg.ReservedPorts, AgentPrometheusExpositionPort) } if cfg.TaskENIEnabled { // when task networking is enabled, eni trunking is enabled by default cfg.ENITrunkingEnabled = utils.ParseBool(os.Getenv("ECS_ENABLE_HIGH_DENSITY_ENI"), true) } } // platformString returns platform-specific config data that can be serialized // to string for debugging func (cfg *Config) platformString() string { // Returns a string if the default image name/tag of the Pause container has // been overridden if cfg.PauseContainerImageName == DefaultPauseContainerImageName && cfg.PauseContainerTag == DefaultPauseContainerTag { return fmt.Sprintf(", PauseContainerImageName: %s, PauseContainerTag: %s", cfg.PauseContainerImageName, cfg.PauseContainerTag) } return "" }
1
24,526
Why are we not implementing a `BooleanDefaultFalse` struct and replacing all of these config vars that default to false with that? Are "default false" config vars not affected by this bug?
aws-amazon-ecs-agent
go
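The patch above changes the default to `BooleanDefaultTrue{Value: NotSet}`, and the question is why the false-by-default variables do not get a `BooleanDefaultFalse` counterpart. A hedged sketch of the tri-state pattern both variants would share; the type and constant names are modelled on the diff, not copied from the agent's real code:

package main

import "fmt"

// BoolValue distinguishes "the user never set this" from an explicit choice,
// which is what a plain bool cannot express.
type BoolValue int

const (
	NotSet BoolValue = iota
	ExplicitlyEnabled
	ExplicitlyDisabled
)

// BooleanDefaultTrue is enabled unless explicitly disabled.
type BooleanDefaultTrue struct{ Value BoolValue }

func (b BooleanDefaultTrue) Enabled() bool { return b.Value != ExplicitlyDisabled }

// BooleanDefaultFalse is the counterpart the comment asks about: disabled
// unless explicitly enabled.
type BooleanDefaultFalse struct{ Value BoolValue }

func (b BooleanDefaultFalse) Enabled() bool { return b.Value == ExplicitlyEnabled }

func main() {
	taskCPUMemLimit := BooleanDefaultTrue{Value: NotSet}
	containerMetadata := BooleanDefaultFalse{Value: NotSet}

	fmt.Println("TaskCPUMemLimit enabled:", taskCPUMemLimit.Enabled())     // true by default
	fmt.Println("ContainerMetadata enabled:", containerMetadata.Enabled()) // false by default
}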
@@ -9,7 +9,7 @@ class RolesController < ApplicationController authorize @role access_level = params[:role][:access_level].to_i - @role.set_access_level(access_level) + @role.access_level = access_level message = '' if params[:user].present? if @role.plan.owner.present? && @role.plan.owner.email == params[:user]
1
class RolesController < ApplicationController include ConditionalUserMailer respond_to :html after_action :verify_authorized def create registered = true @role = Role.new(role_params) authorize @role access_level = params[:role][:access_level].to_i @role.set_access_level(access_level) message = '' if params[:user].present? if @role.plan.owner.present? && @role.plan.owner.email == params[:user] flash[:notice] = _('Cannot share plan with %{email} since that email matches with the owner of the plan.') % {email: params[:user]} else user = User.where_case_insensitive('email',params[:user]).first if Role.find_by(plan: @role.plan, user: user) # role already exists flash[:notice] = _('Plan is already shared with %{email}.') % {email: params[:user]} else if user.nil? registered = false User.invite!(email: params[:user]) message = _('Invitation to %{email} issued successfully. \n') % {email: params[:user]} user = User.find_by(email: params[:user]) end message += _('Plan shared with %{email}.') % {email: user.email} @role.user = user if @role.save if registered deliver_if(recipients: user, key: 'users.added_as_coowner') do |r| UserMailer.sharing_notification(@role, r, inviter: current_user) .deliver_now end end flash[:notice] = message else flash[:alert] = failed_create_error(@role, _('role')) end end end else flash[:notice] = _('Please enter an email address') end redirect_to controller: 'plans', action: 'share', id: @role.plan.id end def update @role = Role.find(params[:id]) authorize @role access_level = params[:role][:access_level].to_i @role.set_access_level(access_level) if @role.update_attributes(role_params) deliver_if(recipients: @role.user, key: 'users.added_as_coowner') do |r| UserMailer.permissions_change_notification(@role, current_user).deliver_now end render json: {code: 1, msg: _("Successfully changed the permissions for #{@role.user.email}. They have been notified via email.")} else render json: {code: 0, msg: flash[:alert]} end end def destroy @role = Role.find(params[:id]) authorize @role user = @role.user plan = @role.plan @role.destroy flash[:notice] = _('Access removed') deliver_if(recipients: user, key: 'users.added_as_coowner') do |r| UserMailer.plan_access_removed(user, plan, current_user).deliver_now end redirect_to controller: 'plans', action: 'share', id: @role.plan.id end # This function makes user's role on a plan inactive - i.e. "removes" this from their plans def deactivate role = Role.find(params[:id]) authorize role role.active = false # if creator, remove from public plans list if role.creator? && role.plan.publicly_visible? role.plan.visibility = Plan.visibilities[:privately_visible] role.plan.save end if role.save flash[:notice] = _('Plan removed') else flash[:alert] = _('Unable to remove the plan') end redirect_to(plans_path) end private def role_params params.require(:role).permit(:plan_id) end end
1
17,919
Replaced this method with a Ruby-idiomatic setter.
DMPRoadmap-roadmap
rb
@@ -2,7 +2,7 @@ * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2017 Mark Samman <[email protected]> * - * This program is free software; you can redistribute it and/or modify + * This program is free tisoftware; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version.
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2017 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include <bitset> #include "bed.h" #include "chat.h" #include "combat.h" #include "configmanager.h" #include "creatureevent.h" #include "events.h" #include "game.h" #include "iologindata.h" #include "monster.h" #include "movement.h" #include "scheduler.h" #include "weapons.h" extern ConfigManager g_config; extern Game g_game; extern Chat* g_chat; extern Vocations g_vocations; extern MoveEvents* g_moveEvents; extern Weapons* g_weapons; extern CreatureEvents* g_creatureEvents; extern Events* g_events; MuteCountMap Player::muteCountMap; uint32_t Player::playerAutoID = 0x10000000; Player::Player(ProtocolGame_ptr p) : Creature(), lastPing(OTSYS_TIME()), lastPong(lastPing), inbox(new Inbox(ITEM_INBOX)), client(std::move(p)) { inbox->incrementReferenceCounter(); } Player::~Player() { for (Item* item : inventory) { if (item) { item->setParent(nullptr); item->decrementReferenceCounter(); } } for (const auto& it : depotLockerMap) { it.second->removeInbox(inbox); it.second->decrementReferenceCounter(); } inbox->decrementReferenceCounter(); setWriteItem(nullptr); setEditHouse(nullptr); } bool Player::setVocation(uint16_t vocId) { Vocation* voc = g_vocations.getVocation(vocId); if (!voc) { return false; } vocation = voc; Condition* condition = getCondition(CONDITION_REGENERATION, CONDITIONID_DEFAULT); if (condition) { condition->setParam(CONDITION_PARAM_HEALTHGAIN, vocation->getHealthGainAmount()); condition->setParam(CONDITION_PARAM_HEALTHTICKS, vocation->getHealthGainTicks() * 1000); condition->setParam(CONDITION_PARAM_MANAGAIN, vocation->getManaGainAmount()); condition->setParam(CONDITION_PARAM_MANATICKS, vocation->getManaGainTicks() * 1000); } return true; } bool Player::isPushable() const { if (hasFlag(PlayerFlag_CannotBePushed)) { return false; } return Creature::isPushable(); } std::string Player::getDescription(int32_t lookDistance) const { std::ostringstream s; if (lookDistance == -1) { s << "yourself."; if (group->access) { s << " You are " << group->name << '.'; } else if (vocation->getId() != VOCATION_NONE) { s << " You are " << vocation->getVocDescription() << '.'; } else { s << " You have no vocation."; } } else { s << name; if (!group->access) { s << " (Level " << level << ')'; } s << '.'; if (sex == PLAYERSEX_FEMALE) { s << " She"; } else { s << " He"; } if (group->access) { s << " is " << group->name << '.'; } else if (vocation->getId() != VOCATION_NONE) { s << " is " << vocation->getVocDescription() << '.'; } else { s << " has no vocation."; } } if (party) { if (lookDistance == -1) { s << " Your party has "; } else if (sex == PLAYERSEX_FEMALE) { s << " She is in a party with "; } else { s << " He is in a party with "; } size_t memberCount = 
party->getMemberCount() + 1; if (memberCount == 1) { s << "1 member and "; } else { s << memberCount << " members and "; } size_t invitationCount = party->getInvitationCount(); if (invitationCount == 1) { s << "1 pending invitation."; } else { s << invitationCount << " pending invitations."; } } if (guild && guildRank) { if (lookDistance == -1) { s << " You are "; } else if (sex == PLAYERSEX_FEMALE) { s << " She is "; } else { s << " He is "; } s << guildRank->name << " of the " << guild->getName(); if (!guildNick.empty()) { s << " (" << guildNick << ')'; } size_t memberCount = guild->getMemberCount(); if (memberCount == 1) { s << ", which has 1 member, " << guild->getMembersOnline().size() << " of them online."; } else { s << ", which has " << memberCount << " members, " << guild->getMembersOnline().size() << " of them online."; } } return s.str(); } Item* Player::getInventoryItem(slots_t slot) const { if (slot < CONST_SLOT_FIRST || slot > CONST_SLOT_LAST) { return nullptr; } return inventory[slot]; } void Player::addConditionSuppressions(uint32_t conditions) { conditionSuppressions |= conditions; } void Player::removeConditionSuppressions(uint32_t conditions) { conditionSuppressions &= ~conditions; } Item* Player::getWeapon(slots_t slot, bool ignoreAmmo) const { Item* item = inventory[slot]; if (!item) { return nullptr; } WeaponType_t weaponType = item->getWeaponType(); if (weaponType == WEAPON_NONE || weaponType == WEAPON_SHIELD || weaponType == WEAPON_AMMO) { return nullptr; } if (!ignoreAmmo && weaponType == WEAPON_DISTANCE) { const ItemType& it = Item::items[item->getID()]; if (it.ammoType != AMMO_NONE) { Item* ammoItem = inventory[CONST_SLOT_AMMO]; if (!ammoItem || ammoItem->getAmmoType() != it.ammoType) { return nullptr; } item = ammoItem; } } return item; } Item* Player::getWeapon(bool ignoreAmmo/* = false*/) const { Item* item = getWeapon(CONST_SLOT_LEFT, ignoreAmmo); if (item) { return item; } item = getWeapon(CONST_SLOT_RIGHT, ignoreAmmo); if (item) { return item; } return nullptr; } WeaponType_t Player::getWeaponType() const { Item* item = getWeapon(); if (!item) { return WEAPON_NONE; } return item->getWeaponType(); } int32_t Player::getWeaponSkill(const Item* item) const { if (!item) { return getSkillLevel(SKILL_FIST); } int32_t attackSkill; WeaponType_t weaponType = item->getWeaponType(); switch (weaponType) { case WEAPON_SWORD: { attackSkill = getSkillLevel(SKILL_SWORD); break; } case WEAPON_CLUB: { attackSkill = getSkillLevel(SKILL_CLUB); break; } case WEAPON_AXE: { attackSkill = getSkillLevel(SKILL_AXE); break; } case WEAPON_DISTANCE: { attackSkill = getSkillLevel(SKILL_DISTANCE); break; } default: { attackSkill = 0; break; } } return attackSkill; } int32_t Player::getArmor() const { int32_t armor = 0; static const slots_t armorSlots[] = {CONST_SLOT_HEAD, CONST_SLOT_NECKLACE, CONST_SLOT_ARMOR, CONST_SLOT_LEGS, CONST_SLOT_FEET, CONST_SLOT_RING}; for (slots_t slot : armorSlots) { Item* inventoryItem = inventory[slot]; if (inventoryItem) { armor += inventoryItem->getArmor(); } } return static_cast<int32_t>(armor * vocation->armorMultiplier); } void Player::getShieldAndWeapon(const Item*& shield, const Item*& weapon) const { shield = nullptr; weapon = nullptr; for (uint32_t slot = CONST_SLOT_RIGHT; slot <= CONST_SLOT_LEFT; slot++) { Item* item = inventory[slot]; if (!item) { continue; } switch (item->getWeaponType()) { case WEAPON_NONE: break; case WEAPON_SHIELD: { if (!shield || item->getDefense() > shield->getDefense()) { shield = item; } break; } default: { // weapons that 
are not shields weapon = item; break; } } } } int32_t Player::getDefense() const { int32_t baseDefense = 5; int32_t defenseValue = 0; int32_t defenseSkill = 0; int32_t extraDefense = 0; float defenseFactor = getDefenseFactor(); const Item* weapon; const Item* shield; getShieldAndWeapon(shield, weapon); if (weapon) { defenseValue = baseDefense + weapon->getDefense(); extraDefense = weapon->getExtraDefense(); defenseSkill = getWeaponSkill(weapon); } if (shield && shield->getDefense() >= defenseValue) { defenseValue = baseDefense + shield->getDefense() + extraDefense; defenseSkill = getSkillLevel(SKILL_SHIELD); } if (defenseSkill == 0) { return 0; } defenseValue = static_cast<int32_t>(defenseValue * vocation->defenseMultiplier); return static_cast<int32_t>(std::ceil((static_cast<float>(defenseSkill * (defenseValue * 0.015)) + (defenseValue * 0.1)) * defenseFactor)); } float Player::getAttackFactor() const { switch (fightMode) { case FIGHTMODE_ATTACK: return 1.0f; case FIGHTMODE_BALANCED: return 1.2f; case FIGHTMODE_DEFENSE: return 2.0f; default: return 1.0f; } } float Player::getDefenseFactor() const { switch (fightMode) { case FIGHTMODE_ATTACK: return 1.0f; case FIGHTMODE_BALANCED: return 1.2f; case FIGHTMODE_DEFENSE: { if ((OTSYS_TIME() - lastAttack) < getAttackSpeed()) { return 1.0f; } return 2.0f; } default: return 1.0f; } } uint16_t Player::getClientIcons() const { uint16_t icons = 0; for (Condition* condition : conditions) { if (!isSuppress(condition->getType())) { icons |= condition->getIcons(); } } if (pzLocked) { icons |= ICON_REDSWORDS; } if (tile->hasFlag(TILESTATE_PROTECTIONZONE)) { icons |= ICON_PIGEON; // Don't show ICON_SWORDS if player is in protection zone. if (hasBitSet(ICON_SWORDS, icons)) { icons &= ~ICON_SWORDS; } } // Game client debugs with 10 or more icons // so let's prevent that from happening. 
std::bitset<20> icon_bitset(static_cast<uint64_t>(icons)); for (size_t pos = 0, bits_set = icon_bitset.count(); bits_set >= 10; ++pos) { if (icon_bitset[pos]) { icon_bitset.reset(pos); --bits_set; } } return icon_bitset.to_ulong(); } void Player::updateInventoryWeight() { if (hasFlag(PlayerFlag_HasInfiniteCapacity)) { return; } inventoryWeight = 0; for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) { const Item* item = inventory[i]; if (item) { inventoryWeight += item->getWeight(); } } } void Player::addSkillAdvance(skills_t skill, uint64_t count) { uint64_t currReqTries = vocation->getReqSkillTries(skill, skills[skill].level); uint64_t nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1); if (currReqTries >= nextReqTries) { //player has reached max skill return; } g_events->eventPlayerOnGainSkillTries(this, skill, count); if (count == 0) { return; } bool sendUpdateSkills = false; while ((skills[skill].tries + count) >= nextReqTries) { count -= nextReqTries - skills[skill].tries; skills[skill].level++; skills[skill].tries = 0; skills[skill].percent = 0; std::ostringstream ss; ss << "You advanced to " << getSkillName(skill) << " level " << skills[skill].level << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); g_creatureEvents->playerAdvance(this, skill, (skills[skill].level - 1), skills[skill].level); sendUpdateSkills = true; currReqTries = nextReqTries; nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1); if (currReqTries >= nextReqTries) { count = 0; break; } } skills[skill].tries += count; uint32_t newPercent; if (nextReqTries > currReqTries) { newPercent = Player::getPercentLevel(skills[skill].tries, nextReqTries); } else { newPercent = 0; } if (skills[skill].percent != newPercent) { skills[skill].percent = newPercent; sendUpdateSkills = true; } if (sendUpdateSkills) { sendSkills(); } } void Player::setVarStats(stats_t stat, int32_t modifier) { varStats[stat] += modifier; switch (stat) { case STAT_MAXHITPOINTS: { if (getHealth() > getMaxHealth()) { Creature::changeHealth(getMaxHealth() - getHealth()); } else { g_game.addCreatureHealth(this); } break; } case STAT_MAXMANAPOINTS: { if (getMana() > getMaxMana()) { Creature::changeMana(getMaxMana() - getMana()); } break; } default: { break; } } } int32_t Player::getDefaultStats(stats_t stat) const { switch (stat) { case STAT_MAXHITPOINTS: return healthMax; case STAT_MAXMANAPOINTS: return manaMax; case STAT_MAGICPOINTS: return getBaseMagicLevel(); default: return 0; } } void Player::addContainer(uint8_t cid, Container* container) { if (cid > 0xF) { return; } if (container->getID() == ITEM_BROWSEFIELD) { container->incrementReferenceCounter(); } auto it = openContainers.find(cid); if (it != openContainers.end()) { OpenContainer& openContainer = it->second; Container* oldContainer = openContainer.container; if (oldContainer->getID() == ITEM_BROWSEFIELD) { oldContainer->decrementReferenceCounter(); } openContainer.container = container; openContainer.index = 0; } else { OpenContainer openContainer; openContainer.container = container; openContainer.index = 0; openContainers[cid] = openContainer; } } void Player::closeContainer(uint8_t cid) { auto it = openContainers.find(cid); if (it == openContainers.end()) { return; } OpenContainer openContainer = it->second; Container* container = openContainer.container; openContainers.erase(it); if (container && container->getID() == ITEM_BROWSEFIELD) { container->decrementReferenceCounter(); } } void Player::setContainerIndex(uint8_t cid, uint16_t 
index) { auto it = openContainers.find(cid); if (it == openContainers.end()) { return; } it->second.index = index; } Container* Player::getContainerByID(uint8_t cid) { auto it = openContainers.find(cid); if (it == openContainers.end()) { return nullptr; } return it->second.container; } int8_t Player::getContainerID(const Container* container) const { for (const auto& it : openContainers) { if (it.second.container == container) { return it.first; } } return -1; } uint16_t Player::getContainerIndex(uint8_t cid) const { auto it = openContainers.find(cid); if (it == openContainers.end()) { return 0; } return it->second.index; } bool Player::canOpenCorpse(uint32_t ownerId) const { return getID() == ownerId || (party && party->canOpenCorpse(ownerId)); } uint16_t Player::getLookCorpse() const { if (sex == PLAYERSEX_FEMALE) { return ITEM_FEMALE_CORPSE; } else { return ITEM_MALE_CORPSE; } } void Player::addStorageValue(const uint32_t key, const int32_t value, const bool isLogin/* = false*/) { if (IS_IN_KEYRANGE(key, RESERVED_RANGE)) { if (IS_IN_KEYRANGE(key, OUTFITS_RANGE)) { outfits.emplace_back( value >> 16, value & 0xFF ); return; } else if (IS_IN_KEYRANGE(key, MOUNTS_RANGE)) { // do nothing } else { std::cout << "Warning: unknown reserved key: " << key << " player: " << getName() << std::endl; return; } } if (value != -1) { int32_t oldValue; getStorageValue(key, oldValue); storageMap[key] = value; if (!isLogin) { auto currentFrameTime = g_dispatcher.getDispatcherCycle(); if (lastQuestlogUpdate != currentFrameTime && g_game.quests.isQuestStorage(key, value, oldValue)) { lastQuestlogUpdate = currentFrameTime; sendTextMessage(MESSAGE_EVENT_ADVANCE, "Your questlog has been updated."); } } } else { storageMap.erase(key); } } bool Player::getStorageValue(const uint32_t key, int32_t& value) const { auto it = storageMap.find(key); if (it == storageMap.end()) { value = -1; return false; } value = it->second; return true; } bool Player::canSee(const Position& pos) const { if (!client) { return false; } return client->canSee(pos); } bool Player::canSeeCreature(const Creature* creature) const { if (creature == this) { return true; } if (creature->isInGhostMode() && !group->access) { return false; } if (!creature->getPlayer() && !canSeeInvisibility() && creature->isInvisible()) { return false; } return true; } bool Player::canWalkthrough(const Creature* creature) const { if (group->access || creature->isInGhostMode()) { return true; } const Player* player = creature->getPlayer(); if (!player) { return false; } const Tile* playerTile = player->getTile(); if (!playerTile || !playerTile->hasFlag(TILESTATE_PROTECTIONZONE)) { return false; } const Item* playerTileGround = playerTile->getGround(); if (!playerTileGround || !playerTileGround->hasWalkStack()) { return false; } Player* thisPlayer = const_cast<Player*>(this); if ((OTSYS_TIME() - lastWalkthroughAttempt) > 2000) { thisPlayer->setLastWalkthroughAttempt(OTSYS_TIME()); return false; } if (creature->getPosition() != lastWalkthroughPosition) { thisPlayer->setLastWalkthroughPosition(creature->getPosition()); return false; } thisPlayer->setLastWalkthroughPosition(creature->getPosition()); return true; } bool Player::canWalkthroughEx(const Creature* creature) const { if (group->access) { return true; } const Player* player = creature->getPlayer(); if (!player) { return false; } const Tile* playerTile = player->getTile(); return playerTile && playerTile->hasFlag(TILESTATE_PROTECTIONZONE); } void Player::onReceiveMail() const { if (isNearDepotBox()) { 
sendTextMessage(MESSAGE_EVENT_ADVANCE, "New mail has arrived."); } } bool Player::isNearDepotBox() const { const Position& pos = getPosition(); for (int32_t cx = -1; cx <= 1; ++cx) { for (int32_t cy = -1; cy <= 1; ++cy) { Tile* tile = g_game.map.getTile(pos.x + cx, pos.y + cy, pos.z); if (!tile) { continue; } if (tile->hasFlag(TILESTATE_DEPOT)) { return true; } } } return false; } DepotChest* Player::getDepotChest(uint32_t depotId, bool autoCreate) { auto it = depotChests.find(depotId); if (it != depotChests.end()) { return it->second; } if (!autoCreate) { return nullptr; } DepotChest* depotChest = new DepotChest(ITEM_DEPOT); depotChest->incrementReferenceCounter(); depotChest->setMaxDepotItems(getMaxDepotItems()); depotChests[depotId] = depotChest; return depotChest; } DepotLocker* Player::getDepotLocker(uint32_t depotId) { auto it = depotLockerMap.find(depotId); if (it != depotLockerMap.end()) { inbox->setParent(it->second); return it->second; } DepotLocker* depotLocker = new DepotLocker(ITEM_LOCKER1); depotLocker->setDepotId(depotId); depotLocker->internalAddThing(Item::CreateItem(ITEM_MARKET)); depotLocker->internalAddThing(inbox); depotLocker->internalAddThing(getDepotChest(depotId, true)); depotLockerMap[depotId] = depotLocker; return depotLocker; } void Player::sendCancelMessage(ReturnValue message) const { sendCancelMessage(getReturnMessage(message)); } void Player::sendStats() { if (client) { client->sendStats(); lastStatsTrainingTime = getOfflineTrainingTime() / 60 / 1000; } } void Player::sendPing() { int64_t timeNow = OTSYS_TIME(); bool hasLostConnection = false; if ((timeNow - lastPing) >= 5000) { lastPing = timeNow; if (client) { client->sendPing(); } else { hasLostConnection = true; } } int64_t noPongTime = timeNow - lastPong; if ((hasLostConnection || noPongTime >= 7000) && attackedCreature && attackedCreature->getPlayer()) { setAttackedCreature(nullptr); } if (noPongTime >= 60000 && canLogout()) { if (g_creatureEvents->playerLogout(this)) { if (client) { client->logout(true, true); } else { g_game.removeCreature(this, true); } } } } Item* Player::getWriteItem(uint32_t& windowTextId, uint16_t& maxWriteLen) { windowTextId = this->windowTextId; maxWriteLen = this->maxWriteLen; return writeItem; } void Player::setWriteItem(Item* item, uint16_t maxWriteLen /*= 0*/) { windowTextId++; if (writeItem) { writeItem->decrementReferenceCounter(); } if (item) { writeItem = item; this->maxWriteLen = maxWriteLen; writeItem->incrementReferenceCounter(); } else { writeItem = nullptr; this->maxWriteLen = 0; } } House* Player::getEditHouse(uint32_t& windowTextId, uint32_t& listId) { windowTextId = this->windowTextId; listId = this->editListId; return editHouse; } void Player::setEditHouse(House* house, uint32_t listId /*= 0*/) { windowTextId++; editHouse = house; editListId = listId; } void Player::sendHouseWindow(House* house, uint32_t listId) const { if (!client) { return; } std::string text; if (house->getAccessList(listId, text)) { client->sendHouseWindow(windowTextId, text); } } //container void Player::sendAddContainerItem(const Container* container, const Item* item) { if (!client) { return; } for (const auto& it : openContainers) { const OpenContainer& openContainer = it.second; if (openContainer.container != container) { continue; } uint16_t slot = openContainer.index; if (container->getID() == ITEM_BROWSEFIELD) { uint16_t containerSize = container->size() - 1; uint16_t pageEnd = openContainer.index + container->capacity() - 1; if (containerSize > pageEnd) { slot = pageEnd; item = 
container->getItemByIndex(pageEnd); } else { slot = containerSize; } } else if (openContainer.index >= container->capacity()) { item = container->getItemByIndex(openContainer.index - 1); } client->sendAddContainerItem(it.first, slot, item); } } void Player::sendUpdateContainerItem(const Container* container, uint16_t slot, const Item* newItem) { if (!client) { return; } for (const auto& it : openContainers) { const OpenContainer& openContainer = it.second; if (openContainer.container != container) { continue; } if (slot < openContainer.index) { continue; } uint16_t pageEnd = openContainer.index + container->capacity(); if (slot >= pageEnd) { continue; } client->sendUpdateContainerItem(it.first, slot, newItem); } } void Player::sendRemoveContainerItem(const Container* container, uint16_t slot) { if (!client) { return; } for (auto& it : openContainers) { OpenContainer& openContainer = it.second; if (openContainer.container != container) { continue; } uint16_t& firstIndex = openContainer.index; if (firstIndex > 0 && firstIndex >= container->size() - 1) { firstIndex -= container->capacity(); sendContainer(it.first, container, false, firstIndex); } client->sendRemoveContainerItem(it.first, std::max<uint16_t>(slot, firstIndex), container->getItemByIndex(container->capacity() + firstIndex)); } } void Player::onUpdateTileItem(const Tile* tile, const Position& pos, const Item* oldItem, const ItemType& oldType, const Item* newItem, const ItemType& newType) { Creature::onUpdateTileItem(tile, pos, oldItem, oldType, newItem, newType); if (oldItem != newItem) { onRemoveTileItem(tile, pos, oldType, oldItem); } if (tradeState != TRADE_TRANSFER) { if (tradeItem && oldItem == tradeItem) { g_game.internalCloseTrade(this); } } } void Player::onRemoveTileItem(const Tile* tile, const Position& pos, const ItemType& iType, const Item* item) { Creature::onRemoveTileItem(tile, pos, iType, item); if (tradeState != TRADE_TRANSFER) { checkTradeState(item); if (tradeItem) { const Container* container = item->getContainer(); if (container && container->isHoldingItem(tradeItem)) { g_game.internalCloseTrade(this); } } } } void Player::onCreatureAppear(Creature* creature, bool isLogin) { Creature::onCreatureAppear(creature, isLogin); if (isLogin && creature == this) { for (int32_t slot = CONST_SLOT_FIRST; slot <= CONST_SLOT_LAST; ++slot) { Item* item = inventory[slot]; if (item) { item->startDecaying(); g_moveEvents->onPlayerEquip(this, item, static_cast<slots_t>(slot), false); } } for (Condition* condition : storedConditionList) { addCondition(condition); } storedConditionList.clear(); BedItem* bed = g_game.getBedBySleeper(guid); if (bed) { bed->wakeUp(this); } std::cout << name << " has logged in." << std::endl; if (guild) { guild->addMember(this); } int32_t offlineTime; if (getLastLogout() != 0) { // Not counting more than 21 days to prevent overflow when multiplying with 1000 (for milliseconds). 
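			// (86400 * 21 = 1,814,400 seconds; multiplied by 1000 for milliseconds that is
			// 1,814,400,000, which still fits in a signed 32-bit value, INT32_MAX being 2,147,483,647.)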
			offlineTime = std::min<int32_t>(time(nullptr) - getLastLogout(), 86400 * 21);
		} else {
			offlineTime = 0;
		}

		for (Condition* condition : getMuteConditions()) {
			condition->setTicks(condition->getTicks() - (offlineTime * 1000));
			if (condition->getTicks() <= 0) {
				removeCondition(condition);
			}
		}

		g_game.checkPlayersRecord();
		IOLoginData::updateOnlineStatus(guid, true);
	}
}

void Player::onAttackedCreatureDisappear(bool isLogout)
{
	sendCancelTarget();

	if (!isLogout) {
		sendTextMessage(MESSAGE_STATUS_SMALL, "Target lost.");
	}
}

void Player::onFollowCreatureDisappear(bool isLogout)
{
	sendCancelTarget();

	if (!isLogout) {
		sendTextMessage(MESSAGE_STATUS_SMALL, "Target lost.");
	}
}

void Player::onChangeZone(ZoneType_t zone)
{
	if (zone == ZONE_PROTECTION) {
		if (attackedCreature && !hasFlag(PlayerFlag_IgnoreProtectionZone)) {
			setAttackedCreature(nullptr);
			onAttackedCreatureDisappear(false);
		}

		if (!group->access && isMounted()) {
			dismount();
			g_game.internalCreatureChangeOutfit(this, defaultOutfit);
			wasMounted = true;
		}
	} else {
		if (wasMounted) {
			toggleMount(true);
			wasMounted = false;
		}
	}

	g_game.updateCreatureWalkthrough(this);
	sendIcons();
}

void Player::onAttackedCreatureChangeZone(ZoneType_t zone)
{
	if (zone == ZONE_PROTECTION) {
		if (!hasFlag(PlayerFlag_IgnoreProtectionZone)) {
			setAttackedCreature(nullptr);
			onAttackedCreatureDisappear(false);
		}
	} else if (zone == ZONE_NOPVP) {
		if (attackedCreature->getPlayer()) {
			if (!hasFlag(PlayerFlag_IgnoreProtectionZone)) {
				setAttackedCreature(nullptr);
				onAttackedCreatureDisappear(false);
			}
		}
	} else if (zone == ZONE_NORMAL) {
		//attackedCreature can leave a pvp zone if not pzlocked
		if (g_game.getWorldType() == WORLD_TYPE_NO_PVP) {
			if (attackedCreature->getPlayer()) {
				setAttackedCreature(nullptr);
				onAttackedCreatureDisappear(false);
			}
		}
	}
}

void Player::onRemoveCreature(Creature* creature, bool isLogout)
{
	Creature::onRemoveCreature(creature, isLogout);

	if (creature == this) {
		if (isLogout) {
			loginPosition = getPosition();
		}

		lastLogout = time(nullptr);

		if (eventWalk != 0) {
			setFollowCreature(nullptr);
		}

		if (tradePartner) {
			g_game.internalCloseTrade(this);
		}

		closeShopWindow();

		clearPartyInvitations();

		if (party) {
			party->leaveParty(this);
		}

		g_chat->removeUserFromAllChannels(*this);

		std::cout << getName() << " has logged out."
<< std::endl; if (guild) { guild->removeMember(this); } IOLoginData::updateOnlineStatus(guid, false); bool saved = false; for (uint32_t tries = 0; tries < 3; ++tries) { if (IOLoginData::savePlayer(this)) { saved = true; break; } } if (!saved) { std::cout << "Error while saving player: " << getName() << std::endl; } } } void Player::openShopWindow(Npc* npc, const std::list<ShopInfo>& shop) { shopItemList = shop; sendShop(npc); sendSaleItemList(); } bool Player::closeShopWindow(bool sendCloseShopWindow /*= true*/) { //unreference callbacks int32_t onBuy; int32_t onSell; Npc* npc = getShopOwner(onBuy, onSell); if (!npc) { shopItemList.clear(); return false; } setShopOwner(nullptr, -1, -1); npc->onPlayerEndTrade(this, onBuy, onSell); if (sendCloseShopWindow) { sendCloseShop(); } shopItemList.clear(); return true; } void Player::onWalk(Direction& dir) { Creature::onWalk(dir); setNextActionTask(nullptr); setNextAction(OTSYS_TIME() + getStepDuration(dir)); } void Player::onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos, const Tile* oldTile, const Position& oldPos, bool teleport) { Creature::onCreatureMove(creature, newTile, newPos, oldTile, oldPos, teleport); if (hasFollowPath && (creature == followCreature || (creature == this && followCreature))) { isUpdatingPath = false; g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, &g_game, getID()))); } if (creature != this) { return; } if (tradeState != TRADE_TRANSFER) { //check if we should close trade if (tradeItem && !Position::areInRange<1, 1, 0>(tradeItem->getPosition(), getPosition())) { g_game.internalCloseTrade(this); } if (tradePartner && !Position::areInRange<2, 2, 0>(tradePartner->getPosition(), getPosition())) { g_game.internalCloseTrade(this); } } // close modal windows if (!modalWindows.empty()) { // TODO: This shouldn't be hardcoded for (uint32_t modalWindowId : modalWindows) { if (modalWindowId == std::numeric_limits<uint32_t>::max()) { sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted."); break; } } modalWindows.clear(); } // leave market if (inMarket) { inMarket = false; } if (party) { party->updateSharedExperience(); } if (teleport || oldPos.z != newPos.z) { int32_t ticks = g_config.getNumber(ConfigManager::STAIRHOP_DELAY); if (ticks > 0) { if (Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_PACIFIED, ticks, 0)) { addCondition(condition); } } } } //container void Player::onAddContainerItem(const Item* item) { checkTradeState(item); } void Player::onUpdateContainerItem(const Container* container, const Item* oldItem, const Item* newItem) { if (oldItem != newItem) { onRemoveContainerItem(container, oldItem); } if (tradeState != TRADE_TRANSFER) { checkTradeState(oldItem); } } void Player::onRemoveContainerItem(const Container* container, const Item* item) { if (tradeState != TRADE_TRANSFER) { checkTradeState(item); if (tradeItem) { if (tradeItem->getParent() != container && container->isHoldingItem(tradeItem)) { g_game.internalCloseTrade(this); } } } } void Player::onCloseContainer(const Container* container) { if (!client) { return; } for (const auto& it : openContainers) { if (it.second.container == container) { client->sendCloseContainer(it.first); } } } void Player::onSendContainer(const Container* container) { if (!client) { return; } bool hasParent = container->hasParent(); for (const auto& it : openContainers) { const OpenContainer& openContainer = it.second; if (openContainer.container == container) { client->sendContainer(it.first, 
container, hasParent, openContainer.index); } } } //inventory void Player::onUpdateInventoryItem(Item* oldItem, Item* newItem) { if (oldItem != newItem) { onRemoveInventoryItem(oldItem); } if (tradeState != TRADE_TRANSFER) { checkTradeState(oldItem); } } void Player::onRemoveInventoryItem(Item* item) { if (tradeState != TRADE_TRANSFER) { checkTradeState(item); if (tradeItem) { const Container* container = item->getContainer(); if (container && container->isHoldingItem(tradeItem)) { g_game.internalCloseTrade(this); } } } } void Player::checkTradeState(const Item* item) { if (!tradeItem || tradeState == TRADE_TRANSFER) { return; } if (tradeItem == item) { g_game.internalCloseTrade(this); } else { const Container* container = dynamic_cast<const Container*>(item->getParent()); while (container) { if (container == tradeItem) { g_game.internalCloseTrade(this); break; } container = dynamic_cast<const Container*>(container->getParent()); } } } void Player::setNextWalkActionTask(SchedulerTask* task) { if (walkTaskEvent != 0) { g_scheduler.stopEvent(walkTaskEvent); walkTaskEvent = 0; } delete walkTask; walkTask = task; } void Player::setNextWalkTask(SchedulerTask* task) { if (nextStepEvent != 0) { g_scheduler.stopEvent(nextStepEvent); nextStepEvent = 0; } if (task) { nextStepEvent = g_scheduler.addEvent(task); resetIdleTime(); } } void Player::setNextActionTask(SchedulerTask* task) { if (actionTaskEvent != 0) { g_scheduler.stopEvent(actionTaskEvent); actionTaskEvent = 0; } if (task) { actionTaskEvent = g_scheduler.addEvent(task); resetIdleTime(); } } uint32_t Player::getNextActionTime() const { return std::max<int64_t>(SCHEDULER_MINTICKS, nextAction - OTSYS_TIME()); } void Player::onThink(uint32_t interval) { Creature::onThink(interval); sendPing(); MessageBufferTicks += interval; if (MessageBufferTicks >= 1500) { MessageBufferTicks = 0; addMessageBuffer(); } if (!getTile()->hasFlag(TILESTATE_NOLOGOUT) && !isAccessPlayer()) { idleTime += interval; const int32_t kickAfterMinutes = g_config.getNumber(ConfigManager::KICK_AFTER_MINUTES); if (idleTime > (kickAfterMinutes * 60000) + 60000) { kickPlayer(true); } else if (client && idleTime == 60000 * kickAfterMinutes) { std::ostringstream ss; ss << "You have been idle for " << kickAfterMinutes << " minutes. 
You will be disconnected in one minute if you are still idle then."; client->sendTextMessage(TextMessage(MESSAGE_STATUS_WARNING, ss.str())); } } if (g_game.getWorldType() != WORLD_TYPE_PVP_ENFORCED) { checkSkullTicks(interval); } addOfflineTrainingTime(interval); if (lastStatsTrainingTime != getOfflineTrainingTime() / 60 / 1000) { sendStats(); } } uint32_t Player::isMuted() const { if (hasFlag(PlayerFlag_CannotBeMuted)) { return 0; } int32_t muteTicks = 0; for (Condition* condition : conditions) { if (condition->getType() == CONDITION_MUTED && condition->getTicks() > muteTicks) { muteTicks = condition->getTicks(); } } return static_cast<uint32_t>(muteTicks) / 1000; } void Player::addMessageBuffer() { if (MessageBufferCount > 0 && g_config.getNumber(ConfigManager::MAX_MESSAGEBUFFER) != 0 && !hasFlag(PlayerFlag_CannotBeMuted)) { --MessageBufferCount; } } void Player::removeMessageBuffer() { if (hasFlag(PlayerFlag_CannotBeMuted)) { return; } const int32_t maxMessageBuffer = g_config.getNumber(ConfigManager::MAX_MESSAGEBUFFER); if (maxMessageBuffer != 0 && MessageBufferCount <= maxMessageBuffer + 1) { if (++MessageBufferCount > maxMessageBuffer) { uint32_t muteCount = 1; auto it = muteCountMap.find(guid); if (it != muteCountMap.end()) { muteCount = it->second; } uint32_t muteTime = 5 * muteCount * muteCount; muteCountMap[guid] = muteCount + 1; Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_MUTED, muteTime * 1000, 0); addCondition(condition); std::ostringstream ss; ss << "You are muted for " << muteTime << " seconds."; sendTextMessage(MESSAGE_STATUS_SMALL, ss.str()); } } } void Player::drainHealth(Creature* attacker, int32_t damage) { Creature::drainHealth(attacker, damage); sendStats(); } void Player::drainMana(Creature* attacker, int32_t manaLoss) { Creature::drainMana(attacker, manaLoss); sendStats(); } void Player::addManaSpent(uint64_t amount) { if (hasFlag(PlayerFlag_NotGainMana)) { return; } uint64_t currReqMana = vocation->getReqMana(magLevel); uint64_t nextReqMana = vocation->getReqMana(magLevel + 1); if (currReqMana >= nextReqMana) { //player has reached max magic level return; } g_events->eventPlayerOnGainSkillTries(this, SKILL_MAGLEVEL, amount); if (amount == 0) { return; } bool sendUpdateStats = false; while ((manaSpent + amount) >= nextReqMana) { amount -= nextReqMana - manaSpent; magLevel++; manaSpent = 0; std::ostringstream ss; ss << "You advanced to magic level " << magLevel << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); g_creatureEvents->playerAdvance(this, SKILL_MAGLEVEL, magLevel - 1, magLevel); sendUpdateStats = true; currReqMana = nextReqMana; nextReqMana = vocation->getReqMana(magLevel + 1); if (currReqMana >= nextReqMana) { return; } } manaSpent += amount; uint8_t oldPercent = magLevelPercent; if (nextReqMana > currReqMana) { magLevelPercent = Player::getPercentLevel(manaSpent, nextReqMana); } else { magLevelPercent = 0; } if (oldPercent != magLevelPercent) { sendUpdateStats = true; } if (sendUpdateStats) { sendStats(); } } void Player::addExperience(Creature* source, uint64_t exp, bool sendText/* = false*/) { uint64_t currLevelExp = Player::getExpForLevel(level); uint64_t nextLevelExp = Player::getExpForLevel(level + 1); uint64_t rawExp = exp; if (currLevelExp >= nextLevelExp) { //player has reached max level levelPercent = 0; sendStats(); return; } g_events->eventPlayerOnGainExperience(this, source, exp, rawExp); if (exp == 0) { return; } experience += exp; if (sendText) { std::string expString = std::to_string(exp) + (exp 
!= 1 ? " experience points." : " experience point."); TextMessage message(MESSAGE_EXPERIENCE, "You gained " + expString); message.position = position; message.primary.value = exp; message.primary.color = TEXTCOLOR_WHITE_EXP; sendTextMessage(message); SpectatorHashSet spectators; g_game.map.getSpectators(spectators, position, false, true); spectators.erase(this); if (!spectators.empty()) { message.type = MESSAGE_EXPERIENCE_OTHERS; message.text = getName() + " gained " + expString; for (Creature* spectator : spectators) { spectator->getPlayer()->sendTextMessage(message); } } } uint32_t prevLevel = level; while (experience >= nextLevelExp) { ++level; healthMax += vocation->getHPGain(); health += vocation->getHPGain(); manaMax += vocation->getManaGain(); mana += vocation->getManaGain(); capacity += vocation->getCapGain(); currLevelExp = nextLevelExp; nextLevelExp = Player::getExpForLevel(level + 1); if (currLevelExp >= nextLevelExp) { //player has reached max level break; } } if (prevLevel != level) { health = healthMax; mana = manaMax; updateBaseSpeed(); setBaseSpeed(getBaseSpeed()); g_game.changeSpeed(this, 0); g_game.addCreatureHealth(this); if (party) { party->updateSharedExperience(); } g_creatureEvents->playerAdvance(this, SKILL_LEVEL, prevLevel, level); std::ostringstream ss; ss << "You advanced from Level " << prevLevel << " to Level " << level << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); } if (nextLevelExp > currLevelExp) { levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp); } else { levelPercent = 0; } sendStats(); } void Player::removeExperience(uint64_t exp, bool sendText/* = false*/) { if (experience == 0 || exp == 0) { return; } g_events->eventPlayerOnLoseExperience(this, exp); if (exp == 0) { return; } uint64_t lostExp = experience; experience = std::max<int64_t>(0, experience - exp); if (sendText) { lostExp -= experience; std::string expString = std::to_string(lostExp) + (lostExp != 1 ? " experience points." 
: " experience point."); TextMessage message(MESSAGE_EXPERIENCE, "You lost " + expString); message.position = position; message.primary.value = lostExp; message.primary.color = TEXTCOLOR_RED; sendTextMessage(message); SpectatorHashSet spectators; g_game.map.getSpectators(spectators, position, false, true); spectators.erase(this); if (!spectators.empty()) { message.type = MESSAGE_EXPERIENCE_OTHERS; message.text = getName() + " lost " + expString; for (Creature* spectator : spectators) { spectator->getPlayer()->sendTextMessage(message); } } } uint32_t oldLevel = level; uint64_t currLevelExp = Player::getExpForLevel(level); while (level > 1 && experience < currLevelExp) { --level; healthMax = std::max<int32_t>(0, healthMax - vocation->getHPGain()); manaMax = std::max<int32_t>(0, manaMax - vocation->getManaGain()); capacity = std::max<int32_t>(0, capacity - vocation->getCapGain()); currLevelExp = Player::getExpForLevel(level); } if (oldLevel != level) { health = healthMax; mana = manaMax; updateBaseSpeed(); setBaseSpeed(getBaseSpeed()); g_game.changeSpeed(this, 0); g_game.addCreatureHealth(this); if (party) { party->updateSharedExperience(); } std::ostringstream ss; ss << "You were downgraded from Level " << oldLevel << " to Level " << level << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); } uint64_t nextLevelExp = Player::getExpForLevel(level + 1); if (nextLevelExp > currLevelExp) { levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp); } else { levelPercent = 0; } sendStats(); } uint8_t Player::getPercentLevel(uint64_t count, uint64_t nextLevelCount) { if (nextLevelCount == 0) { return 0; } uint8_t result = (count * 100) / nextLevelCount; if (result > 100) { return 0; } return result; } void Player::onBlockHit() { if (shieldBlockCount > 0) { --shieldBlockCount; if (hasShield()) { addSkillAdvance(SKILL_SHIELD, 1); } } } void Player::onAttackedCreatureBlockHit(BlockType_t blockType) { lastAttackBlockType = blockType; switch (blockType) { case BLOCK_NONE: { addAttackSkillPoint = true; bloodHitCount = 30; shieldBlockCount = 30; break; } case BLOCK_DEFENSE: case BLOCK_ARMOR: { //need to draw blood every 30 hits if (bloodHitCount > 0) { addAttackSkillPoint = true; --bloodHitCount; } else { addAttackSkillPoint = false; } break; } default: { addAttackSkillPoint = false; break; } } } bool Player::hasShield() const { Item* item = inventory[CONST_SLOT_LEFT]; if (item && item->getWeaponType() == WEAPON_SHIELD) { return true; } item = inventory[CONST_SLOT_RIGHT]; if (item && item->getWeaponType() == WEAPON_SHIELD) { return true; } return false; } BlockType_t Player::blockHit(Creature* attacker, CombatType_t combatType, int32_t& damage, bool checkDefense /* = false*/, bool checkArmor /* = false*/, bool field /* = false*/) { BlockType_t blockType = Creature::blockHit(attacker, combatType, damage, checkDefense, checkArmor, field); if (attacker) { sendCreatureSquare(attacker, SQ_COLOR_BLACK); } if (blockType != BLOCK_NONE) { return blockType; } if (damage > 0) { for (int32_t slot = CONST_SLOT_FIRST; slot <= CONST_SLOT_LAST; ++slot) { if (!isItemAbilityEnabled(static_cast<slots_t>(slot))) { continue; } Item* item = inventory[slot]; if (!item) { continue; } const ItemType& it = Item::items[item->getID()]; if (it.abilities) { const int16_t& absorbPercent = it.abilities->absorbPercent[combatTypeToIndex(combatType)]; if (absorbPercent != 0) { damage -= std::round(damage * (absorbPercent / 100.)); uint16_t charges = item->getCharges(); if (charges != 0) { 
g_game.transformItem(item, item->getID(), charges - 1); } } if (field) { const int16_t& fieldAbsorbPercent = it.abilities->fieldAbsorbPercent[combatTypeToIndex(combatType)]; if (fieldAbsorbPercent != 0) { damage -= std::round(damage * (fieldAbsorbPercent / 100.)); uint16_t charges = item->getCharges(); if (charges != 0) { g_game.transformItem(item, item->getID(), charges - 1); } } } } } if (damage <= 0) { damage = 0; blockType = BLOCK_ARMOR; } } return blockType; } uint32_t Player::getIP() const { if (client) { return client->getIP(); } return 0; } void Player::death(Creature* lastHitCreature) { loginPosition = town->getTemplePosition(); if (skillLoss) { uint8_t unfairFightReduction = 100; bool lastHitPlayer = Player::lastHitIsPlayer(lastHitCreature); if (lastHitPlayer) { uint32_t sumLevels = 0; uint32_t inFightTicks = g_config.getNumber(ConfigManager::PZ_LOCKED); for (const auto& it : damageMap) { CountBlock_t cb = it.second; if ((OTSYS_TIME() - cb.ticks) <= inFightTicks) { Player* damageDealer = g_game.getPlayerByID(it.first); if (damageDealer) { sumLevels += damageDealer->getLevel(); } } } if (sumLevels > level) { double reduce = level / static_cast<double>(sumLevels); unfairFightReduction = std::max<uint8_t>(20, std::floor((reduce * 100) + 0.5)); } } //Magic level loss uint64_t sumMana = 0; uint64_t lostMana = 0; //sum up all the mana for (uint32_t i = 1; i <= magLevel; ++i) { sumMana += vocation->getReqMana(i); } sumMana += manaSpent; double deathLossPercent = getLostPercent() * (unfairFightReduction / 100.); lostMana = static_cast<uint64_t>(sumMana * deathLossPercent); while (lostMana > manaSpent && magLevel > 0) { lostMana -= manaSpent; manaSpent = vocation->getReqMana(magLevel); magLevel--; } manaSpent -= lostMana; uint64_t nextReqMana = vocation->getReqMana(magLevel + 1); if (nextReqMana > vocation->getReqMana(magLevel)) { magLevelPercent = Player::getPercentLevel(manaSpent, nextReqMana); } else { magLevelPercent = 0; } //Skill loss for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; ++i) { //for each skill uint64_t sumSkillTries = 0; for (uint16_t c = 11; c <= skills[i].level; ++c) { //sum up all required tries for all skill levels sumSkillTries += vocation->getReqSkillTries(i, c); } sumSkillTries += skills[i].tries; uint32_t lostSkillTries = static_cast<uint32_t>(sumSkillTries * deathLossPercent); while (lostSkillTries > skills[i].tries) { lostSkillTries -= skills[i].tries; if (skills[i].level <= 10) { skills[i].level = 10; skills[i].tries = 0; lostSkillTries = 0; break; } skills[i].tries = vocation->getReqSkillTries(i, skills[i].level); skills[i].level--; } skills[i].tries = std::max<int32_t>(0, skills[i].tries - lostSkillTries); skills[i].percent = Player::getPercentLevel(skills[i].tries, vocation->getReqSkillTries(i, skills[i].level)); } //Level loss uint64_t expLoss = static_cast<uint64_t>(experience * deathLossPercent); g_events->eventPlayerOnLoseExperience(this, expLoss); if (expLoss != 0) { uint32_t oldLevel = level; if (vocation->getId() == VOCATION_NONE || level > 7) { experience -= expLoss; } while (level > 1 && experience < Player::getExpForLevel(level)) { --level; healthMax = std::max<int32_t>(0, healthMax - vocation->getHPGain()); manaMax = std::max<int32_t>(0, manaMax - vocation->getManaGain()); capacity = std::max<int32_t>(0, capacity - vocation->getCapGain()); } if (oldLevel != level) { std::ostringstream ss; ss << "You were downgraded from Level " << oldLevel << " to Level " << level << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); } uint64_t currLevelExp = 
Player::getExpForLevel(level); uint64_t nextLevelExp = Player::getExpForLevel(level + 1); if (nextLevelExp > currLevelExp) { levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp); } else { levelPercent = 0; } } std::bitset<6> bitset(blessings); if (bitset[5]) { if (lastHitPlayer) { bitset.reset(5); blessings = bitset.to_ulong(); } else { blessings = 32; } } else { blessings = 0; } sendStats(); sendSkills(); sendReLoginWindow(unfairFightReduction); if (getSkull() == SKULL_BLACK) { health = 40; mana = 0; } else { health = healthMax; mana = manaMax; } auto it = conditions.begin(), end = conditions.end(); while (it != end) { Condition* condition = *it; if (condition->isPersistent()) { it = conditions.erase(it); condition->endCondition(this); onEndCondition(condition->getType()); delete condition; } else { ++it; } } } else { setLossSkill(true); auto it = conditions.begin(), end = conditions.end(); while (it != end) { Condition* condition = *it; if (condition->isPersistent()) { it = conditions.erase(it); condition->endCondition(this); onEndCondition(condition->getType()); delete condition; } else { ++it; } } health = healthMax; g_game.internalTeleport(this, getTemplePosition(), true); g_game.addCreatureHealth(this); onThink(EVENT_CREATURE_THINK_INTERVAL); onIdleStatus(); sendStats(); } } bool Player::dropCorpse(Creature* lastHitCreature, Creature* mostDamageCreature, bool lastHitUnjustified, bool mostDamageUnjustified) { if (getZone() != ZONE_PVP || !Player::lastHitIsPlayer(lastHitCreature)) { return Creature::dropCorpse(lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified); } setDropLoot(true); return false; } Item* Player::getCorpse(Creature* lastHitCreature, Creature* mostDamageCreature) { Item* corpse = Creature::getCorpse(lastHitCreature, mostDamageCreature); if (corpse && corpse->getContainer()) { std::ostringstream ss; if (lastHitCreature) { ss << "You recognize " << getNameDescription() << ". " << (getSex() == PLAYERSEX_FEMALE ? 
"She" : "He") << " was killed by " << lastHitCreature->getNameDescription() << '.'; } else { ss << "You recognize " << getNameDescription() << '.'; } corpse->setSpecialDescription(ss.str()); } return corpse; } void Player::addInFightTicks(bool pzlock /*= false*/) { if (hasFlag(PlayerFlag_NotGainInFight)) { return; } if (pzlock) { pzLocked = true; } Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_INFIGHT, g_config.getNumber(ConfigManager::PZ_LOCKED), 0); addCondition(condition); } void Player::removeList() { g_game.removePlayer(this); for (const auto& it : g_game.getPlayers()) { it.second->notifyStatusChange(this, VIPSTATUS_OFFLINE); } } void Player::addList() { for (const auto& it : g_game.getPlayers()) { it.second->notifyStatusChange(this, VIPSTATUS_ONLINE); } g_game.addPlayer(this); } void Player::kickPlayer(bool displayEffect) { g_creatureEvents->playerLogout(this); if (client) { client->logout(displayEffect, true); } else { g_game.removeCreature(this); } } void Player::notifyStatusChange(Player* loginPlayer, VipStatus_t status) { if (!client) { return; } auto it = VIPList.find(loginPlayer->guid); if (it == VIPList.end()) { return; } client->sendUpdatedVIPStatus(loginPlayer->guid, status); if (status == VIPSTATUS_ONLINE) { client->sendTextMessage(TextMessage(MESSAGE_STATUS_SMALL, loginPlayer->getName() + " has logged in.")); } else if (status == VIPSTATUS_OFFLINE) { client->sendTextMessage(TextMessage(MESSAGE_STATUS_SMALL, loginPlayer->getName() + " has logged out.")); } } bool Player::removeVIP(uint32_t vipGuid) { if (VIPList.erase(vipGuid) == 0) { return false; } IOLoginData::removeVIPEntry(accountNumber, vipGuid); return true; } bool Player::addVIP(uint32_t vipGuid, const std::string& vipName, VipStatus_t status) { if (VIPList.size() >= getMaxVIPEntries() || VIPList.size() == 200) { // max number of buddies is 200 in 9.53 sendTextMessage(MESSAGE_STATUS_SMALL, "You cannot add more buddies."); return false; } auto result = VIPList.insert(vipGuid); if (!result.second) { sendTextMessage(MESSAGE_STATUS_SMALL, "This player is already in your list."); return false; } IOLoginData::addVIPEntry(accountNumber, vipGuid, "", 0, false); if (client) { client->sendVIP(vipGuid, vipName, "", 0, false, status); } return true; } bool Player::addVIPInternal(uint32_t vipGuid) { if (VIPList.size() >= getMaxVIPEntries() || VIPList.size() == 200) { // max number of buddies is 200 in 9.53 return false; } return VIPList.insert(vipGuid).second; } bool Player::editVIP(uint32_t vipGuid, const std::string& description, uint32_t icon, bool notify) { auto it = VIPList.find(vipGuid); if (it == VIPList.end()) { return false; // player is not in VIP } IOLoginData::editVIPEntry(accountNumber, vipGuid, description, icon, notify); return true; } //close container and its child containers void Player::autoCloseContainers(const Container* container) { std::vector<uint32_t> closeList; for (const auto& it : openContainers) { Container* tmpContainer = it.second.container; while (tmpContainer) { if (tmpContainer->isRemoved() || tmpContainer == container) { closeList.push_back(it.first); break; } tmpContainer = dynamic_cast<Container*>(tmpContainer->getParent()); } } for (uint32_t containerId : closeList) { closeContainer(containerId); if (client) { client->sendCloseContainer(containerId); } } } bool Player::hasCapacity(const Item* item, uint32_t count) const { if (hasFlag(PlayerFlag_CannotPickupItem)) { return false; } if (hasFlag(PlayerFlag_HasInfiniteCapacity) || item->getTopParent() == this) 
{ return true; } uint32_t itemWeight = item->getContainer() != nullptr ? item->getWeight() : item->getBaseWeight(); if (item->isStackable()) { itemWeight *= count; } return itemWeight <= getFreeCapacity(); } ReturnValue Player::queryAdd(int32_t index, const Thing& thing, uint32_t count, uint32_t flags, Creature*) const { const Item* item = thing.getItem(); if (item == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } bool childIsOwner = hasBitSet(FLAG_CHILDISOWNER, flags); if (childIsOwner) { //a child container is querying the player, just check if enough capacity bool skipLimit = hasBitSet(FLAG_NOLIMIT, flags); if (skipLimit || hasCapacity(item, count)) { return RETURNVALUE_NOERROR; } return RETURNVALUE_NOTENOUGHCAPACITY; } if (!item->isPickupable()) { return RETURNVALUE_CANNOTPICKUP; } ReturnValue ret = RETURNVALUE_NOERROR; const int32_t& slotPosition = item->getSlotPosition(); if ((slotPosition & SLOTP_HEAD) || (slotPosition & SLOTP_NECKLACE) || (slotPosition & SLOTP_BACKPACK) || (slotPosition & SLOTP_ARMOR) || (slotPosition & SLOTP_LEGS) || (slotPosition & SLOTP_FEET) || (slotPosition & SLOTP_RING)) { ret = RETURNVALUE_CANNOTBEDRESSED; } else if (slotPosition & SLOTP_TWO_HAND) { ret = RETURNVALUE_PUTTHISOBJECTINBOTHHANDS; } else if ((slotPosition & SLOTP_RIGHT) || (slotPosition & SLOTP_LEFT)) { if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) { ret = RETURNVALUE_CANNOTBEDRESSED; } else { ret = RETURNVALUE_PUTTHISOBJECTINYOURHAND; } } switch (index) { case CONST_SLOT_HEAD: { if (slotPosition & SLOTP_HEAD) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_NECKLACE: { if (slotPosition & SLOTP_NECKLACE) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_BACKPACK: { if (slotPosition & SLOTP_BACKPACK) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_ARMOR: { if (slotPosition & SLOTP_ARMOR) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_RIGHT: { if (slotPosition & SLOTP_RIGHT) { if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) { if (item->getWeaponType() != WEAPON_SHIELD) { ret = RETURNVALUE_CANNOTBEDRESSED; } else { const Item* leftItem = inventory[CONST_SLOT_LEFT]; if (leftItem) { if ((leftItem->getSlotPosition() | slotPosition) & SLOTP_TWO_HAND) { ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE; } else { ret = RETURNVALUE_NOERROR; } } else { ret = RETURNVALUE_NOERROR; } } } else if (slotPosition & SLOTP_TWO_HAND) { if (inventory[CONST_SLOT_LEFT] && inventory[CONST_SLOT_LEFT] != item) { ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE; } else { ret = RETURNVALUE_NOERROR; } } else if (inventory[CONST_SLOT_LEFT]) { const Item* leftItem = inventory[CONST_SLOT_LEFT]; WeaponType_t type = item->getWeaponType(), leftType = leftItem->getWeaponType(); if (leftItem->getSlotPosition() & SLOTP_TWO_HAND) { ret = RETURNVALUE_DROPTWOHANDEDITEM; } else if (item == leftItem && count == item->getItemCount()) { ret = RETURNVALUE_NOERROR; } else if (leftType == WEAPON_SHIELD && type == WEAPON_SHIELD) { ret = RETURNVALUE_CANONLYUSEONESHIELD; } else if (leftType == WEAPON_NONE || type == WEAPON_NONE || leftType == WEAPON_SHIELD || leftType == WEAPON_AMMO || type == WEAPON_SHIELD || type == WEAPON_AMMO) { ret = RETURNVALUE_NOERROR; } else { ret = RETURNVALUE_CANONLYUSEONEWEAPON; } } else { ret = RETURNVALUE_NOERROR; } } break; } case CONST_SLOT_LEFT: { if (slotPosition & SLOTP_LEFT) { if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) { WeaponType_t type = item->getWeaponType(); if (type == WEAPON_NONE || type == WEAPON_SHIELD) { ret = 
RETURNVALUE_CANNOTBEDRESSED; } else if (inventory[CONST_SLOT_RIGHT] && (slotPosition & SLOTP_TWO_HAND)) { ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE; } else { ret = RETURNVALUE_NOERROR; } } else if (slotPosition & SLOTP_TWO_HAND) { if (inventory[CONST_SLOT_RIGHT] && inventory[CONST_SLOT_RIGHT] != item) { ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE; } else { ret = RETURNVALUE_NOERROR; } } else if (inventory[CONST_SLOT_RIGHT]) { const Item* rightItem = inventory[CONST_SLOT_RIGHT]; WeaponType_t type = item->getWeaponType(), rightType = rightItem->getWeaponType(); if (rightItem->getSlotPosition() & SLOTP_TWO_HAND) { ret = RETURNVALUE_DROPTWOHANDEDITEM; } else if (item == rightItem && count == item->getItemCount()) { ret = RETURNVALUE_NOERROR; } else if (rightType == WEAPON_SHIELD && type == WEAPON_SHIELD) { ret = RETURNVALUE_CANONLYUSEONESHIELD; } else if (rightType == WEAPON_NONE || type == WEAPON_NONE || rightType == WEAPON_SHIELD || rightType == WEAPON_AMMO || type == WEAPON_SHIELD || type == WEAPON_AMMO) { ret = RETURNVALUE_NOERROR; } else { ret = RETURNVALUE_CANONLYUSEONEWEAPON; } } else { ret = RETURNVALUE_NOERROR; } } break; } case CONST_SLOT_LEGS: { if (slotPosition & SLOTP_LEGS) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_FEET: { if (slotPosition & SLOTP_FEET) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_RING: { if (slotPosition & SLOTP_RING) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_AMMO: { if ((slotPosition & SLOTP_AMMO) || g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_WHEREEVER: case -1: ret = RETURNVALUE_NOTENOUGHROOM; break; default: ret = RETURNVALUE_NOTPOSSIBLE; break; } if (ret == RETURNVALUE_NOERROR || ret == RETURNVALUE_NOTENOUGHROOM) { //need an exchange with source? 
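		// If the target slot already holds a different item (or one that cannot stack with the
		// new one), RETURNVALUE_NEEDEXCHANGE is reported below so the move can be resolved by
		// swapping the two items rather than failing outright.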
const Item* inventoryItem = getInventoryItem(static_cast<slots_t>(index)); if (inventoryItem && (!inventoryItem->isStackable() || inventoryItem->getID() != item->getID())) { return RETURNVALUE_NEEDEXCHANGE; } //check if enough capacity if (!hasCapacity(item, count)) { return RETURNVALUE_NOTENOUGHCAPACITY; } if (!g_moveEvents->onPlayerEquip(const_cast<Player*>(this), const_cast<Item*>(item), static_cast<slots_t>(index), true)) { return RETURNVALUE_CANNOTBEDRESSED; } } return ret; } ReturnValue Player::queryMaxCount(int32_t index, const Thing& thing, uint32_t count, uint32_t& maxQueryCount, uint32_t flags) const { const Item* item = thing.getItem(); if (item == nullptr) { maxQueryCount = 0; return RETURNVALUE_NOTPOSSIBLE; } if (index == INDEX_WHEREEVER) { uint32_t n = 0; for (int32_t slotIndex = CONST_SLOT_FIRST; slotIndex <= CONST_SLOT_LAST; ++slotIndex) { Item* inventoryItem = inventory[slotIndex]; if (inventoryItem) { if (Container* subContainer = inventoryItem->getContainer()) { uint32_t queryCount = 0; subContainer->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), queryCount, flags); n += queryCount; //iterate through all items, including sub-containers (deep search) for (ContainerIterator it = subContainer->iterator(); it.hasNext(); it.advance()) { if (Container* tmpContainer = (*it)->getContainer()) { queryCount = 0; tmpContainer->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), queryCount, flags); n += queryCount; } } } else if (inventoryItem->isStackable() && item->equals(inventoryItem) && inventoryItem->getItemCount() < 100) { uint32_t remainder = (100 - inventoryItem->getItemCount()); if (queryAdd(slotIndex, *item, remainder, flags) == RETURNVALUE_NOERROR) { n += remainder; } } } else if (queryAdd(slotIndex, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { //empty slot if (item->isStackable()) { n += 100; } else { ++n; } } } maxQueryCount = n; } else { const Item* destItem = nullptr; const Thing* destThing = getThing(index); if (destThing) { destItem = destThing->getItem(); } if (destItem) { if (destItem->isStackable() && item->equals(destItem) && destItem->getItemCount() < 100) { maxQueryCount = 100 - destItem->getItemCount(); } else { maxQueryCount = 0; } } else if (queryAdd(index, *item, count, flags) == RETURNVALUE_NOERROR) { //empty slot if (item->isStackable()) { maxQueryCount = 100; } else { maxQueryCount = 1; } return RETURNVALUE_NOERROR; } } if (maxQueryCount < count) { return RETURNVALUE_NOTENOUGHROOM; } else { return RETURNVALUE_NOERROR; } } ReturnValue Player::queryRemove(const Thing& thing, uint32_t count, uint32_t flags) const { int32_t index = getThingIndex(&thing); if (index == -1) { return RETURNVALUE_NOTPOSSIBLE; } const Item* item = thing.getItem(); if (item == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } if (count == 0 || (item->isStackable() && count > item->getItemCount())) { return RETURNVALUE_NOTPOSSIBLE; } if (!item->isMoveable() && !hasBitSet(FLAG_IGNORENOTMOVEABLE, flags)) { return RETURNVALUE_NOTMOVEABLE; } return RETURNVALUE_NOERROR; } Cylinder* Player::queryDestination(int32_t& index, const Thing& thing, Item** destItem, uint32_t& flags) { if (index == 0 /*drop to capacity window*/ || index == INDEX_WHEREEVER) { *destItem = nullptr; const Item* item = thing.getItem(); if (item == nullptr) { return this; } bool autoStack = !((flags & FLAG_IGNOREAUTOSTACK) == FLAG_IGNOREAUTOSTACK); bool isStackable = item->isStackable(); std::vector<Container*> containers; for (uint32_t slotIndex = CONST_SLOT_FIRST; slotIndex <= 
CONST_SLOT_LAST; ++slotIndex) { Item* inventoryItem = inventory[slotIndex]; if (inventoryItem) { if (inventoryItem == tradeItem) { continue; } if (inventoryItem == item) { continue; } if (autoStack && isStackable) { //try find an already existing item to stack with if (queryAdd(slotIndex, *item, item->getItemCount(), 0) == RETURNVALUE_NOERROR) { if (inventoryItem->equals(item) && inventoryItem->getItemCount() < 100) { index = slotIndex; *destItem = inventoryItem; return this; } } if (Container* subContainer = inventoryItem->getContainer()) { containers.push_back(subContainer); } } else if (Container* subContainer = inventoryItem->getContainer()) { containers.push_back(subContainer); } } else if (queryAdd(slotIndex, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { //empty slot index = slotIndex; *destItem = nullptr; return this; } } size_t i = 0; while (i < containers.size()) { Container* tmpContainer = containers[i++]; if (!autoStack || !isStackable) { //we need to find first empty container as fast as we can for non-stackable items uint32_t n = tmpContainer->capacity() - tmpContainer->size(); while (n) { if (tmpContainer->queryAdd(tmpContainer->capacity() - n, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { index = tmpContainer->capacity() - n; *destItem = nullptr; return tmpContainer; } n--; } for (Item* tmpContainerItem : tmpContainer->getItemList()) { if (Container* subContainer = tmpContainerItem->getContainer()) { containers.push_back(subContainer); } } continue; } uint32_t n = 0; for (Item* tmpItem : tmpContainer->getItemList()) { if (tmpItem == tradeItem) { continue; } if (tmpItem == item) { continue; } //try find an already existing item to stack with if (tmpItem->equals(item) && tmpItem->getItemCount() < 100) { index = n; *destItem = tmpItem; return tmpContainer; } if (Container* subContainer = tmpItem->getContainer()) { containers.push_back(subContainer); } n++; } if (n < tmpContainer->capacity() && tmpContainer->queryAdd(n, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { index = n; *destItem = nullptr; return tmpContainer; } } return this; } Thing* destThing = getThing(index); if (destThing) { *destItem = destThing->getItem(); } Cylinder* subCylinder = dynamic_cast<Cylinder*>(destThing); if (subCylinder) { index = INDEX_WHEREEVER; *destItem = nullptr; return subCylinder; } else { return this; } } void Player::addThing(int32_t index, Thing* thing) { if (index < CONST_SLOT_FIRST || index > CONST_SLOT_LAST) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* item = thing->getItem(); if (!item) { return /*RETURNVALUE_NOTPOSSIBLE*/; } item->setParent(this); inventory[index] = item; //send to client sendInventoryItem(static_cast<slots_t>(index), item); } void Player::updateThing(Thing* thing, uint16_t itemId, uint32_t count) { int32_t index = getThingIndex(thing); if (index == -1) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* item = thing->getItem(); if (!item) { return /*RETURNVALUE_NOTPOSSIBLE*/; } item->setID(itemId); item->setSubType(count); //send to client sendInventoryItem(static_cast<slots_t>(index), item); //event methods onUpdateInventoryItem(item, item); } void Player::replaceThing(uint32_t index, Thing* thing) { if (index > CONST_SLOT_LAST) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* oldItem = getInventoryItem(static_cast<slots_t>(index)); if (!oldItem) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* item = thing->getItem(); if (!item) { return /*RETURNVALUE_NOTPOSSIBLE*/; } //send to client sendInventoryItem(static_cast<slots_t>(index), 
item); //event methods onUpdateInventoryItem(oldItem, item); item->setParent(this); inventory[index] = item; } void Player::removeThing(Thing* thing, uint32_t count) { Item* item = thing->getItem(); if (!item) { return /*RETURNVALUE_NOTPOSSIBLE*/; } int32_t index = getThingIndex(thing); if (index == -1) { return /*RETURNVALUE_NOTPOSSIBLE*/; } if (item->isStackable()) { if (count == item->getItemCount()) { //send change to client sendInventoryItem(static_cast<slots_t>(index), nullptr); //event methods onRemoveInventoryItem(item); item->setParent(nullptr); inventory[index] = nullptr; } else { uint8_t newCount = static_cast<uint8_t>(std::max<int32_t>(0, item->getItemCount() - count)); item->setItemCount(newCount); //send change to client sendInventoryItem(static_cast<slots_t>(index), item); //event methods onUpdateInventoryItem(item, item); } } else { //send change to client sendInventoryItem(static_cast<slots_t>(index), nullptr); //event methods onRemoveInventoryItem(item); item->setParent(nullptr); inventory[index] = nullptr; } } int32_t Player::getThingIndex(const Thing* thing) const { for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) { if (inventory[i] == thing) { return i; } } return -1; } size_t Player::getFirstIndex() const { return CONST_SLOT_FIRST; } size_t Player::getLastIndex() const { return CONST_SLOT_LAST + 1; } uint32_t Player::getItemTypeCount(uint16_t itemId, int32_t subType /*= -1*/) const { uint32_t count = 0; for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) { Item* item = inventory[i]; if (!item) { continue; } if (item->getID() == itemId) { count += Item::countByType(item, subType); } if (Container* container = item->getContainer()) { for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) { if ((*it)->getID() == itemId) { count += Item::countByType(*it, subType); } } } } return count; } bool Player::removeItemOfType(uint16_t itemId, uint32_t amount, int32_t subType, bool ignoreEquipped/* = false*/) const { if (amount == 0) { return true; } std::vector<Item*> itemList; uint32_t count = 0; for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) { Item* item = inventory[i]; if (!item) { continue; } if (!ignoreEquipped && item->getID() == itemId) { uint32_t itemCount = Item::countByType(item, subType); if (itemCount == 0) { continue; } itemList.push_back(item); count += itemCount; if (count >= amount) { g_game.internalRemoveItems(std::move(itemList), amount, Item::items[itemId].stackable); return true; } } else if (Container* container = item->getContainer()) { for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) { Item* containerItem = *it; if (containerItem->getID() == itemId) { uint32_t itemCount = Item::countByType(containerItem, subType); if (itemCount == 0) { continue; } itemList.push_back(containerItem); count += itemCount; if (count >= amount) { g_game.internalRemoveItems(std::move(itemList), amount, Item::items[itemId].stackable); return true; } } } } } return false; } std::map<uint32_t, uint32_t>& Player::getAllItemTypeCount(std::map<uint32_t, uint32_t>& countMap) const { for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) { Item* item = inventory[i]; if (!item) { continue; } countMap[item->getID()] += Item::countByType(item, -1); if (Container* container = item->getContainer()) { for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) { countMap[(*it)->getID()] += Item::countByType(*it, -1); } } } return countMap; } Thing* Player::getThing(size_t index) 
const
{
	if (index >= CONST_SLOT_FIRST && index <= CONST_SLOT_LAST) {
		return inventory[index];
	}
	return nullptr;
}

void Player::postAddNotification(Thing* thing, const Cylinder* oldParent, int32_t index, cylinderlink_t link /*= LINK_OWNER*/)
{
	if (link == LINK_OWNER) {
		//calling movement scripts
		g_moveEvents->onPlayerEquip(this, thing->getItem(), static_cast<slots_t>(index), false);
	}

	bool requireListUpdate = true;

	if (link == LINK_OWNER || link == LINK_TOPPARENT) {
		const Item* i = (oldParent ? oldParent->getItem() : nullptr);

		// Check if we owned the old container too, so we don't need to do anything,
		// as the list was updated in postRemoveNotification
		assert(i ? i->getContainer() != nullptr : true);

		if (i) {
			requireListUpdate = i->getContainer()->getHoldingPlayer() != this;
		} else {
			requireListUpdate = oldParent != this;
		}

		updateInventoryWeight();
		updateItemsLight();
		sendStats();
	}

	if (const Item* item = thing->getItem()) {
		if (const Container* container = item->getContainer()) {
			onSendContainer(container);
		}

		if (shopOwner && requireListUpdate) {
			updateSaleShopList(item);
		}
	} else if (const Creature* creature = thing->getCreature()) {
		if (creature == this) {
			//check containers
			std::vector<Container*> containers;

			for (const auto& it : openContainers) {
				Container* container = it.second.container;
				if (!Position::areInRange<1, 1, 0>(container->getPosition(), getPosition())) {
					containers.push_back(container);
				}
			}

			for (const Container* container : containers) {
				autoCloseContainers(container);
			}
		}
	}
}

void Player::postRemoveNotification(Thing* thing, const Cylinder* newParent, int32_t index, cylinderlink_t link /*= LINK_OWNER*/)
{
	if (link == LINK_OWNER) {
		//calling movement scripts
		g_moveEvents->onPlayerDeEquip(this, thing->getItem(), static_cast<slots_t>(index));
	}

	bool requireListUpdate = true;

	if (link == LINK_OWNER || link == LINK_TOPPARENT) {
		const Item* i = (newParent ? newParent->getItem() : nullptr);

		// Check if we owned the old container too, so we don't need to do anything,
		// as the list was updated in postRemoveNotification
		assert(i ?
i->getContainer() != nullptr : true); if (i) { requireListUpdate = i->getContainer()->getHoldingPlayer() != this; } else { requireListUpdate = newParent != this; } updateInventoryWeight(); updateItemsLight(); sendStats(); } if (const Item* item = thing->getItem()) { if (const Container* container = item->getContainer()) { if (container->isRemoved() || !Position::areInRange<1, 1, 0>(getPosition(), container->getPosition())) { autoCloseContainers(container); } else if (container->getTopParent() == this) { onSendContainer(container); } else if (const Container* topContainer = dynamic_cast<const Container*>(container->getTopParent())) { if (const DepotChest* depotChest = dynamic_cast<const DepotChest*>(topContainer)) { bool isOwner = false; for (const auto& it : depotChests) { if (it.second == depotChest) { isOwner = true; onSendContainer(container); } } if (!isOwner) { autoCloseContainers(container); } } else { onSendContainer(container); } } else { autoCloseContainers(container); } } if (shopOwner && requireListUpdate) { updateSaleShopList(item); } } } bool Player::updateSaleShopList(const Item* item) { uint16_t itemId = item->getID(); if (itemId != ITEM_GOLD_COIN && itemId != ITEM_PLATINUM_COIN && itemId != ITEM_CRYSTAL_COIN) { auto it = std::find_if(shopItemList.begin(), shopItemList.end(), [itemId](const ShopInfo& shopInfo) { return shopInfo.itemId == itemId && shopInfo.sellPrice != 0; }); if (it == shopItemList.end()) { const Container* container = item->getContainer(); if (!container) { return false; } const auto& items = container->getItemList(); return std::any_of(items.begin(), items.end(), [this](const Item* containerItem) { return updateSaleShopList(containerItem); }); } } if (client) { client->sendSaleItemList(shopItemList); } return true; } bool Player::hasShopItemForSale(uint32_t itemId, uint8_t subType) const { const ItemType& itemType = Item::items[itemId]; return std::any_of(shopItemList.begin(), shopItemList.end(), [&](const ShopInfo& shopInfo) { return shopInfo.itemId == itemId && shopInfo.buyPrice != 0 && (!itemType.isFluidContainer() || shopInfo.subType == subType); }); } void Player::internalAddThing(Thing* thing) { internalAddThing(0, thing); } void Player::internalAddThing(uint32_t index, Thing* thing) { Item* item = thing->getItem(); if (!item) { return; } //index == 0 means we should equip this item at the most appropiate slot (no action required here) if (index > 0 && index < 11) { if (inventory[index]) { return; } inventory[index] = item; item->setParent(this); } } bool Player::setFollowCreature(Creature* creature) { if (!Creature::setFollowCreature(creature)) { setFollowCreature(nullptr); setAttackedCreature(nullptr); sendCancelMessage(RETURNVALUE_THEREISNOWAY); sendCancelTarget(); stopWalk(); return false; } return true; } bool Player::setAttackedCreature(Creature* creature) { if (!Creature::setAttackedCreature(creature)) { sendCancelTarget(); return false; } if (chaseMode && creature) { if (followCreature != creature) { //chase opponent setFollowCreature(creature); } } else if (followCreature) { setFollowCreature(nullptr); } if (creature) { g_dispatcher.addTask(createTask(std::bind(&Game::checkCreatureAttack, &g_game, getID()))); } return true; } void Player::goToFollowCreature() { if (!walkTask) { if ((OTSYS_TIME() - lastFailedFollow) < 2000) { return; } Creature::goToFollowCreature(); if (followCreature && !hasFollowPath) { lastFailedFollow = OTSYS_TIME(); } } } void Player::getPathSearchParams(const Creature* creature, FindPathParams& fpp) const { 
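	// Players always force a full path search on top of the generic creature parameters,
	// presumably so follow/chase targets get a complete route recomputed rather than a
	// partial one.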
Creature::getPathSearchParams(creature, fpp); fpp.fullPathSearch = true; } void Player::doAttacking(uint32_t) { if (lastAttack == 0) { lastAttack = OTSYS_TIME() - getAttackSpeed() - 1; } if (hasCondition(CONDITION_PACIFIED)) { return; } if ((OTSYS_TIME() - lastAttack) >= getAttackSpeed()) { bool result = false; Item* tool = getWeapon(); const Weapon* weapon = g_weapons->getWeapon(tool); if (weapon) { if (!weapon->interruptSwing()) { result = weapon->useWeapon(this, tool, attackedCreature); } else if (!canDoAction()) { uint32_t delay = getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::checkCreatureAttack, &g_game, getID())); setNextActionTask(task); } else { result = weapon->useWeapon(this, tool, attackedCreature); } } else { result = Weapon::useFist(this, attackedCreature); } if (result) { lastAttack = OTSYS_TIME(); } } } uint64_t Player::getGainedExperience(Creature* attacker) const { if (g_config.getBoolean(ConfigManager::EXPERIENCE_FROM_PLAYERS)) { Player* attackerPlayer = attacker->getPlayer(); if (attackerPlayer && attackerPlayer != this && skillLoss && std::abs(static_cast<int32_t>(attackerPlayer->getLevel() - level)) <= g_config.getNumber(ConfigManager::EXP_FROM_PLAYERS_LEVEL_RANGE)) { return std::max<uint64_t>(0, std::floor(getLostExperience() * getDamageRatio(attacker) * 0.75)); } } return 0; } void Player::onFollowCreature(const Creature* creature) { if (!creature) { stopWalk(); } } void Player::setChaseMode(bool mode) { bool prevChaseMode = chaseMode; chaseMode = mode; if (prevChaseMode != chaseMode) { if (chaseMode) { if (!followCreature && attackedCreature) { //chase opponent setFollowCreature(attackedCreature); } } else if (attackedCreature) { setFollowCreature(nullptr); cancelNextWalk = true; } } } void Player::onWalkAborted() { setNextWalkActionTask(nullptr); sendCancelWalk(); } void Player::onWalkComplete() { if (walkTask) { walkTaskEvent = g_scheduler.addEvent(walkTask); walkTask = nullptr; } } void Player::stopWalk() { cancelNextWalk = true; } void Player::getCreatureLight(LightInfo& light) const { if (internalLight.level > itemsLight.level) { light = internalLight; } else { light = itemsLight; } } void Player::updateItemsLight(bool internal /*=false*/) { LightInfo maxLight; LightInfo curLight; for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) { Item* item = inventory[i]; if (item) { item->getLight(curLight); if (curLight.level > maxLight.level) { maxLight = curLight; } } } if (itemsLight.level != maxLight.level || itemsLight.color != maxLight.color) { itemsLight = maxLight; if (!internal) { g_game.changeLight(this); } } } void Player::onAddCondition(ConditionType_t type) { Creature::onAddCondition(type); if (type == CONDITION_OUTFIT && isMounted()) { dismount(); } sendIcons(); } void Player::onAddCombatCondition(ConditionType_t type) { switch (type) { case CONDITION_POISON: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are poisoned."); break; case CONDITION_DROWN: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are drowning."); break; case CONDITION_PARALYZE: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are paralyzed."); break; case CONDITION_DRUNK: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are drunk."); break; case CONDITION_CURSED: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are cursed."); break; case CONDITION_FREEZING: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are freezing."); break; case CONDITION_DAZZLED: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are dazzled."); break; case CONDITION_BLEEDING: 
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are bleeding."); break; default: break; } } void Player::onEndCondition(ConditionType_t type) { Creature::onEndCondition(type); if (type == CONDITION_INFIGHT) { onIdleStatus(); pzLocked = false; clearAttacked(); if (getSkull() != SKULL_RED && getSkull() != SKULL_BLACK) { setSkull(SKULL_NONE); } } sendIcons(); } void Player::onCombatRemoveCondition(Condition* condition) { //Creature::onCombatRemoveCondition(condition); if (condition->getId() > 0) { //Means the condition is from an item, id == slot if (g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) { Item* item = getInventoryItem(static_cast<slots_t>(condition->getId())); if (item) { //25% chance to destroy the item if (25 >= uniform_random(1, 100)) { g_game.internalRemoveItem(item); } } } } else { if (!canDoAction()) { const uint32_t delay = getNextActionTime(); const int32_t ticks = delay - (delay % EVENT_CREATURE_THINK_INTERVAL); if (ticks < 0) { removeCondition(condition); } else { condition->setTicks(ticks); } } else { removeCondition(condition); } } } void Player::onAttackedCreature(Creature* target) { Creature::onAttackedCreature(target); if (target->getZone() == ZONE_PVP) { return; } if (target == this) { addInFightTicks(); return; } if (hasFlag(PlayerFlag_NotGainInFight)) { return; } Player* targetPlayer = target->getPlayer(); if (targetPlayer && !isPartner(targetPlayer) && !isGuildMate(targetPlayer)) { if (!pzLocked && g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) { pzLocked = true; sendIcons(); } if (getSkull() == SKULL_NONE && getSkullClient(targetPlayer) == SKULL_YELLOW) { addAttacked(targetPlayer); targetPlayer->sendCreatureSkull(this); } else if (!targetPlayer->hasAttacked(this)) { if (!pzLocked) { pzLocked = true; sendIcons(); } if (!Combat::isInPvpZone(this, targetPlayer) && !isInWar(targetPlayer)) { addAttacked(targetPlayer); if (targetPlayer->getSkull() == SKULL_NONE && getSkull() == SKULL_NONE) { setSkull(SKULL_WHITE); } if (getSkull() == SKULL_NONE) { targetPlayer->sendCreatureSkull(this); } } } } addInFightTicks(); } void Player::onAttacked() { Creature::onAttacked(); addInFightTicks(); } void Player::onIdleStatus() { Creature::onIdleStatus(); if (party) { party->clearPlayerPoints(this); } } void Player::onPlacedCreature() { //scripting event - onLogin if (!g_creatureEvents->playerLogin(this)) { kickPlayer(true); } } void Player::onAttackedCreatureDrainHealth(Creature* target, int32_t points) { Creature::onAttackedCreatureDrainHealth(target, points); if (target) { if (party && !Combat::isPlayerCombat(target)) { Monster* tmpMonster = target->getMonster(); if (tmpMonster && tmpMonster->isHostile()) { //We have fulfilled a requirement for shared experience party->updatePlayerTicks(this, points); } } } } void Player::onTargetCreatureGainHealth(Creature* target, int32_t points) { if (target && party) { Player* tmpPlayer = nullptr; if (target->getPlayer()) { tmpPlayer = target->getPlayer(); } else if (Creature* targetMaster = target->getMaster()) { if (Player* targetMasterPlayer = targetMaster->getPlayer()) { tmpPlayer = targetMasterPlayer; } } if (isPartner(tmpPlayer)) { party->updatePlayerTicks(this, points); } } } bool Player::onKilledCreature(Creature* target, bool lastHit/* = true*/) { bool unjustified = false; if (hasFlag(PlayerFlag_NotGenerateLoot)) { target->setDropLoot(false); } Creature::onKilledCreature(target, lastHit); if (Player* targetPlayer = target->getPlayer()) { if (targetPlayer && targetPlayer->getZone() == ZONE_PVP) { targetPlayer->setDropLoot(false); 
targetPlayer->setLossSkill(false); } else if (!hasFlag(PlayerFlag_NotGainInFight) && !isPartner(targetPlayer)) { if (!Combat::isInPvpZone(this, targetPlayer) && hasAttacked(targetPlayer) && !targetPlayer->hasAttacked(this) && !isGuildMate(targetPlayer) && targetPlayer != this) { if (targetPlayer->getSkull() == SKULL_NONE && !isInWar(targetPlayer)) { unjustified = true; addUnjustifiedDead(targetPlayer); } if (lastHit && hasCondition(CONDITION_INFIGHT)) { pzLocked = true; Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_INFIGHT, g_config.getNumber(ConfigManager::WHITE_SKULL_TIME), 0); addCondition(condition); } } } } return unjustified; } void Player::gainExperience(uint64_t gainExp, Creature* source) { if (hasFlag(PlayerFlag_NotGainExperience) || gainExp == 0 || staminaMinutes == 0) { return; } addExperience(source, gainExp, true); } void Player::onGainExperience(uint64_t gainExp, Creature* target) { if (hasFlag(PlayerFlag_NotGainExperience)) { return; } if (target && !target->getPlayer() && party && party->isSharedExperienceActive() && party->isSharedExperienceEnabled()) { party->shareExperience(gainExp, target); //We will get a share of the experience through the sharing mechanism return; } Creature::onGainExperience(gainExp, target); gainExperience(gainExp, target); } void Player::onGainSharedExperience(uint64_t gainExp, Creature* source) { gainExperience(gainExp, source); } bool Player::isImmune(CombatType_t type) const { if (hasFlag(PlayerFlag_CannotBeAttacked)) { return true; } return Creature::isImmune(type); } bool Player::isImmune(ConditionType_t type) const { if (hasFlag(PlayerFlag_CannotBeAttacked)) { return true; } return Creature::isImmune(type); } bool Player::isAttackable() const { return !hasFlag(PlayerFlag_CannotBeAttacked); } bool Player::lastHitIsPlayer(Creature* lastHitCreature) { if (!lastHitCreature) { return false; } if (lastHitCreature->getPlayer()) { return true; } Creature* lastHitMaster = lastHitCreature->getMaster(); return lastHitMaster && lastHitMaster->getPlayer(); } void Player::changeHealth(int32_t healthChange, bool sendHealthChange/* = true*/) { Creature::changeHealth(healthChange, sendHealthChange); sendStats(); } void Player::changeMana(int32_t manaChange) { if (!hasFlag(PlayerFlag_HasInfiniteMana)) { Creature::changeMana(manaChange); } sendStats(); } void Player::changeSoul(int32_t soulChange) { if (soulChange > 0) { soul += std::min<int32_t>(soulChange, vocation->getSoulMax() - soul); } else { soul = std::max<int32_t>(0, soul + soulChange); } sendStats(); } bool Player::canWear(uint32_t lookType, uint8_t addons) const { if (group->access) { return true; } const Outfit* outfit = Outfits::getInstance().getOutfitByLookType(sex, lookType); if (!outfit) { return false; } if (outfit->premium && !isPremium()) { return false; } if (outfit->unlocked && addons == 0) { return true; } for (const OutfitEntry& outfitEntry : outfits) { if (outfitEntry.lookType != lookType) { continue; } return (outfitEntry.addons & addons) == addons; } return false; } bool Player::canLogout() { if (isConnecting) { return false; } if (getTile()->hasFlag(TILESTATE_NOLOGOUT)) { return false; } if (getTile()->hasFlag(TILESTATE_PROTECTIONZONE)) { return true; } return !isPzLocked() && !hasCondition(CONDITION_INFIGHT); } void Player::genReservedStorageRange() { //generate outfits range uint32_t base_key = PSTRG_OUTFITS_RANGE_START; for (const OutfitEntry& entry : outfits) { storageMap[++base_key] = (entry.lookType << 16) | entry.addons; } } void 
Player::addOutfit(uint16_t lookType, uint8_t addons) { for (OutfitEntry& outfitEntry : outfits) { if (outfitEntry.lookType == lookType) { outfitEntry.addons |= addons; return; } } outfits.emplace_back(lookType, addons); } bool Player::removeOutfit(uint16_t lookType) { for (auto it = outfits.begin(), end = outfits.end(); it != end; ++it) { OutfitEntry& entry = *it; if (entry.lookType == lookType) { outfits.erase(it); return true; } } return false; } bool Player::removeOutfitAddon(uint16_t lookType, uint8_t addons) { for (OutfitEntry& outfitEntry : outfits) { if (outfitEntry.lookType == lookType) { outfitEntry.addons &= ~addons; return true; } } return false; } bool Player::getOutfitAddons(const Outfit& outfit, uint8_t& addons) const { if (group->access) { addons = 3; return true; } if (outfit.premium && !isPremium()) { return false; } for (const OutfitEntry& outfitEntry : outfits) { if (outfitEntry.lookType != outfit.lookType) { continue; } addons = outfitEntry.addons; return true; } if (!outfit.unlocked) { return false; } addons = 0; return true; } void Player::setSex(PlayerSex_t newSex) { sex = newSex; } Skulls_t Player::getSkull() const { if (hasFlag(PlayerFlag_NotGainInFight)) { return SKULL_NONE; } return skull; } Skulls_t Player::getSkullClient(const Creature* creature) const { if (!creature || g_game.getWorldType() != WORLD_TYPE_PVP) { return SKULL_NONE; } const Player* player = creature->getPlayer(); if (player && player->getSkull() == SKULL_NONE) { if (isInWar(player)) { return SKULL_GREEN; } if (!player->getGuildWarVector().empty() && guild == player->getGuild()) { return SKULL_GREEN; } if (player->hasAttacked(this)) { return SKULL_YELLOW; } if (isPartner(player)) { return SKULL_GREEN; } } return Creature::getSkullClient(creature); } bool Player::hasAttacked(const Player* attacked) const { if (hasFlag(PlayerFlag_NotGainInFight) || !attacked) { return false; } return attackedSet.find(attacked->guid) != attackedSet.end(); } void Player::addAttacked(const Player* attacked) { if (hasFlag(PlayerFlag_NotGainInFight) || !attacked || attacked == this) { return; } attackedSet.insert(attacked->guid); } void Player::clearAttacked() { attackedSet.clear(); } void Player::addUnjustifiedDead(const Player* attacked) { if (hasFlag(PlayerFlag_NotGainInFight) || attacked == this || g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) { return; } sendTextMessage(MESSAGE_EVENT_ADVANCE, "Warning! 
The murder of " + attacked->getName() + " was not justified."); skullTicks += g_config.getNumber(ConfigManager::FRAG_TIME); if (getSkull() != SKULL_BLACK) { if (g_config.getNumber(ConfigManager::KILLS_TO_BLACK) != 0 && skullTicks > (g_config.getNumber(ConfigManager::KILLS_TO_BLACK) - 1) * static_cast<int64_t>(g_config.getNumber(ConfigManager::FRAG_TIME))) { setSkull(SKULL_BLACK); } else if (getSkull() != SKULL_RED && g_config.getNumber(ConfigManager::KILLS_TO_RED) != 0 && skullTicks > (g_config.getNumber(ConfigManager::KILLS_TO_RED) - 1) * static_cast<int64_t>(g_config.getNumber(ConfigManager::FRAG_TIME))) { setSkull(SKULL_RED); } } } void Player::checkSkullTicks(int32_t ticks) { int32_t newTicks = skullTicks - ticks; if (newTicks < 0) { skullTicks = 0; } else { skullTicks = newTicks; } if ((skull == SKULL_RED || skull == SKULL_BLACK) && skullTicks < 1000 && !hasCondition(CONDITION_INFIGHT)) { setSkull(SKULL_NONE); } } bool Player::isPromoted() const { uint16_t promotedVocation = g_vocations.getPromotedVocation(vocation->getId()); return promotedVocation == VOCATION_NONE && vocation->getId() != promotedVocation; } double Player::getLostPercent() const { int32_t blessingCount = std::bitset<5>(blessings).count(); int32_t deathLosePercent = g_config.getNumber(ConfigManager::DEATH_LOSE_PERCENT); if (deathLosePercent != -1) { if (isPromoted()) { deathLosePercent -= 3; } deathLosePercent -= blessingCount; return std::max<int32_t>(0, deathLosePercent) / 100.; } double lossPercent; if (level >= 25) { double tmpLevel = level + (levelPercent / 100.); lossPercent = static_cast<double>((tmpLevel + 50) * 50 * ((tmpLevel * tmpLevel) - (5 * tmpLevel) + 8)) / experience; } else { lossPercent = 10; } if (isPromoted()) { lossPercent *= 0.7; } return lossPercent * pow(0.92, blessingCount) / 100; } void Player::learnInstantSpell(const std::string& spellName) { if (!hasLearnedInstantSpell(spellName)) { learnedInstantSpellList.push_front(spellName); } } void Player::forgetInstantSpell(const std::string& spellName) { learnedInstantSpellList.remove(spellName); } bool Player::hasLearnedInstantSpell(const std::string& spellName) const { if (hasFlag(PlayerFlag_CannotUseSpells)) { return false; } if (hasFlag(PlayerFlag_IgnoreSpellCheck)) { return true; } for (const auto& learnedSpellName : learnedInstantSpellList) { if (strcasecmp(learnedSpellName.c_str(), spellName.c_str()) == 0) { return true; } } return false; } bool Player::isInWar(const Player* player) const { if (!player || !guild) { return false; } const Guild* playerGuild = player->getGuild(); if (!playerGuild) { return false; } return isInWarList(playerGuild->getId()) && player->isInWarList(guild->getId()); } bool Player::isInWarList(uint32_t guildId) const { return std::find(guildWarVector.begin(), guildWarVector.end(), guildId) != guildWarVector.end(); } bool Player::isPremium() const { if (g_config.getBoolean(ConfigManager::FREE_PREMIUM) || hasFlag(PlayerFlag_IsAlwaysPremium)) { return true; } return premiumDays > 0; } void Player::setPremiumDays(int32_t v) { premiumDays = v; sendBasicData(); } PartyShields_t Player::getPartyShield(const Player* player) const { if (!player) { return SHIELD_NONE; } if (party) { if (party->getLeader() == player) { if (party->isSharedExperienceActive()) { if (party->isSharedExperienceEnabled()) { return SHIELD_YELLOW_SHAREDEXP; } if (party->canUseSharedExperience(player)) { return SHIELD_YELLOW_NOSHAREDEXP; } return SHIELD_YELLOW_NOSHAREDEXP_BLINK; } return SHIELD_YELLOW; } if (player->party == party) { if 
(party->isSharedExperienceActive()) { if (party->isSharedExperienceEnabled()) { return SHIELD_BLUE_SHAREDEXP; } if (party->canUseSharedExperience(player)) { return SHIELD_BLUE_NOSHAREDEXP; } return SHIELD_BLUE_NOSHAREDEXP_BLINK; } return SHIELD_BLUE; } if (isInviting(player)) { return SHIELD_WHITEBLUE; } } if (player->isInviting(this)) { return SHIELD_WHITEYELLOW; } if (player->party) { return SHIELD_GRAY; } return SHIELD_NONE; } bool Player::isInviting(const Player* player) const { if (!player || !party || party->getLeader() != this) { return false; } return party->isPlayerInvited(player); } bool Player::isPartner(const Player* player) const { if (!player || !party) { return false; } return party == player->party; } bool Player::isGuildMate(const Player* player) const { if (!player || !guild) { return false; } return guild == player->guild; } void Player::sendPlayerPartyIcons(Player* player) { sendCreatureShield(player); sendCreatureSkull(player); } bool Player::addPartyInvitation(Party* party) { auto it = std::find(invitePartyList.begin(), invitePartyList.end(), party); if (it != invitePartyList.end()) { return false; } invitePartyList.push_front(party); return true; } void Player::removePartyInvitation(Party* party) { invitePartyList.remove(party); } void Player::clearPartyInvitations() { for (Party* invitingParty : invitePartyList) { invitingParty->removeInvite(*this, false); } invitePartyList.clear(); } GuildEmblems_t Player::getGuildEmblem(const Player* player) const { if (!player) { return GUILDEMBLEM_NONE; } const Guild* playerGuild = player->getGuild(); if (!playerGuild) { return GUILDEMBLEM_NONE; } if (player->getGuildWarVector().empty()) { if (guild == playerGuild) { return GUILDEMBLEM_MEMBER; } else { return GUILDEMBLEM_OTHER; } } else if (guild == playerGuild) { return GUILDEMBLEM_ALLY; } else if (isInWar(player)) { return GUILDEMBLEM_ENEMY; } return GUILDEMBLEM_NEUTRAL; } uint8_t Player::getCurrentMount() const { int32_t value; if (getStorageValue(PSTRG_MOUNTS_CURRENTMOUNT, value)) { return value; } return 0; } void Player::setCurrentMount(uint8_t mount) { addStorageValue(PSTRG_MOUNTS_CURRENTMOUNT, mount); } bool Player::toggleMount(bool mount) { if ((OTSYS_TIME() - lastToggleMount) < 3000 && !wasMounted) { sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED); return false; } if (mount) { if (isMounted()) { return false; } if (!group->access && tile->hasFlag(TILESTATE_PROTECTIONZONE)) { sendCancelMessage(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE); return false; } const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(getSex(), defaultOutfit.lookType); if (!playerOutfit) { return false; } uint8_t currentMountId = getCurrentMount(); if (currentMountId == 0) { sendOutfitWindow(); return false; } Mount* currentMount = g_game.mounts.getMountByID(currentMountId); if (!currentMount) { return false; } if (!hasMount(currentMount)) { setCurrentMount(0); sendOutfitWindow(); return false; } if (currentMount->premium && !isPremium()) { sendCancelMessage(RETURNVALUE_YOUNEEDPREMIUMACCOUNT); return false; } if (hasCondition(CONDITION_OUTFIT)) { sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return false; } defaultOutfit.lookMount = currentMount->clientId; if (currentMount->speed != 0) { g_game.changeSpeed(this, currentMount->speed); } } else { if (!isMounted()) { return false; } dismount(); } g_game.internalCreatureChangeOutfit(this, defaultOutfit); lastToggleMount = OTSYS_TIME(); return true; } bool Player::tameMount(uint8_t mountId) { if (!g_game.mounts.getMountByID(mountId)) 
{ return false; } const uint8_t tmpMountId = mountId - 1; const uint32_t key = PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31); int32_t value; if (getStorageValue(key, value)) { value |= (1 << (tmpMountId % 31)); } else { value = (1 << (tmpMountId % 31)); } addStorageValue(key, value); return true; } bool Player::untameMount(uint8_t mountId) { if (!g_game.mounts.getMountByID(mountId)) { return false; } const uint8_t tmpMountId = mountId - 1; const uint32_t key = PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31); int32_t value; if (!getStorageValue(key, value)) { return true; } value &= ~(1 << (tmpMountId % 31)); addStorageValue(key, value); if (getCurrentMount() == mountId) { if (isMounted()) { dismount(); g_game.internalCreatureChangeOutfit(this, defaultOutfit); } setCurrentMount(0); } return true; } bool Player::hasMount(const Mount* mount) const { if (isAccessPlayer()) { return true; } if (mount->premium && !isPremium()) { return false; } const uint8_t tmpMountId = mount->id - 1; int32_t value; if (!getStorageValue(PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31), value)) { return false; } return ((1 << (tmpMountId % 31)) & value) != 0; } void Player::dismount() { Mount* mount = g_game.mounts.getMountByID(getCurrentMount()); if (mount && mount->speed > 0) { g_game.changeSpeed(this, -mount->speed); } defaultOutfit.lookMount = 0; } bool Player::addOfflineTrainingTries(skills_t skill, uint64_t tries) { if (tries == 0 || skill == SKILL_LEVEL) { return false; } bool sendUpdate = false; uint32_t oldSkillValue, newSkillValue; long double oldPercentToNextLevel, newPercentToNextLevel; if (skill == SKILL_MAGLEVEL) { uint64_t currReqMana = vocation->getReqMana(magLevel); uint64_t nextReqMana = vocation->getReqMana(magLevel + 1); if (currReqMana >= nextReqMana) { return false; } oldSkillValue = magLevel; oldPercentToNextLevel = static_cast<long double>(manaSpent * 100) / nextReqMana; g_events->eventPlayerOnGainSkillTries(this, SKILL_MAGLEVEL, tries); uint32_t currMagLevel = magLevel; while ((manaSpent + tries) >= nextReqMana) { tries -= nextReqMana - manaSpent; magLevel++; manaSpent = 0; g_creatureEvents->playerAdvance(this, SKILL_MAGLEVEL, magLevel - 1, magLevel); sendUpdate = true; currReqMana = nextReqMana; nextReqMana = vocation->getReqMana(magLevel + 1); if (currReqMana >= nextReqMana) { tries = 0; break; } } manaSpent += tries; if (magLevel != currMagLevel) { std::ostringstream ss; ss << "You advanced to magic level " << magLevel << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); } uint8_t newPercent; if (nextReqMana > currReqMana) { newPercent = Player::getPercentLevel(manaSpent, nextReqMana); newPercentToNextLevel = static_cast<long double>(manaSpent * 100) / nextReqMana; } else { newPercent = 0; newPercentToNextLevel = 0; } if (newPercent != magLevelPercent) { magLevelPercent = newPercent; sendUpdate = true; } newSkillValue = magLevel; } else { uint64_t currReqTries = vocation->getReqSkillTries(skill, skills[skill].level); uint64_t nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1); if (currReqTries >= nextReqTries) { return false; } oldSkillValue = skills[skill].level; oldPercentToNextLevel = static_cast<long double>(skills[skill].tries * 100) / nextReqTries; g_events->eventPlayerOnGainSkillTries(this, skill, tries); uint32_t currSkillLevel = skills[skill].level; while ((skills[skill].tries + tries) >= nextReqTries) { tries -= nextReqTries - skills[skill].tries; skills[skill].level++; skills[skill].tries = 0; skills[skill].percent = 0; 
g_creatureEvents->playerAdvance(this, skill, (skills[skill].level - 1), skills[skill].level); sendUpdate = true; currReqTries = nextReqTries; nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1); if (currReqTries >= nextReqTries) { tries = 0; break; } } skills[skill].tries += tries; if (currSkillLevel != skills[skill].level) { std::ostringstream ss; ss << "You advanced to " << getSkillName(skill) << " level " << skills[skill].level << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); } uint8_t newPercent; if (nextReqTries > currReqTries) { newPercent = Player::getPercentLevel(skills[skill].tries, nextReqTries); newPercentToNextLevel = static_cast<long double>(skills[skill].tries * 100) / nextReqTries; } else { newPercent = 0; newPercentToNextLevel = 0; } if (skills[skill].percent != newPercent) { skills[skill].percent = newPercent; sendUpdate = true; } newSkillValue = skills[skill].level; } if (sendUpdate) { sendSkills(); } std::ostringstream ss; ss << std::fixed << std::setprecision(2) << "Your " << ucwords(getSkillName(skill)) << " skill changed from level " << oldSkillValue << " (with " << oldPercentToNextLevel << "% progress towards level " << (oldSkillValue + 1) << ") to level " << newSkillValue << " (with " << newPercentToNextLevel << "% progress towards level " << (newSkillValue + 1) << ')'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); return sendUpdate; } bool Player::hasModalWindowOpen(uint32_t modalWindowId) const { return find(modalWindows.begin(), modalWindows.end(), modalWindowId) != modalWindows.end(); } void Player::onModalWindowHandled(uint32_t modalWindowId) { modalWindows.remove(modalWindowId); } void Player::sendModalWindow(const ModalWindow& modalWindow) { if (!client) { return; } modalWindows.push_front(modalWindow.id); client->sendModalWindow(modalWindow); } void Player::clearModalWindows() { modalWindows.clear(); } uint16_t Player::getHelpers() const { uint16_t helpers; if (guild && party) { std::unordered_set<Player*> helperSet; const auto& guildMembers = guild->getMembersOnline(); helperSet.insert(guildMembers.begin(), guildMembers.end()); const auto& partyMembers = party->getMembers(); helperSet.insert(partyMembers.begin(), partyMembers.end()); const auto& partyInvitees = party->getInvitees(); helperSet.insert(partyInvitees.begin(), partyInvitees.end()); helperSet.insert(party->getLeader()); helpers = helperSet.size(); } else if (guild) { helpers = guild->getMembersOnline().size(); } else if (party) { helpers = party->getMemberCount() + party->getInvitationCount() + 1; } else { helpers = 0; } return helpers; } void Player::sendClosePrivate(uint16_t channelId) { if (channelId == CHANNEL_GUILD || channelId == CHANNEL_PARTY) { g_chat->removeUserFromChannel(*this, channelId); } if (client) { client->sendClosePrivate(channelId); } } uint64_t Player::getMoney() const { std::vector<const Container*> containers; uint64_t moneyCount = 0; for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) { Item* item = inventory[i]; if (!item) { continue; } const Container* container = item->getContainer(); if (container) { containers.push_back(container); } else { moneyCount += item->getWorth(); } } size_t i = 0; while (i < containers.size()) { const Container* container = containers[i++]; for (const Item* item : container->getItemList()) { const Container* tmpContainer = item->getContainer(); if (tmpContainer) { containers.push_back(tmpContainer); } else { moneyCount += item->getWorth(); } } } return moneyCount; } size_t 
Player::getMaxVIPEntries() const { if (group->maxVipEntries != 0) { return group->maxVipEntries; } else if (isPremium()) { return 100; } return 20; } size_t Player::getMaxDepotItems() const { if (group->maxDepotItems != 0) { return group->maxDepotItems; } else if (isPremium()) { return 2000; } return 1000; } std::forward_list<Condition*> Player::getMuteConditions() const { std::forward_list<Condition*> muteConditions; for (Condition* condition : conditions) { if (condition->getTicks() <= 0) { continue; } ConditionType_t type = condition->getType(); if (type != CONDITION_MUTED && type != CONDITION_CHANNELMUTEDTICKS && type != CONDITION_YELLTICKS) { continue; } muteConditions.push_front(condition); } return muteConditions; } void Player::setGuild(Guild* guild) { if (guild == this->guild) { return; } Guild* oldGuild = this->guild; this->guildNick.clear(); this->guild = nullptr; this->guildRank = nullptr; if (guild) { const GuildRank* rank = guild->getRankByLevel(1); if (!rank) { return; } this->guild = guild; this->guildRank = rank; guild->addMember(this); } if (oldGuild) { oldGuild->removeMember(this); } }
1
13,772
wrong place hehe
otland-forgottenserver
cpp
@@ -0,0 +1,14 @@
+require 'travis/build/appliances/base'
+
+module Travis
+  module Build
+    module Appliances
+      class RestartMysql < Base
+        def apply
+          sh.cmd '(ls /var/run/mysqld/mysqld.sock >& /dev/null) || sudo service mysql restart'
+        end
+      end
+    end
+  end
+end
+
1
1
13,102
Can this be `test -e /var/run/mysqld/mysqld.sock` instead of `ls`? Using `ls` for noninteractive stuff is a recipe for sadness, IMHO. /cc @tianon
travis-ci-travis-build
rb
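The review comment above proposes `test -e /var/run/mysqld/mysqld.sock` in place of the `ls` check: `test` is silent and only its exit status matters, whereas `ls` needs its output redirected away. A minimal sketch of how the appliance might look with that suggestion applied (illustrative only, reusing the `Base` class and `sh.cmd` helper already shown in the patch; not the change that was actually merged):

# Illustrative sketch only: the reviewer's suggested `test -e` check in place
# of `ls`, assuming the same Base class and sh.cmd helper used in the patch.
require 'travis/build/appliances/base'

module Travis
  module Build
    module Appliances
      class RestartMysql < Base
        def apply
          # `test -e` reports existence via its exit status alone, so there is
          # no output to silence before falling back to a restart.
          sh.cmd 'test -e /var/run/mysqld/mysqld.sock || sudo service mysql restart'
        end
      end
    end
  end
end

Either form restarts MySQL only when the socket file is missing; the `test -e` variant simply avoids spawning `ls` and discarding its output.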
@@ -30,12 +30,12 @@ class Proposal < ActiveRecord::Base
   has_many :individual_approvals, ->{ individual }, class_name: 'Approvals::Individual'
   has_many :approvers, through: :individual_approvals, source: :user
   has_many :api_tokens, through: :individual_approvals
-  has_many :attachments
+  has_many :attachments, dependent: :destroy
   has_many :approval_delegates, through: :approvers, source: :outgoing_delegates
-  has_many :comments
+  has_many :comments, dependent: :destroy
   has_many :observations, -> { where("proposal_roles.role_id in (select roles.id from roles where roles.name='observer')") }
   has_many :observers, through: :observations, source: :user
-  belongs_to :client_data, polymorphic: true
+  belongs_to :client_data, polymorphic: true, dependent: :destroy
   belongs_to :requester, class_name: 'User'
 
   # The following list also servers as an interface spec for client_datas
1
class Proposal < ActiveRecord::Base include WorkflowModel include ValueHelper has_paper_trail class_name: 'C2Version' CLIENT_MODELS = [] # this gets populated later FLOWS = %w(parallel linear).freeze workflow do state :pending do event :approve, :transitions_to => :approved event :restart, :transitions_to => :pending event :cancel, :transitions_to => :cancelled end state :approved do event :restart, :transitions_to => :pending event :cancel, :transitions_to => :cancelled event :approve, :transitions_to => :approved do halt # no need to trigger a state transition end end state :cancelled do event :approve, :transitions_to => :cancelled do halt # can't escape end end end has_many :approvals has_many :individual_approvals, ->{ individual }, class_name: 'Approvals::Individual' has_many :approvers, through: :individual_approvals, source: :user has_many :api_tokens, through: :individual_approvals has_many :attachments has_many :approval_delegates, through: :approvers, source: :outgoing_delegates has_many :comments has_many :observations, -> { where("proposal_roles.role_id in (select roles.id from roles where roles.name='observer')") } has_many :observers, through: :observations, source: :user belongs_to :client_data, polymorphic: true belongs_to :requester, class_name: 'User' # The following list also servers as an interface spec for client_datas # Note: clients may implement: # :fields_for_display # :public_identifier # :version # Note: clients should also implement :version delegate :client, to: :client_data, allow_nil: true validates :client_data_type, inclusion: { in: ->(_) { self.client_model_names }, message: "%{value} is not a valid client model type. Valid client model types are: #{CLIENT_MODELS.inspect}", allow_blank: true } validates :flow, presence: true, inclusion: {in: FLOWS} validates :requester_id, presence: true self.statuses.each do |status| scope status, -> { where(status: status) } end scope :closed, -> { where(status: ['approved', 'cancelled']) } #TODO: Backfill to change approvals in 'reject' status to 'cancelled' status scope :cancelled, -> { where(status: 'cancelled') } after_create :update_public_id # @todo - this should probably be the only entry into the approval system def root_approval self.approvals.where(parent: nil).first end def parallel? self.flow == 'parallel' end def linear? self.flow == 'linear' end def delegate?(user) self.approval_delegates.exists?(assignee_id: user.id) end def existing_approval_for(user) where_clause = <<-SQL user_id = :user_id OR user_id IN (SELECT assigner_id FROM approval_delegates WHERE assignee_id = :user_id) OR user_id IN (SELECT assignee_id FROM approval_delegates WHERE assigner_id = :user_id) SQL self.approvals.where(where_clause, user_id: user.id).first end # TODO convert to an association def delegates self.approval_delegates.map(&:assignee) end # Returns a list of all users involved with the Proposal. def users # TODO use SQL results = self.approvers + self.observers + self.delegates + [self.requester] results.compact.uniq end def root_approval=(root) old_approvals = self.approvals.to_a approval_list = root.pre_order_tree_traversal approval_list.each { |a| a.proposal = self } self.approvals = approval_list # position may be out of whack, so we reset it approval_list.each_with_index do |approval, idx| approval.set_list_position(idx + 1) # start with 1 end self.clean_up_old_approvals(old_approvals, approval_list) root.initialize! 
self.reset_status() end def clean_up_old_approvals(old_approvals, approval_list) # destroy any old approvals that are not a part of approval_list (old_approvals - approval_list).each do |appr| appr.destroy() if Approval.exists?(appr.id) end end # convenience wrapper for setting a single approver def approver=(approver) # Don't recreate the approval existing = self.existing_approval_for(approver) if existing.nil? self.root_approval = Approvals::Individual.new(user: approver) end end def reset_status() unless self.cancelled? # no escape from cancelled if self.root_approval.nil? || self.root_approval.approved? self.update(status: 'approved') else self.update(status: 'pending') end end end def existing_observation_for(user) observations.find_by(user: user) end def add_observer(email_or_user, adder=nil, reason=nil) user = find_user(email_or_user) unless existing_observation_for(user) create_new_observation(user, adder, reason) end end def add_requester(email) user = User.for_email(email) self.set_requester(user) end def set_requester(user) self.update_attributes!(requester_id: user.id) end # Approvals in which someone can take action def currently_awaiting_approvals self.individual_approvals.actionable end def currently_awaiting_approvers self.approvers.merge(self.currently_awaiting_approvals) end def awaiting_approver?(user) self.currently_awaiting_approvers.include?(user) end # delegated, with a fallback # TODO refactor to class method in a module def delegate_with_default(method) data = self.client_data result = nil if data && data.respond_to?(method) result = data.public_send(method) end if result.present? result elsif block_given? yield else result end end ## delegated methods ## def public_identifier self.delegate_with_default(:public_identifier) { "##{self.id}" } end def name self.delegate_with_default(:name) { "Request #{self.public_identifier}" } end def fields_for_display # TODO better default self.delegate_with_default(:fields_for_display) { [] } end # Be careful if altering the identifier. You run the risk of "expiring" all # pending approval emails def version [ self.updated_at.to_i, self.client_data.try(:version) ].compact.max end ####################### def restart # Note that none of the state machine's history is stored self.api_tokens.update_all(expires_at: Time.zone.now) self.approvals.update_all(status: 'pending') if self.root_approval self.root_approval.initialize! end Dispatcher.deliver_new_proposal_emails(self) end # Returns True if the user is an "active" approver and has acted on the proposal def is_active_approver?(user) self.individual_approvals.non_pending.exists?(user_id: user.id) end def self.client_model_names CLIENT_MODELS.map(&:to_s) end def self.client_slugs CLIENT_MODELS.map(&:client) end protected def update_public_id self.update_attribute(:public_id, self.public_identifier) end def create_new_observation(user, adder, reason) ObservationCreator.new( observer: user, proposal_id: id, reason: reason, observer_adder: adder ).run end private def find_user(email_or_user) if email_or_user.is_a?(User) email_or_user else User.for_email(email_or_user) end end end
1
15,064
Are these `dependent: :destroy` options intended to implement cascading deletes? I had assumed they were added because of the `paranoia` gem, but since that is no longer part of this PR, are they here as a best practice?
18F-C2
rb
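Regarding the `dependent: :destroy` question above: in ActiveRecord this option does implement a cascading delete at the application level, destroying the associated records and running their callbacks when the parent is destroyed, unlike a database-level `ON DELETE CASCADE`, which bypasses callbacks. A minimal sketch with simplified stand-in models, not the actual C2 classes or schema:

# Simplified stand-ins for illustration; the real C2 models carry extra
# validations and associations beyond what is shown here.
class Proposal < ActiveRecord::Base
  has_many :comments, dependent: :destroy   # destroying a proposal also destroys its comments
end

class Comment < ActiveRecord::Base
  belongs_to :proposal
end

proposal = Proposal.create!
proposal.comments.create!(comment_text: "looks good")   # hypothetical attribute name

proposal.destroy
Comment.where(proposal_id: proposal.id).exists?   # => false; the children were destroyed too

With `dependent: :destroy`, each child is loaded and destroyed individually so its model callbacks run, at the cost of more queries than a SQL-level cascade.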
@@ -1452,6 +1452,7 @@ static void establish_tunnel(h2o_req_t *req, h2o_tunnel_t *tunnel, uint64_t idle
     h2o_iovec_t datagram_flow_id = {};
 
     if (stream->tunnel == NULL) {
+        tunnel->destroy(tunnel); /* the tunnel has been closed in the meantime */
         return;
     }
 
1
/* * Copyright (c) 2018 Fastly, Kazuho Oku * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <sys/socket.h> #include "khash.h" #include "h2o/absprio.h" #include "h2o/http3_common.h" #include "h2o/http3_server.h" #include "h2o/http3_internal.h" #include "./../probes_.h" /** * the scheduler */ struct st_h2o_http3_req_scheduler_t { struct { struct { h2o_linklist_t high; h2o_linklist_t low; } urgencies[H2O_ABSPRIO_NUM_URGENCY_LEVELS]; size_t smallest_urgency; } active; h2o_linklist_t conn_blocked; }; /** * */ struct st_h2o_http3_req_scheduler_node_t { h2o_linklist_t link; h2o_absprio_t priority; uint64_t call_cnt; }; /** * callback used to compare precedence of the entries within the same urgency level (e.g., by comparing stream IDs) */ typedef int (*h2o_http3_req_scheduler_compare_cb)(struct st_h2o_http3_req_scheduler_t *sched, const struct st_h2o_http3_req_scheduler_node_t *x, const struct st_h2o_http3_req_scheduler_node_t *y); /** * Once the size of the request body being received exceeds thit limit, streaming mode will be used (if possible), and the * concurrency of such requests would be limited to one per connection. 
*/ #define H2O_HTTP3_REQUEST_BODY_MIN_BYTES_TO_BLOCK 10240 enum h2o_http3_server_stream_state { /** * receiving headers */ H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS, /** * receiving request body (runs concurrently) */ H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK, /** * blocked, waiting to be unblocked one by one (either in streaming mode or in non-streaming mode) */ H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED, /** * in non-streaming mode, receiving body */ H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_UNBLOCKED, /** * in non-streaming mode, waiting for the request to be processed */ H2O_HTTP3_SERVER_STREAM_STATE_REQ_PENDING, /** * request has been processed, waiting for the response headers */ H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, /** * sending body (the generator MAY have closed, but the transmission to the client is still ongoing) */ H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY, /** * all data has been sent and ACKed, waiting for the transport stream to close (req might be disposed when entering this state) */ H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT }; struct st_h2o_http3_server_stream_t; KHASH_MAP_INIT_INT64(stream, struct st_h2o_http3_server_stream_t *) struct st_h2o_http3_server_conn_t { h2o_conn_t super; h2o_http3_conn_t h3; ptls_handshake_properties_t handshake_properties; h2o_linklist_t _conns; /* linklist to h2o_context_t::http3._conns */ /** * link-list of pending requests using st_h2o_http3_server_stream_t::link */ struct { /** * holds streams in RECV_BODY_BLOCKED state. They are promoted one by one to the POST_BLOCK State. */ h2o_linklist_t recv_body_blocked; /** * holds streams that are in request streaming mode. */ h2o_linklist_t req_streaming; /** * holds streams in REQ_PENDING state or RECV_BODY_POST_BLOCK state (that is using streaming; i.e., write_req.cb != NULL). */ h2o_linklist_t pending; } delayed_streams; /** * next application-level timeout */ h2o_timer_t timeout; /** * counter (the order MUST match that of h2o_http3_server_stream_state; it is accessed by index via the use of counters[]) */ union { struct { uint32_t recv_headers; uint32_t recv_body_before_block; uint32_t recv_body_blocked; uint32_t recv_body_unblocked; uint32_t req_pending; uint32_t send_headers; uint32_t send_body; uint32_t close_wait; }; uint32_t counters[1]; } num_streams; /** * Number of streams that is request streaming. The state can be in either one of SEND_HEADERS, SEND_BODY, CLOSE_WAIT. */ uint32_t num_streams_req_streaming; /** * scheduler */ struct { /** * States for request streams. */ struct st_h2o_http3_req_scheduler_t reqs; /** * States for unidirectional streams. Each element is a bit vector where slot for each stream is defined as: 1 << stream_id. 
*/ struct { uint16_t active; uint16_t conn_blocked; } uni; } scheduler; /** * stream map used for datagram flows */ khash_t(stream) * datagram_flows; }; /** * sendvec, with additional field that contains the starting offset of the content */ struct st_h2o_http3_server_sendvec_t { h2o_sendvec_t vec; /** * Starting offset of the content carried by the vector, or UINT64_MAX if it is not carrying body */ uint64_t entity_offset; }; struct st_h2o_http3_server_stream_t { quicly_stream_t *quic; struct { h2o_buffer_t *buf; int (*handle_input)(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, const char **err_desc); uint64_t bytes_left_in_data_frame; } recvbuf; struct { H2O_VECTOR(struct st_h2o_http3_server_sendvec_t) vecs; size_t off_within_first_vec; size_t min_index_to_addref; uint64_t final_size, final_body_size; uint8_t data_frame_header_buf[9]; } sendbuf; enum h2o_http3_server_stream_state state; h2o_linklist_t link; h2o_ostream_t ostr_final; struct st_h2o_http3_req_scheduler_node_t scheduler; /** * if read is blocked */ uint8_t read_blocked : 1; /** * if h2o_proceed_response has been invoked, or if the invocation has been requested */ uint8_t proceed_requested : 1; /** * this flag is set by on_send_emit, triggers the invocation h2o_proceed_response in scheduler_do_send, used by do_send to * take different actions based on if it has been called while scheduler_do_send is running. */ uint8_t proceed_while_sending : 1; /** * if a PRIORITY_UPDATE frame has been received */ uint8_t received_priority_update : 1; /** * used in CLOSE_WAIT state to determine if h2o_dispose_request has been called */ uint8_t req_disposed : 1; /** * buffer to hold the request body (or a chunk of, if in streaming mode), or CONNECT payload */ h2o_buffer_t *req_body; /** * tunnel, if used. This object is instantiated when the request is being processed, whereas `tunnel->tunnel` is assigned when * a tunnel is established (i.e. when 2xx response is being received). */ struct st_h2o_http3_server_tunnel_t { /** * Pointer to the tunnel that is connected to the origin. This object is destroyed as soon as an error is reported on either * the read side or the write side of the tunnel. The send side of the H3 stream connected to the client is FINed when the * tunnel is destroyed; therefore, `quicly_sendstate_is_open(&stream->quic->sendstate) == (stream->tunnel->tunnel != NULL)`. */ h2o_tunnel_t *tunnel; struct st_h2o_http3_server_stream_t *stream; /** * While waiting for the generator to respond, this field contains the flow-id offered by the cilent. When the generator * provides a successful response, the stream is registered as a datagram flow, or this id is set to UINT64_MAX if the * establish tunnel did not meet the requirements. */ uint64_t datagram_flow_id; struct { h2o_timer_t delayed_write; char bytes_inflight[16384]; unsigned is_inflight : 1; } up; } * tunnel; /** * the request. Placed at the end, as it holds the pool. 
*/ h2o_req_t req; }; static void on_stream_destroy(quicly_stream_t *qs, int err); static int handle_input_post_trailers(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, const char **err_desc); static int handle_input_expect_data(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, const char **err_desc); static void tunnel_write(struct st_h2o_http3_server_stream_t *stream); static void tunnel_write_delayed(h2o_timer_t *timer); static void req_scheduler_init(struct st_h2o_http3_req_scheduler_t *sched) { size_t i; for (i = 0; i < H2O_ABSPRIO_NUM_URGENCY_LEVELS; ++i) { h2o_linklist_init_anchor(&sched->active.urgencies[i].high); h2o_linklist_init_anchor(&sched->active.urgencies[i].low); } sched->active.smallest_urgency = i; h2o_linklist_init_anchor(&sched->conn_blocked); } static void req_scheduler_activate(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node, h2o_http3_req_scheduler_compare_cb comp) { /* unlink if necessary */ if (h2o_linklist_is_linked(&node->link)) h2o_linklist_unlink(&node->link); if (!node->priority.incremental || node->call_cnt == 0) { /* non-incremental streams and the first emission of incremental streams go in strict order */ h2o_linklist_t *anchor = &sched->active.urgencies[node->priority.urgency].high, *pos; for (pos = anchor->prev; pos != anchor; pos = pos->prev) { struct st_h2o_http3_req_scheduler_node_t *node_at_pos = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_req_scheduler_node_t, link, pos); if (comp(sched, node_at_pos, node) < 0) break; } h2o_linklist_insert(pos->next, &node->link); } else { /* once sent, incremental streams go into a lower list */ h2o_linklist_insert(&sched->active.urgencies[node->priority.urgency].low, &node->link); } /* book keeping */ if (node->priority.urgency < sched->active.smallest_urgency) sched->active.smallest_urgency = node->priority.urgency; } static void req_scheduler_update_smallest_urgency_post_removal(struct st_h2o_http3_req_scheduler_t *sched, size_t changed) { if (sched->active.smallest_urgency < changed) return; /* search from the location that *might* have changed */ sched->active.smallest_urgency = changed; while (h2o_linklist_is_empty(&sched->active.urgencies[sched->active.smallest_urgency].high) && h2o_linklist_is_empty(&sched->active.urgencies[sched->active.smallest_urgency].low)) { ++sched->active.smallest_urgency; if (sched->active.smallest_urgency >= H2O_ABSPRIO_NUM_URGENCY_LEVELS) break; } } static void req_scheduler_deactivate(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node) { if (h2o_linklist_is_linked(&node->link)) h2o_linklist_unlink(&node->link); req_scheduler_update_smallest_urgency_post_removal(sched, node->priority.urgency); } static void req_scheduler_setup_for_next(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node, h2o_http3_req_scheduler_compare_cb comp) { assert(h2o_linklist_is_linked(&node->link)); /* reschedule to achieve round-robin behavior */ if (node->priority.incremental) req_scheduler_activate(sched, node, comp); } static void req_scheduler_conn_blocked(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node) { if (h2o_linklist_is_linked(&node->link)) h2o_linklist_unlink(&node->link); h2o_linklist_insert(&sched->conn_blocked, &node->link); req_scheduler_update_smallest_urgency_post_removal(sched, node->priority.urgency); } static void 
req_scheduler_unblock_conn_blocked(struct st_h2o_http3_req_scheduler_t *sched, h2o_http3_req_scheduler_compare_cb comp) { while (!h2o_linklist_is_empty(&sched->conn_blocked)) { struct st_h2o_http3_req_scheduler_node_t *node = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_req_scheduler_node_t, link, sched->conn_blocked.next); req_scheduler_activate(sched, node, comp); } } static int req_scheduler_compare_stream_id(struct st_h2o_http3_req_scheduler_t *sched, const struct st_h2o_http3_req_scheduler_node_t *x, const struct st_h2o_http3_req_scheduler_node_t *y) { struct st_h2o_http3_server_stream_t *sx = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, scheduler, x), *sy = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, scheduler, y); if (sx->quic->stream_id < sy->quic->stream_id) { return -1; } else if (sx->quic->stream_id > sy->quic->stream_id) { return 1; } else { return 0; } } static struct st_h2o_http3_server_conn_t *get_conn(struct st_h2o_http3_server_stream_t *stream) { return (void *)stream->req.conn; } static uint32_t *get_state_counter(struct st_h2o_http3_server_conn_t *conn, enum h2o_http3_server_stream_state state) { return conn->num_streams.counters + (size_t)state; } static void request_run_delayed(struct st_h2o_http3_server_conn_t *conn) { if (!h2o_timer_is_linked(&conn->timeout)) h2o_timer_link(conn->super.ctx->loop, 0, &conn->timeout); } static void check_run_blocked(struct st_h2o_http3_server_conn_t *conn) { if (conn->num_streams.recv_body_unblocked + conn->num_streams_req_streaming == 0 && !h2o_linklist_is_empty(&conn->delayed_streams.recv_body_blocked)) request_run_delayed(conn); } static void destroy_tunnel(struct st_h2o_http3_server_stream_t *stream) { stream->tunnel->tunnel->destroy(stream->tunnel->tunnel); stream->tunnel->tunnel = NULL; /* remove stream from datagram flow list */ struct st_h2o_http3_server_conn_t *conn = get_conn(stream); if (stream->tunnel->datagram_flow_id != UINT64_MAX) { khiter_t iter = kh_get(stream, conn->datagram_flows, stream->tunnel->datagram_flow_id); /* it's possible the tunnel wasn't established yet */ if (iter != kh_end(conn->datagram_flows)) kh_del(stream, conn->datagram_flows, iter); } } static void pre_dispose_request(struct st_h2o_http3_server_stream_t *stream) { size_t i; /* release vectors */ for (i = 0; i != stream->sendbuf.vecs.size; ++i) { struct st_h2o_http3_server_sendvec_t *vec = stream->sendbuf.vecs.entries + i; if (vec->vec.callbacks->update_refcnt != NULL) vec->vec.callbacks->update_refcnt(&vec->vec, &stream->req, 0); } /* dispose request body buffer */ if (stream->req_body != NULL) h2o_buffer_dispose(&stream->req_body); /* clean up request streaming */ if (stream->req.write_req.cb != NULL) { struct st_h2o_http3_server_conn_t *conn = get_conn(stream); assert(conn->num_streams_req_streaming != 0); --conn->num_streams_req_streaming; check_run_blocked(conn); } /* clean up tunnel */ if (stream->tunnel != NULL) { if (stream->tunnel->tunnel != NULL) destroy_tunnel(stream); if (h2o_timer_is_linked(&stream->tunnel->up.delayed_write)) h2o_timer_unlink(&stream->tunnel->up.delayed_write); free(stream->tunnel); stream->tunnel = NULL; } } static void set_state(struct st_h2o_http3_server_stream_t *stream, enum h2o_http3_server_stream_state state, int in_generator) { struct st_h2o_http3_server_conn_t *conn = get_conn(stream); enum h2o_http3_server_stream_state old_state = stream->state; H2O_PROBE_CONN(H3S_STREAM_SET_STATE, &conn->super, stream->quic->stream_id, (unsigned)state); --*get_state_counter(conn, old_state); 
stream->state = state; ++*get_state_counter(conn, stream->state); switch (state) { case H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED: assert(conn->delayed_streams.recv_body_blocked.prev == &stream->link || !"stream is not registered to the recv_body list?"); break; case H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT: { pre_dispose_request(stream); if (!in_generator) { h2o_dispose_request(&stream->req); stream->req_disposed = 1; } static const quicly_stream_callbacks_t close_wait_callbacks = {on_stream_destroy, quicly_stream_noop_on_send_shift, quicly_stream_noop_on_send_emit, quicly_stream_noop_on_send_stop, quicly_stream_noop_on_receive, quicly_stream_noop_on_receive_reset}; stream->quic->callbacks = &close_wait_callbacks; } break; default: break; } } /** * Shutdowns a stream. Note that a request stream should not be shut down until receiving some QUIC frame that refers to that * stream, but we might might have created stream state due to receiving a PRIORITY_UPDATE frame prior to that (see * handle_priority_update_frame). */ static void shutdown_stream(struct st_h2o_http3_server_stream_t *stream, int stop_sending_code, int reset_code, int in_generator) { assert(stream->state < H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT); if (h2o_linklist_is_linked(&stream->link)) h2o_linklist_unlink(&stream->link); if (quicly_stream_has_receive_side(0, stream->quic->stream_id)) quicly_request_stop(stream->quic, stop_sending_code); if (quicly_stream_has_send_side(0, stream->quic->stream_id) && !quicly_sendstate_transfer_complete(&stream->quic->sendstate)) quicly_reset_stream(stream->quic, reset_code); set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT, in_generator); } static socklen_t get_sockname(h2o_conn_t *_conn, struct sockaddr *sa) { struct st_h2o_http3_server_conn_t *conn = (void *)_conn; struct sockaddr *src = quicly_get_sockname(conn->h3.super.quic); socklen_t len = src->sa_family == AF_UNSPEC ? sizeof(struct sockaddr) : quicly_get_socklen(src); memcpy(sa, src, len); return len; } static socklen_t get_peername(h2o_conn_t *_conn, struct sockaddr *sa) { struct st_h2o_http3_server_conn_t *conn = (void *)_conn; struct sockaddr *src = quicly_get_peername(conn->h3.super.quic); socklen_t len = quicly_get_socklen(src); memcpy(sa, src, len); return len; } static ptls_t *get_ptls(h2o_conn_t *_conn) { struct st_h2o_http3_server_conn_t *conn = (void *)_conn; return quicly_get_tls(conn->h3.super.quic); } static int get_skip_tracing(h2o_conn_t *conn) { ptls_t *ptls = get_ptls(conn); return ptls_skip_tracing(ptls); } static h2o_iovec_t log_cc_name(h2o_req_t *req) { struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; quicly_stats_t stats; if (quicly_get_stats(conn->h3.super.quic, &stats) == 0) { switch (stats.cc.impl->type) { case CC_RENO_MODIFIED: return h2o_iovec_init(H2O_STRLIT("reno")); case CC_CUBIC: return h2o_iovec_init(H2O_STRLIT("cubic")); } } return h2o_iovec_init(NULL, 0); } static h2o_iovec_t log_tls_protocol_version(h2o_req_t *_req) { return h2o_iovec_init(H2O_STRLIT("TLSv1.3")); } static h2o_iovec_t log_session_reused(h2o_req_t *req) { struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; ptls_t *tls = quicly_get_tls(conn->h3.super.quic); return ptls_is_psk_handshake(tls) ? 
h2o_iovec_init(H2O_STRLIT("1")) : h2o_iovec_init(H2O_STRLIT("0")); } static h2o_iovec_t log_cipher(h2o_req_t *req) { struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; ptls_t *tls = quicly_get_tls(conn->h3.super.quic); ptls_cipher_suite_t *cipher = ptls_get_cipher(tls); return cipher != NULL ? h2o_iovec_init(cipher->aead->name, strlen(cipher->aead->name)) : h2o_iovec_init(NULL, 0); } static h2o_iovec_t log_cipher_bits(h2o_req_t *req) { struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; ptls_t *tls = quicly_get_tls(conn->h3.super.quic); ptls_cipher_suite_t *cipher = ptls_get_cipher(tls); if (cipher == NULL) return h2o_iovec_init(NULL, 0); char *buf = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT16_LONGEST_STR)); return h2o_iovec_init(buf, sprintf(buf, "%" PRIu16, (uint16_t)(cipher->aead->key_size * 8))); } static h2o_iovec_t log_session_id(h2o_req_t *_req) { /* FIXME */ return h2o_iovec_init(NULL, 0); } static h2o_iovec_t log_server_name(h2o_req_t *req) { struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; ptls_t *tls = quicly_get_tls(conn->h3.super.quic); const char *server_name = ptls_get_server_name(tls); return server_name != NULL ? h2o_iovec_init(server_name, strlen(server_name)) : h2o_iovec_init(NULL, 0); } static h2o_iovec_t log_negotiated_protocol(h2o_req_t *req) { struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; ptls_t *tls = quicly_get_tls(conn->h3.super.quic); const char *proto = ptls_get_negotiated_protocol(tls); return proto != NULL ? h2o_iovec_init(proto, strlen(proto)) : h2o_iovec_init(NULL, 0); } static h2o_iovec_t log_stream_id(h2o_req_t *_req) { struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req); char *buf = h2o_mem_alloc_pool(&stream->req.pool, char, sizeof(H2O_UINT64_LONGEST_STR)); return h2o_iovec_init(buf, sprintf(buf, "%" PRIu64, stream->quic->stream_id)); } static h2o_iovec_t log_quic_stats(h2o_req_t *req) { #define APPLY_NUM_FRAMES(f, dir) \ f(padding, dir) f(ping, dir) f(ack, dir) f(reset_stream, dir) f(stop_sending, dir) f(crypto, dir) f(new_token, dir) \ f(stream, dir) f(max_data, dir) f(max_stream_data, dir) f(max_streams_bidi, dir) f(max_streams_uni, dir) \ f(data_blocked, dir) f(stream_data_blocked, dir) f(streams_blocked, dir) f(new_connection_id, dir) \ f(retire_connection_id, dir) f(path_challenge, dir) f(path_response, dir) f(transport_close, dir) \ f(application_close, dir) f(handshake_done, dir) f(ack_frequency, dir) #define FORMAT_OF_NUM_FRAMES(n, dir) "," H2O_TO_STR(n) "-" H2O_TO_STR(dir) "=%" PRIu64 #define VALUE_OF_NUM_FRAMES(n, dir) , stats.num_frames_##dir.n struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; quicly_stats_t stats; if (quicly_get_stats(conn->h3.super.quic, &stats) != 0) return h2o_iovec_init(H2O_STRLIT("-")); char *buf; size_t len, bufsize = 1400; Redo: buf = h2o_mem_alloc_pool(&req->pool, char, bufsize); len = snprintf(buf, bufsize, "packets-received=%" PRIu64 ",packets-decryption-failed=%" PRIu64 ",packets-sent=%" PRIu64 ",packets-lost=%" PRIu64 ",packets-ack-received=%" PRIu64 ",bytes-received=%" PRIu64 ",bytes-sent=%" PRIu64 ",rtt-minimum=%" PRIu32 ",rtt-smoothed=%" PRIu32 ",rtt-variance=%" PRIu32 ",rtt-latest=%" PRIu32 ",cwnd=%" PRIu32 APPLY_NUM_FRAMES(FORMAT_OF_NUM_FRAMES, received) APPLY_NUM_FRAMES(FORMAT_OF_NUM_FRAMES, sent), stats.num_packets.received, 
stats.num_packets.decryption_failed, stats.num_packets.sent, stats.num_packets.lost, stats.num_packets.ack_received, stats.num_bytes.received, stats.num_bytes.sent, stats.rtt.minimum, stats.rtt.smoothed, stats.rtt.variance, stats.rtt.latest, stats.cc.cwnd APPLY_NUM_FRAMES(VALUE_OF_NUM_FRAMES, received) APPLY_NUM_FRAMES(VALUE_OF_NUM_FRAMES, sent)); if (len + 1 > bufsize) { bufsize = len + 1; goto Redo; } return h2o_iovec_init(buf, len); #undef APPLY_NUM_FRAMES #undef FORMAT_OF_NUM_FRAMES #undef VALUE_OF_NUM_FRAMES } void on_stream_destroy(quicly_stream_t *qs, int err) { struct st_h2o_http3_server_stream_t *stream = qs->data; struct st_h2o_http3_server_conn_t *conn = get_conn(stream); --*get_state_counter(conn, stream->state); req_scheduler_deactivate(&conn->scheduler.reqs, &stream->scheduler); if (h2o_linklist_is_linked(&stream->link)) h2o_linklist_unlink(&stream->link); if (stream->state != H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT) pre_dispose_request(stream); if (!stream->req_disposed) h2o_dispose_request(&stream->req); free(stream); } static void allocated_vec_update_refcnt(h2o_sendvec_t *vec, h2o_req_t *req, int is_incr) { assert(!is_incr); free(vec->raw); } static int retain_sendvecs(struct st_h2o_http3_server_stream_t *stream) { for (; stream->sendbuf.min_index_to_addref != stream->sendbuf.vecs.size; ++stream->sendbuf.min_index_to_addref) { struct st_h2o_http3_server_sendvec_t *vec = stream->sendbuf.vecs.entries + stream->sendbuf.min_index_to_addref; /* create a copy if it does not provide update_refcnt (update_refcnt is already called in do_send, if available) */ if (vec->vec.callbacks->update_refcnt == NULL) { static const h2o_sendvec_callbacks_t vec_callbacks = {h2o_sendvec_flatten_raw, allocated_vec_update_refcnt}; size_t off_within_vec = stream->sendbuf.min_index_to_addref == 0 ? 
stream->sendbuf.off_within_first_vec : 0; h2o_iovec_t copy = h2o_iovec_init(h2o_mem_alloc(vec->vec.len - off_within_vec), vec->vec.len - off_within_vec); if (!(*vec->vec.callbacks->flatten)(&vec->vec, &stream->req, copy, off_within_vec)) { free(copy.base); return 0; } vec->vec = (h2o_sendvec_t){&vec_callbacks, copy.len, {copy.base}}; if (stream->sendbuf.min_index_to_addref == 0) stream->sendbuf.off_within_first_vec = 0; } } return 1; } static void on_send_shift(quicly_stream_t *qs, size_t delta) { struct st_h2o_http3_server_stream_t *stream = qs->data; size_t i; assert(stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS || stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY); assert(delta != 0); assert(stream->sendbuf.vecs.size != 0); size_t bytes_avail_in_first_vec = stream->sendbuf.vecs.entries[0].vec.len - stream->sendbuf.off_within_first_vec; if (delta < bytes_avail_in_first_vec) { stream->sendbuf.off_within_first_vec += delta; return; } delta -= bytes_avail_in_first_vec; stream->sendbuf.off_within_first_vec = 0; if (stream->sendbuf.vecs.entries[0].vec.callbacks->update_refcnt != NULL) stream->sendbuf.vecs.entries[0].vec.callbacks->update_refcnt(&stream->sendbuf.vecs.entries[0].vec, &stream->req, 0); for (i = 1; delta != 0; ++i) { assert(i < stream->sendbuf.vecs.size); if (delta < stream->sendbuf.vecs.entries[i].vec.len) { stream->sendbuf.off_within_first_vec = delta; break; } delta -= stream->sendbuf.vecs.entries[i].vec.len; if (stream->sendbuf.vecs.entries[i].vec.callbacks->update_refcnt != NULL) stream->sendbuf.vecs.entries[i].vec.callbacks->update_refcnt(&stream->sendbuf.vecs.entries[i].vec, &stream->req, 0); } memmove(stream->sendbuf.vecs.entries, stream->sendbuf.vecs.entries + i, (stream->sendbuf.vecs.size - i) * sizeof(stream->sendbuf.vecs.entries[0])); stream->sendbuf.vecs.size -= i; if (stream->sendbuf.min_index_to_addref <= i) { stream->sendbuf.min_index_to_addref = 0; } else { stream->sendbuf.min_index_to_addref -= i; } if (stream->sendbuf.vecs.size == 0) { if (quicly_sendstate_is_open(&stream->quic->sendstate)) { assert(stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS || stream->proceed_requested); } else { set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT, 0); } } } static void on_send_emit(quicly_stream_t *qs, size_t off, void *_dst, size_t *len, int *wrote_all) { struct st_h2o_http3_server_stream_t *stream = qs->data; assert(stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS || stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY); uint8_t *dst = _dst, *dst_end = dst + *len; size_t vec_index = 0; /* find the start position identified by vec_index and off */ off += stream->sendbuf.off_within_first_vec; while (off != 0) { assert(vec_index < stream->sendbuf.vecs.size); if (off < stream->sendbuf.vecs.entries[vec_index].vec.len) break; off -= stream->sendbuf.vecs.entries[vec_index].vec.len; ++vec_index; } assert(vec_index < stream->sendbuf.vecs.size); /* write */ *wrote_all = 0; do { struct st_h2o_http3_server_sendvec_t *this_vec = stream->sendbuf.vecs.entries + vec_index; size_t sz = this_vec->vec.len - off; if (dst_end - dst < sz) sz = dst_end - dst; if (!(this_vec->vec.callbacks->flatten)(&this_vec->vec, &stream->req, h2o_iovec_init(dst, sz), off)) goto Error; if (this_vec->entity_offset != UINT64_MAX && stream->req.bytes_sent < this_vec->entity_offset + off + sz) stream->req.bytes_sent = this_vec->entity_offset + off + sz; dst += sz; off += sz; /* when reaching the end of the current vector, update vec_index, wrote_all */ if 
(off == this_vec->vec.len) { off = 0; ++vec_index; if (vec_index == stream->sendbuf.vecs.size) { *wrote_all = 1; break; } } } while (dst != dst_end); *len = dst - (uint8_t *)_dst; /* retain the payload of response body before calling `h2o_proceed_request`, as the generator might discard the buffer */ if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY && *wrote_all && quicly_sendstate_is_open(&stream->quic->sendstate) && !stream->proceed_requested) { if (!retain_sendvecs(stream)) goto Error; stream->proceed_requested = 1; stream->proceed_while_sending = 1; } return; Error: *len = 0; *wrote_all = 1; shutdown_stream(stream, H2O_HTTP3_ERROR_EARLY_RESPONSE, H2O_HTTP3_ERROR_INTERNAL, 0); } static void on_send_stop(quicly_stream_t *qs, int err) { struct st_h2o_http3_server_stream_t *stream = qs->data; shutdown_stream(stream, H2O_HTTP3_ERROR_REQUEST_CANCELLED, err, 0); } static void handle_buffered_input(struct st_h2o_http3_server_stream_t *stream) { struct st_h2o_http3_server_conn_t *conn = get_conn(stream); size_t bytes_available = quicly_recvstate_bytes_available(&stream->quic->recvstate); assert(bytes_available <= stream->recvbuf.buf->size); const uint8_t *src = (const uint8_t *)stream->recvbuf.buf->bytes, *src_end = src + bytes_available; /* consume contiguous bytes */ if (quicly_stop_requested(stream->quic)) { src = src_end; } else { while (src != src_end) { int err; const char *err_desc = NULL; if ((err = stream->recvbuf.handle_input(stream, &src, src_end, &err_desc)) != 0) { if (err == H2O_HTTP3_ERROR_INCOMPLETE) { if (!quicly_recvstate_transfer_complete(&stream->quic->recvstate)) break; err = H2O_HTTP3_ERROR_GENERAL_PROTOCOL; err_desc = "incomplete frame"; } h2o_quic_close_connection(&conn->h3.super, err, err_desc); return; } if (quicly_stop_requested(stream->quic)) { src = src_end; break; } } } size_t bytes_consumed = src - (const uint8_t *)stream->recvbuf.buf->bytes; h2o_buffer_consume(&stream->recvbuf.buf, bytes_consumed); quicly_stream_sync_recvbuf(stream->quic, bytes_consumed); if (quicly_stop_requested(stream->quic)) return; if (stream->tunnel != NULL) { if (stream->tunnel->tunnel != NULL && !stream->tunnel->up.is_inflight) tunnel_write(stream); return; } if (quicly_recvstate_transfer_complete(&stream->quic->recvstate)) { if (stream->recvbuf.buf->size == 0 && (stream->recvbuf.handle_input == handle_input_expect_data || stream->recvbuf.handle_input == handle_input_post_trailers)) { /* have complete request, advance the state and process the request */ if (stream->req.content_length != SIZE_MAX && stream->req.content_length != stream->req.req_body_bytes_received) { /* the request terminated abruptly; reset the stream as we do for HTTP/2 */ shutdown_stream(stream, H2O_HTTP3_ERROR_NONE /* ignored */, stream->req.req_body_bytes_received < stream->req.content_length ? 
H2O_HTTP3_ERROR_REQUEST_INCOMPLETE : H2O_HTTP3_ERROR_GENERAL_PROTOCOL, 0); } else { if (stream->req.write_req.cb != NULL) { if (!h2o_linklist_is_linked(&stream->link)) h2o_linklist_insert(&conn->delayed_streams.req_streaming, &stream->link); request_run_delayed(conn); } else if (!stream->req.process_called && stream->state < H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS) { /* process the request, if we haven't called h2o_process_request nor send an error response */ switch (stream->state) { case H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS: case H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK: case H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_UNBLOCKED: break; default: assert(!"unexpected state"); break; } set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_REQ_PENDING, 0); h2o_linklist_insert(&conn->delayed_streams.pending, &stream->link); request_run_delayed(conn); } } } else { shutdown_stream(stream, H2O_HTTP3_ERROR_NONE /* ignored */, H2O_HTTP3_ERROR_REQUEST_INCOMPLETE, 0); } } else { if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK && stream->req_body != NULL && stream->req_body->size >= H2O_HTTP3_REQUEST_BODY_MIN_BYTES_TO_BLOCK) { /* switch to blocked state if the request body is becoming large (this limits the concurrency to the backend) */ stream->read_blocked = 1; h2o_linklist_insert(&conn->delayed_streams.recv_body_blocked, &stream->link); set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED, 0); check_run_blocked(conn); } else if (stream->req.write_req.cb != NULL && stream->req_body->size != 0) { /* in streaming mode, let the run_delayed invoke write_req */ if (!h2o_linklist_is_linked(&stream->link)) h2o_linklist_insert(&conn->delayed_streams.req_streaming, &stream->link); request_run_delayed(conn); } } } static void on_receive(quicly_stream_t *qs, size_t off, const void *input, size_t len) { struct st_h2o_http3_server_stream_t *stream = qs->data; /* save received data (FIXME avoid copying if possible; see hqclient.c) */ h2o_http3_update_recvbuf(&stream->recvbuf.buf, off, input, len); if (stream->read_blocked) return; /* handle input (FIXME propage err_desc) */ handle_buffered_input(stream); } static void on_receive_reset(quicly_stream_t *qs, int err) { struct st_h2o_http3_server_stream_t *stream = qs->data; /* if we were still receiving the request, discard! 
*/ if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS) { if (h2o_linklist_is_linked(&stream->link)) h2o_linklist_unlink(&stream->link); shutdown_stream(stream, H2O_HTTP3_ERROR_NONE /* ignored */, H2O_HTTP3_ERROR_REQUEST_REJECTED, 0); } } static void proceed_request_streaming(h2o_req_t *_req, size_t bytes_written, h2o_send_state_t state) { struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req); struct st_h2o_http3_server_conn_t *conn = get_conn(stream); assert(stream->req_body != NULL); assert(!h2o_linklist_is_linked(&stream->link)); assert(conn->num_streams_req_streaming != 0); if (state != H2O_SEND_STATE_IN_PROGRESS) { /* tidy up the request streaming */ stream->req.write_req.cb = NULL; stream->req.write_req.ctx = NULL; stream->req.proceed_req = NULL; --conn->num_streams_req_streaming; check_run_blocked(conn); /* close the stream if an error occurred */ if (state == H2O_SEND_STATE_ERROR) { shutdown_stream(stream, H2O_HTTP3_ERROR_INTERNAL, H2O_HTTP3_ERROR_INTERNAL, 1); return; } } /* remove the bytes from the request body buffer */ assert(stream->req_body->size == bytes_written); h2o_buffer_consume(&stream->req_body, bytes_written); stream->req.entity = h2o_iovec_init(NULL, 0); /* unblock read until the next invocation of write_req, or after the final invocation */ stream->read_blocked = 0; /* handle input in the receive buffer */ handle_buffered_input(stream); } static void run_delayed(h2o_timer_t *timer) { struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, timeout, timer); int made_progress; do { made_progress = 0; /* promote blocked stream to unblocked state, if possible */ if (conn->num_streams.recv_body_unblocked + conn->num_streams_req_streaming == 0 && !h2o_linklist_is_empty(&conn->delayed_streams.recv_body_blocked)) { struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, link, conn->delayed_streams.recv_body_blocked.next); assert(stream->state == H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED); assert(stream->read_blocked); h2o_linklist_unlink(&stream->link); made_progress = 1; quicly_stream_set_receive_window(stream->quic, conn->super.ctx->globalconf->http3.active_stream_window_size); if (h2o_req_can_stream_request(&stream->req)) { /* use streaming mode */ ++conn->num_streams_req_streaming; stream->req.proceed_req = proceed_request_streaming; set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0); h2o_process_request(&stream->req); } else { /* unblock, read the bytes in receive buffer */ stream->read_blocked = 0; set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_UNBLOCKED, 0); handle_buffered_input(stream); if (quicly_get_state(conn->h3.super.quic) >= QUICLY_STATE_CLOSING) return; } } /* process streams using request streaming, that have new data to submit */ while (!h2o_linklist_is_empty(&conn->delayed_streams.req_streaming)) { struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, link, conn->delayed_streams.req_streaming.next); assert(stream->req.process_called); assert(stream->req.write_req.cb != NULL); assert(stream->req_body != NULL); assert(stream->req_body->size != 0); assert(!stream->read_blocked); h2o_linklist_unlink(&stream->link); stream->read_blocked = 1; made_progress = 1; if (stream->req.write_req.cb(stream->req.write_req.ctx, h2o_iovec_init(stream->req_body->bytes, stream->req_body->size), 
quicly_recvstate_transfer_complete(&stream->quic->recvstate)) != 0) { shutdown_stream(stream, H2O_HTTP3_ERROR_INTERNAL, H2O_HTTP3_ERROR_INTERNAL, 0); } } /* process the requests (not in streaming mode); TODO cap concurrency? */ while (!h2o_linklist_is_empty(&conn->delayed_streams.pending)) { struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, link, conn->delayed_streams.pending.next); assert(stream->state == H2O_HTTP3_SERVER_STREAM_STATE_REQ_PENDING); assert(!stream->req.process_called); assert(!stream->read_blocked); h2o_linklist_unlink(&stream->link); made_progress = 1; set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0); h2o_process_request(&stream->req); } } while (made_progress); } int handle_input_post_trailers(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, const char **err_desc) { h2o_http3_read_frame_t frame; int ret; /* read and ignore unknown frames */ if ((ret = h2o_http3_read_frame(&frame, 0, H2O_HTTP3_STREAM_TYPE_REQUEST, src, src_end, err_desc)) != 0) return ret; switch (frame.type) { case H2O_HTTP3_FRAME_TYPE_HEADERS: case H2O_HTTP3_FRAME_TYPE_DATA: return H2O_HTTP3_ERROR_FRAME_UNEXPECTED; default: break; } return 0; } static int handle_input_expect_data_payload(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, const char **err_desc) { size_t bytes_avail = src_end - *src; /* append data to body buffer */ if (bytes_avail > stream->recvbuf.bytes_left_in_data_frame) bytes_avail = stream->recvbuf.bytes_left_in_data_frame; if (stream->req_body == NULL) h2o_buffer_init(&stream->req_body, &h2o_socket_buffer_prototype); if (!h2o_buffer_try_append(&stream->req_body, *src, bytes_avail)) return H2O_HTTP3_ERROR_INTERNAL; stream->req.entity = h2o_iovec_init(stream->req_body->bytes, stream->req_body->size); stream->req.req_body_bytes_received += bytes_avail; stream->recvbuf.bytes_left_in_data_frame -= bytes_avail; *src += bytes_avail; if (stream->recvbuf.bytes_left_in_data_frame == 0) stream->recvbuf.handle_input = handle_input_expect_data; return 0; } int handle_input_expect_data(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, const char **err_desc) { h2o_http3_read_frame_t frame; int ret; /* read frame */ if ((ret = h2o_http3_read_frame(&frame, 0, H2O_HTTP3_STREAM_TYPE_REQUEST, src, src_end, err_desc)) != 0) return ret; switch (frame.type) { case H2O_HTTP3_FRAME_TYPE_HEADERS: /* when in tunnel mode, trailers forbidden */ if (stream->tunnel != NULL) { *err_desc = "unexpected frame type"; return H2O_HTTP3_ERROR_FRAME_UNEXPECTED; } /* trailers, ignore but disallow succeeding DATA or HEADERS frame */ stream->recvbuf.handle_input = handle_input_post_trailers; return 0; case H2O_HTTP3_FRAME_TYPE_DATA: if (stream->req.content_length != SIZE_MAX && stream->req.content_length - stream->req.req_body_bytes_received < frame.length) { /* The only viable option here is to reset the stream, as we might have already started streaming the request body * upstream. This behavior is consistent with what we do in HTTP/2. 
*/ shutdown_stream(stream, H2O_HTTP3_ERROR_EARLY_RESPONSE, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, 0); return 0; } break; default: return 0; } /* got a DATA frame */ if (frame.length != 0) { stream->recvbuf.handle_input = handle_input_expect_data_payload; stream->recvbuf.bytes_left_in_data_frame = frame.length; } return 0; } static int handle_input_expect_headers_send_http_error(struct st_h2o_http3_server_stream_t *stream, void (*sendfn)(h2o_req_t *, const char *, const char *, int), const char *reason, const char *body, const char **err_desc) { if (!quicly_recvstate_transfer_complete(&stream->quic->recvstate)) quicly_request_stop(stream->quic, H2O_HTTP3_ERROR_EARLY_RESPONSE); set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0); sendfn(&stream->req, reason, body, 0); *err_desc = NULL; return 0; } static int handle_input_expect_headers_process_connect(struct st_h2o_http3_server_stream_t *stream, h2o_iovec_t *datagram_flow_id_field, const char **err_desc) { if (stream->req.content_length != SIZE_MAX) return handle_input_expect_headers_send_http_error(stream, h2o_send_error_400, "Invalid Request", "CONNECT request cannot have request body", err_desc); uint64_t datagram_flow_id = UINT64_MAX; if (datagram_flow_id_field != NULL) { /* CONNECT-UDP */ if (datagram_flow_id_field->base != NULL) { /* check if it can be used */ if (!h2o_http3_can_use_h3_datagram(&get_conn(stream)->h3)) return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; /* TODO implement proper parsing */ datagram_flow_id = 0; for (const char *p = datagram_flow_id_field->base; p != datagram_flow_id_field->base + datagram_flow_id_field->len; ++p) { if (!('0' <= *p && *p <= '9')) break; datagram_flow_id = datagram_flow_id * 10 + *p - '0'; } } } set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0); stream->tunnel = h2o_mem_alloc(sizeof(*stream->tunnel)); stream->tunnel->tunnel = NULL; stream->tunnel->stream = stream; stream->tunnel->datagram_flow_id = datagram_flow_id; stream->tunnel->up.is_inflight = 0; stream->tunnel->up.delayed_write = (h2o_timer_t){.cb = tunnel_write_delayed}; h2o_process_request(&stream->req); return 0; } static int handle_input_expect_headers(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, const char **err_desc) { struct st_h2o_http3_server_conn_t *conn = get_conn(stream); h2o_http3_read_frame_t frame; int header_exists_map = 0, ret; h2o_iovec_t datagram_flow_id = {}; uint8_t header_ack[H2O_HPACK_ENCODE_INT_MAX_LENGTH]; size_t header_ack_len; /* read the HEADERS frame (or a frame that precedes that) */ if ((ret = h2o_http3_read_frame(&frame, 0, H2O_HTTP3_STREAM_TYPE_REQUEST, src, src_end, err_desc)) != 0) return ret; if (frame.type != H2O_HTTP3_FRAME_TYPE_HEADERS) { switch (frame.type) { case H2O_HTTP3_FRAME_TYPE_DATA: return H2O_HTTP3_ERROR_FRAME_UNEXPECTED; default: break; } return 0; } stream->recvbuf.handle_input = handle_input_expect_data; /* parse the headers, and ack */ if ((ret = h2o_qpack_parse_request(&stream->req.pool, get_conn(stream)->h3.qpack.dec, stream->quic->stream_id, &stream->req.input.method, &stream->req.input.scheme, &stream->req.input.authority, &stream->req.input.path, &stream->req.headers, &header_exists_map, &stream->req.content_length, NULL /* TODO cache-digests */, &datagram_flow_id, header_ack, &header_ack_len, frame.payload, frame.length, err_desc)) != 0 && ret != H2O_HTTP2_ERROR_INVALID_HEADER_CHAR) return ret; if (header_ack_len != 0) h2o_http3_send_qpack_header_ack(&conn->h3, header_ack, header_ack_len); if (stream->req.input.scheme 
== NULL) stream->req.input.scheme = &H2O_URL_SCHEME_HTTPS; h2o_probe_log_request(&stream->req, stream->quic->stream_id); int is_connect = h2o_memis(stream->req.input.method.base, stream->req.input.method.len, H2O_STRLIT("CONNECT")); int is_connect_udp = h2o_memis(stream->req.input.method.base, stream->req.input.method.len, H2O_STRLIT("CONNECT-UDP")); /* check if existence and non-existence of pseudo headers are correct */ int expected_map = H2O_HPACK_PARSE_HEADERS_METHOD_EXISTS | H2O_HPACK_PARSE_HEADERS_AUTHORITY_EXISTS; if (!is_connect && !is_connect_udp) expected_map |= H2O_HPACK_PARSE_HEADERS_SCHEME_EXISTS | H2O_HPACK_PARSE_HEADERS_PATH_EXISTS; if (is_connect_udp) { /* only require method and authority for connect-udp for now, ignore if the others are set */ if ((header_exists_map & expected_map) != expected_map) { shutdown_stream(stream, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, 0); return 0; } } else { if (header_exists_map != expected_map) { shutdown_stream(stream, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, 0); return 0; } } /* send a 400 error when observing an invalid header character */ if (ret == H2O_HTTP2_ERROR_INVALID_HEADER_CHAR) return handle_input_expect_headers_send_http_error(stream, h2o_send_error_400, "Invalid Request", *err_desc, err_desc); /* validate semantic requirement */ if (!h2o_req_validate_pseudo_headers(&stream->req)) return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; /* check if content-length is within the permitted bounds */ if (stream->req.content_length != SIZE_MAX && stream->req.content_length > conn->super.ctx->globalconf->max_request_entity_size) return handle_input_expect_headers_send_http_error(stream, h2o_send_error_413, "Request Entity Too Large", "request entity is too large", err_desc); /* set priority */ assert(!h2o_linklist_is_linked(&stream->scheduler.link)); if (!stream->received_priority_update) { ssize_t index; if ((index = h2o_find_header(&stream->req.headers, H2O_TOKEN_PRIORITY, -1)) != -1) { h2o_iovec_t *value = &stream->req.headers.entries[index].value; h2o_absprio_parse_priority(value->base, value->len, &stream->scheduler.priority); } } /* special handling of CONNECT method */ if (is_connect) { return handle_input_expect_headers_process_connect(stream, NULL, err_desc); } else if (h2o_memis(stream->req.input.method.base, stream->req.input.method.len, H2O_STRLIT("CONNECT-UDP"))) { return handle_input_expect_headers_process_connect(stream, &datagram_flow_id, err_desc); } /* change state */ set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK, 0); return 0; } static void write_response(struct st_h2o_http3_server_stream_t *stream, h2o_iovec_t datagram_flow_id) { h2o_iovec_t frame = h2o_qpack_flatten_response( get_conn(stream)->h3.qpack.enc, &stream->req.pool, stream->quic->stream_id, NULL, stream->req.res.status, stream->req.res.headers.entries, stream->req.res.headers.size, &get_conn(stream)->super.ctx->globalconf->server_name, stream->req.res.content_length, datagram_flow_id); h2o_vector_reserve(&stream->req.pool, &stream->sendbuf.vecs, stream->sendbuf.vecs.size + 1); struct st_h2o_http3_server_sendvec_t *vec = stream->sendbuf.vecs.entries + stream->sendbuf.vecs.size++; h2o_sendvec_init_immutable(&vec->vec, frame.base, frame.len); vec->entity_offset = UINT64_MAX; stream->sendbuf.final_size += frame.len; } static size_t flatten_data_frame_header(struct st_h2o_http3_server_stream_t *stream, struct st_h2o_http3_server_sendvec_t *dst, size_t payload_size) { size_t header_size = 0; /* 
build header */ stream->sendbuf.data_frame_header_buf[header_size++] = H2O_HTTP3_FRAME_TYPE_DATA; header_size = quicly_encodev(stream->sendbuf.data_frame_header_buf + header_size, payload_size) - stream->sendbuf.data_frame_header_buf; /* initilaize the vector */ h2o_sendvec_init_raw(&dst->vec, stream->sendbuf.data_frame_header_buf, header_size); dst->entity_offset = UINT64_MAX; return header_size; } static void shutdown_by_generator(struct st_h2o_http3_server_stream_t *stream) { quicly_sendstate_shutdown(&stream->quic->sendstate, stream->sendbuf.final_size); if (stream->sendbuf.vecs.size == 0) set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT, 1); } static void finalize_do_send(struct st_h2o_http3_server_stream_t *stream) { quicly_stream_sync_sendbuf(stream->quic, 1); if (!stream->proceed_while_sending) h2o_quic_schedule_timer(&get_conn(stream)->h3.super); } static void do_send(h2o_ostream_t *_ostr, h2o_req_t *_req, h2o_sendvec_t *bufs, size_t bufcnt, h2o_send_state_t send_state) { struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, ostr_final, _ostr); assert(&stream->req == _req); stream->proceed_requested = 0; if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS) { write_response(stream, h2o_iovec_init(NULL, 0)); h2o_probe_log_response(&stream->req, stream->quic->stream_id, NULL); set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY, 1); } else { assert(stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY); assert(quicly_sendstate_is_open(&stream->quic->sendstate)); } /* If vectors carrying response body are being provided, copy them, incrementing the reference count if possible (for future * retransmissions), as well as prepending a DATA frame header */ if (bufcnt != 0) { h2o_vector_reserve(&stream->req.pool, &stream->sendbuf.vecs, stream->sendbuf.vecs.size + 1 + bufcnt); uint64_t prev_body_size = stream->sendbuf.final_body_size; for (size_t i = 0; i != bufcnt; ++i) { /* copy one body vector */ struct st_h2o_http3_server_sendvec_t *dst = stream->sendbuf.vecs.entries + stream->sendbuf.vecs.size + i + 1; dst->vec = bufs[i]; dst->entity_offset = stream->sendbuf.final_body_size; stream->sendbuf.final_body_size += bufs[i].len; /* retain reference count if possible */ if (bufs[i].callbacks->update_refcnt != NULL) bufs[i].callbacks->update_refcnt(bufs + i, &stream->req, 1); } uint64_t payload_size = stream->sendbuf.final_body_size - prev_body_size; /* build DATA frame header */ size_t header_size = flatten_data_frame_header(stream, stream->sendbuf.vecs.entries + stream->sendbuf.vecs.size, payload_size); /* update properties */ stream->sendbuf.vecs.size += 1 + bufcnt; stream->sendbuf.final_size += header_size + payload_size; } switch (send_state) { case H2O_SEND_STATE_IN_PROGRESS: break; case H2O_SEND_STATE_FINAL: case H2O_SEND_STATE_ERROR: /* TODO consider how to forward error, pending resolution of https://github.com/quicwg/base-drafts/issues/3300 */ shutdown_by_generator(stream); break; } finalize_do_send(stream); } static void do_send_informational(h2o_ostream_t *_ostr, h2o_req_t *_req) { struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, ostr_final, _ostr); assert(&stream->req == _req); write_response(stream, h2o_iovec_init(NULL, 0)); finalize_do_send(stream); } static void tunnel_on_read(h2o_tunnel_t *_tunnel, const char *err, const void *bytes, size_t len) { struct st_h2o_http3_server_stream_t *stream = _tunnel->data; stream->proceed_requested = 0; /* 
append DATA frame */ if (len != 0) { h2o_vector_reserve(&stream->req.pool, &stream->sendbuf.vecs, stream->sendbuf.vecs.size + 2); /* DATA frame header */ size_t header_size = flatten_data_frame_header(stream, stream->sendbuf.vecs.entries + stream->sendbuf.vecs.size++, len); /* payload */ struct st_h2o_http3_server_sendvec_t *vec = stream->sendbuf.vecs.entries + stream->sendbuf.vecs.size++; h2o_sendvec_init_raw(&vec->vec, bytes, len); vec->entity_offset = stream->sendbuf.final_body_size; stream->sendbuf.final_body_size += len; /* update final offset */ stream->sendbuf.final_size += header_size + len; } /* EOS */ if (err != NULL) { destroy_tunnel(stream); shutdown_by_generator(stream); } finalize_do_send(stream); } static void tunnel_on_udp_read(h2o_tunnel_t *_tunnel, h2o_iovec_t *datagrams, size_t num_datagrams) { struct st_h2o_http3_server_stream_t *stream = _tunnel->data; h2o_http3_send_h3_datagrams(&get_conn(stream)->h3, stream->tunnel->datagram_flow_id, datagrams, num_datagrams); } void tunnel_write(struct st_h2o_http3_server_stream_t *stream) { size_t bytes_to_send; assert(!stream->tunnel->up.is_inflight); if (stream->req_body == NULL || (bytes_to_send = stream->req_body->size) == 0) return; /* move chunk of data into stream->tunnel.up.buf */ if (bytes_to_send > sizeof(stream->tunnel->up.bytes_inflight)) bytes_to_send = sizeof(stream->tunnel->up.bytes_inflight); memcpy(stream->tunnel->up.bytes_inflight, stream->req_body->bytes, bytes_to_send); stream->tunnel->up.is_inflight = 1; h2o_buffer_consume(&stream->req_body, bytes_to_send); /* send */ stream->tunnel->tunnel->write_(stream->tunnel->tunnel, stream->tunnel->up.bytes_inflight, bytes_to_send); } void tunnel_write_delayed(h2o_timer_t *timer) { struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_tunnel_t, up.delayed_write, timer)->stream; tunnel_write(stream); } static void tunnel_on_write_complete(h2o_tunnel_t *tunnel, const char *err) { struct st_h2o_http3_server_stream_t *stream = tunnel->data; assert(stream->tunnel->up.is_inflight); stream->tunnel->up.is_inflight = 0; if (err != NULL) { destroy_tunnel(stream); shutdown_by_generator(stream); return; } tunnel_write(stream); } static void establish_tunnel(h2o_req_t *req, h2o_tunnel_t *tunnel, uint64_t idle_timeout) { struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, req); h2o_iovec_t datagram_flow_id = {}; if (stream->tunnel == NULL) { /* the tunnel has been closed in the meantime */ return; } stream->tunnel->tunnel = tunnel; tunnel->data = stream; tunnel->on_write_complete = tunnel_on_write_complete; tunnel->on_read = tunnel_on_read; /* setup datagram-level tunneling if possible */ if (stream->tunnel->datagram_flow_id != UINT64_MAX) { if (tunnel->udp_write != NULL) { /* register to the map */ struct st_h2o_http3_server_conn_t *conn = get_conn(stream); int r; khiter_t iter = kh_put(stream, conn->datagram_flows, stream->tunnel->datagram_flow_id, &r); assert(iter != kh_end(conn->datagram_flows)); kh_val(conn->datagram_flows, iter) = stream; /* set the callback */ tunnel->on_udp_read = tunnel_on_udp_read; /* build the header field */ datagram_flow_id.base = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT64_LONGEST_STR)); datagram_flow_id.len = sprintf(datagram_flow_id.base, "%" PRIu64, stream->tunnel->datagram_flow_id); } else { stream->tunnel->datagram_flow_id = UINT64_MAX; } } write_response(stream, datagram_flow_id); h2o_probe_log_response(&stream->req, 
stream->quic->stream_id, stream->tunnel->tunnel); set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY, 1); finalize_do_send(stream); assert(!stream->proceed_while_sending); stream->proceed_requested = 1; /* suppress invocation of `tunnel->proceed_read` until `tunnel_on_read` gets called */ if (stream->req_body != NULL) tunnel_write(stream); } static int handle_priority_update_frame(struct st_h2o_http3_server_conn_t *conn, const h2o_http3_priority_update_frame_t *frame) { if (frame->element_is_push) return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; /* obtain the stream being referred to (creating one if necessary), or return if the stream has been closed already */ quicly_stream_t *qs; if (quicly_get_or_open_stream(conn->h3.super.quic, frame->element, &qs) != 0) return H2O_HTTP3_ERROR_ID; if (qs == NULL) return 0; /* apply the changes */ struct st_h2o_http3_server_stream_t *stream = qs->data; assert(stream != NULL); stream->received_priority_update = 1; if (h2o_linklist_is_linked(&stream->scheduler.link)) { req_scheduler_deactivate(&conn->scheduler.reqs, &stream->scheduler); stream->scheduler.priority = frame->priority; /* TODO apply only the delta? */ req_scheduler_activate(&conn->scheduler.reqs, &stream->scheduler, req_scheduler_compare_stream_id); } else { stream->scheduler.priority = frame->priority; /* TODO apply only the delta? */ } return 0; } static void handle_control_stream_frame(h2o_http3_conn_t *_conn, uint8_t type, const uint8_t *payload, size_t len) { struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, _conn); int err; const char *err_desc = NULL; if (!h2o_http3_has_received_settings(&conn->h3)) { if (type != H2O_HTTP3_FRAME_TYPE_SETTINGS) { err = H2O_HTTP3_ERROR_MISSING_SETTINGS; goto Fail; } if ((err = h2o_http3_handle_settings_frame(&conn->h3, payload, len, &err_desc)) != 0) goto Fail; assert(h2o_http3_has_received_settings(&conn->h3)); } else { switch (type) { case H2O_HTTP3_FRAME_TYPE_SETTINGS: err = H2O_HTTP3_ERROR_FRAME_UNEXPECTED; err_desc = "unexpected SETTINGS frame"; goto Fail; case H2O_HTTP3_FRAME_TYPE_PRIORITY_UPDATE: { h2o_http3_priority_update_frame_t frame; if ((err = h2o_http3_decode_priority_update_frame(&frame, payload, len, &err_desc)) != 0) goto Fail; if ((err = handle_priority_update_frame(conn, &frame)) != 0) { err_desc = "invalid PRIORITY_UPDATE frame"; goto Fail; } } break; default: break; } } return; Fail: h2o_quic_close_connection(&conn->h3.super, err, err_desc); } static int stream_open_cb(quicly_stream_open_t *self, quicly_stream_t *qs) { static const quicly_stream_callbacks_t callbacks = {on_stream_destroy, on_send_shift, on_send_emit, on_send_stop, on_receive, on_receive_reset}; /* handling of unidirectional streams is not server-specific */ if (quicly_stream_is_unidirectional(qs->stream_id)) { h2o_http3_on_create_unidirectional_stream(qs); return 0; } assert(quicly_stream_is_client_initiated(qs->stream_id)); struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qs->conn)); /* create new stream and start handling the request */ struct st_h2o_http3_server_stream_t *stream = h2o_mem_alloc(sizeof(*stream)); stream->quic = qs; h2o_buffer_init(&stream->recvbuf.buf, &h2o_socket_buffer_prototype); stream->recvbuf.handle_input = handle_input_expect_headers; memset(&stream->sendbuf, 0, sizeof(stream->sendbuf)); stream->state = H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS; stream->link = (h2o_linklist_t){NULL}; stream->ostr_final = 
(h2o_ostream_t){NULL, do_send, NULL, do_send_informational}; stream->scheduler.link = (h2o_linklist_t){NULL}; stream->scheduler.priority = h2o_absprio_default; stream->scheduler.call_cnt = 0; stream->read_blocked = 0; stream->proceed_requested = 0; stream->proceed_while_sending = 0; stream->received_priority_update = 0; stream->req_disposed = 0; stream->req_body = NULL; stream->tunnel = NULL; h2o_init_request(&stream->req, &conn->super, NULL); stream->req.version = 0x0300; stream->req._ostr_top = &stream->ostr_final; stream->req.establish_tunnel = establish_tunnel; stream->quic->data = stream; stream->quic->callbacks = &callbacks; ++*get_state_counter(get_conn(stream), stream->state); return 0; } static quicly_stream_open_t on_stream_open = {stream_open_cb}; static void unblock_conn_blocked_streams(struct st_h2o_http3_server_conn_t *conn) { conn->scheduler.uni.active |= conn->scheduler.uni.conn_blocked; conn->scheduler.uni.conn_blocked = 0; req_scheduler_unblock_conn_blocked(&conn->scheduler.reqs, req_scheduler_compare_stream_id); } static int scheduler_can_send(quicly_stream_scheduler_t *sched, quicly_conn_t *qc, int conn_is_saturated) { struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qc)); if (!conn_is_saturated) { /* not saturated, activate streams marked as being conn-blocked */ unblock_conn_blocked_streams(conn); } else { /* TODO lazily move the active request and unidirectional streams to conn_blocked. Not doing so results in at most one * spurious call to quicly_send. */ } if (conn->scheduler.uni.active != 0) return 1; if (conn->scheduler.reqs.active.smallest_urgency < H2O_ABSPRIO_NUM_URGENCY_LEVELS) return 1; return 0; } static int scheduler_do_send(quicly_stream_scheduler_t *sched, quicly_conn_t *qc, quicly_send_context_t *s) { struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qc)); int ret = 0; while (quicly_can_send_data(conn->h3.super.quic, s)) { /* The strategy is: * * 1. dequeue the first active stream * 2. link the stream to the conn_blocked list, if nothing can be sent for the stream due to the connection being capped * 3. otherwise, send * 4. enqueue to the appropriate place */ if (conn->scheduler.uni.active != 0) { static const ptrdiff_t stream_offsets[] = { offsetof(struct st_h2o_http3_server_conn_t, h3._control_streams.egress.control), offsetof(struct st_h2o_http3_server_conn_t, h3._control_streams.egress.qpack_encoder), offsetof(struct st_h2o_http3_server_conn_t, h3._control_streams.egress.qpack_decoder)}; /* 1. obtain pointer to the offending stream */ struct st_h2o_http3_egress_unistream_t *stream = NULL; size_t i; for (i = 0; i != sizeof(stream_offsets) / sizeof(stream_offsets[0]); ++i) { stream = *(void **)((char *)conn + stream_offsets[i]); if ((conn->scheduler.uni.active & (1 << stream->quic->stream_id)) != 0) break; } assert(i != sizeof(stream_offsets) / sizeof(stream_offsets[0]) && "we should have found one stream"); /* 2. move to the conn_blocked list if necessary */ if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) { conn->scheduler.uni.active &= ~(1 << stream->quic->stream_id); conn->scheduler.uni.conn_blocked |= 1 << stream->quic->stream_id; continue; } /* 3. send */ if ((ret = quicly_send_stream(stream->quic, s)) != 0) goto Exit; /* 4. 
update scheduler state */ conn->scheduler.uni.active &= ~(1 << stream->quic->stream_id); if (quicly_stream_can_send(stream->quic, 1)) { uint16_t *slot = &conn->scheduler.uni.active; if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) slot = &conn->scheduler.uni.conn_blocked; *slot |= 1 << stream->quic->stream_id; } } else if (conn->scheduler.reqs.active.smallest_urgency < H2O_ABSPRIO_NUM_URGENCY_LEVELS) { /* 1. obtain pointer to the offending stream */ h2o_linklist_t *anchor = &conn->scheduler.reqs.active.urgencies[conn->scheduler.reqs.active.smallest_urgency].high; if (h2o_linklist_is_empty(anchor)) { anchor = &conn->scheduler.reqs.active.urgencies[conn->scheduler.reqs.active.smallest_urgency].low; assert(!h2o_linklist_is_empty(anchor)); } struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, scheduler.link, anchor->next); /* 1. link to the conn_blocked list if necessary */ if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) { req_scheduler_conn_blocked(&conn->scheduler.reqs, &stream->scheduler); continue; } /* 3. send */ if ((ret = quicly_send_stream(stream->quic, s)) != 0) goto Exit; ++stream->scheduler.call_cnt; /* 4. invoke h2o_proceed_request synchronously, so that we could obtain additional data for the current (i.e. highest) * stream. */ if (stream->proceed_while_sending) { assert(stream->proceed_requested); if (stream->tunnel != NULL) { if (quicly_sendstate_is_open(&stream->quic->sendstate)) { stream->tunnel->tunnel->proceed_read(stream->tunnel->tunnel); } else { assert(stream->tunnel->tunnel == NULL); } } else { h2o_proceed_response(&stream->req); } stream->proceed_while_sending = 0; } /* 5. prepare for next */ if (quicly_stream_can_send(stream->quic, 1)) { if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) { /* capped by connection-level flow control, move the stream to conn-blocked */ req_scheduler_conn_blocked(&conn->scheduler.reqs, &stream->scheduler); } else { /* schedule for next emission */ req_scheduler_setup_for_next(&conn->scheduler.reqs, &stream->scheduler, req_scheduler_compare_stream_id); } } else { /* nothing to send at this moment */ req_scheduler_deactivate(&conn->scheduler.reqs, &stream->scheduler); } } else { break; } } Exit: return ret; } static int scheduler_update_state(struct st_quicly_stream_scheduler_t *sched, quicly_stream_t *qs) { struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qs->conn)); enum { DEACTIVATE, ACTIVATE, CONN_BLOCKED } new_state; if (quicly_stream_can_send(qs, 1)) { if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(qs, 0)) { new_state = CONN_BLOCKED; } else { new_state = ACTIVATE; } } else { new_state = DEACTIVATE; } if (quicly_stream_is_unidirectional(qs->stream_id)) { assert(qs->stream_id < sizeof(uint16_t) * 8); uint16_t mask = (uint16_t)1 << qs->stream_id; switch (new_state) { case DEACTIVATE: conn->scheduler.uni.active &= ~mask; conn->scheduler.uni.conn_blocked &= ~mask; break; case ACTIVATE: conn->scheduler.uni.active |= mask; conn->scheduler.uni.conn_blocked &= ~mask; break; case CONN_BLOCKED: conn->scheduler.uni.active &= ~mask; conn->scheduler.uni.conn_blocked |= mask; break; } } else { struct st_h2o_http3_server_stream_t *stream = qs->data; if (stream->proceed_while_sending) return 0; switch (new_state) { case DEACTIVATE: req_scheduler_deactivate(&conn->scheduler.reqs, 
&stream->scheduler); break; case ACTIVATE: req_scheduler_activate(&conn->scheduler.reqs, &stream->scheduler, req_scheduler_compare_stream_id); break; case CONN_BLOCKED: req_scheduler_conn_blocked(&conn->scheduler.reqs, &stream->scheduler); break; } } return 0; } static quicly_stream_scheduler_t scheduler = {scheduler_can_send, scheduler_do_send, scheduler_update_state}; static void datagram_frame_receive_cb(quicly_receive_datagram_frame_t *self, quicly_conn_t *quic, ptls_iovec_t datagram) { struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(quic)); uint64_t flow_id; h2o_iovec_t payload; /* decode */ if ((flow_id = h2o_http3_decode_h3_datagram(&payload, datagram.base, datagram.len)) == UINT64_MAX) { h2o_quic_close_connection(&conn->h3.super, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, "invalid DATAGRAM frame"); return; } /* find stream */ khiter_t iter = kh_get(stream, conn->datagram_flows, flow_id); if (iter == kh_end(conn->datagram_flows)) return; struct st_h2o_http3_server_stream_t *stream = kh_val(conn->datagram_flows, iter); assert(stream->tunnel->tunnel != NULL); /* forward */ stream->tunnel->tunnel->udp_write(stream->tunnel->tunnel, &payload, 1); } static quicly_receive_datagram_frame_t on_receive_datagram_frame = {datagram_frame_receive_cb}; static void on_h3_destroy(h2o_quic_conn_t *h3_) { h2o_http3_conn_t *h3 = (h2o_http3_conn_t *)h3_; struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, h3); H2O_PROBE_CONN0(H3S_DESTROY, &conn->super); /* unlink and dispose */ h2o_linklist_unlink(&conn->_conns); if (h2o_timer_is_linked(&conn->timeout)) h2o_timer_unlink(&conn->timeout); h2o_http3_dispose_conn(&conn->h3); /* check consistency post-disposal */ assert(conn->num_streams.recv_headers == 0); assert(conn->num_streams.req_pending == 0); assert(conn->num_streams.send_headers == 0); assert(conn->num_streams.send_body == 0); assert(conn->num_streams.close_wait == 0); assert(conn->num_streams_req_streaming == 0); assert(h2o_linklist_is_empty(&conn->delayed_streams.recv_body_blocked)); assert(h2o_linklist_is_empty(&conn->delayed_streams.req_streaming)); assert(h2o_linklist_is_empty(&conn->delayed_streams.pending)); assert(conn->scheduler.reqs.active.smallest_urgency >= H2O_ABSPRIO_NUM_URGENCY_LEVELS); assert(h2o_linklist_is_empty(&conn->scheduler.reqs.conn_blocked)); /* free memory */ free(conn); } h2o_http3_conn_t *h2o_http3_server_accept(h2o_http3_server_ctx_t *ctx, quicly_address_t *destaddr, quicly_address_t *srcaddr, quicly_decoded_packet_t *packet, quicly_address_token_plaintext_t *address_token, int skip_tracing, const h2o_http3_conn_callbacks_t *h3_callbacks) { static const h2o_conn_callbacks_t conn_callbacks = { .get_sockname = get_sockname, .get_peername = get_peername, .get_ptls = get_ptls, .skip_tracing = get_skip_tracing, .log_ = {{ .congestion_control = { .name_ = log_cc_name, }, .ssl = { .protocol_version = log_tls_protocol_version, .session_reused = log_session_reused, .cipher = log_cipher, .cipher_bits = log_cipher_bits, .session_id = log_session_id, .server_name = log_server_name, .negotiated_protocol = log_negotiated_protocol, }, .http3 = { .stream_id = log_stream_id, .quic_stats = log_quic_stats, }, }}, }; /* setup the structure */ struct st_h2o_http3_server_conn_t *conn = (void *)h2o_create_connection( sizeof(*conn), ctx->accept_ctx->ctx, ctx->accept_ctx->hosts, h2o_gettimeofday(ctx->accept_ctx->ctx->loop), &conn_callbacks); h2o_http3_init_conn(&conn->h3, 
&ctx->super, h3_callbacks, &ctx->qpack); conn->handshake_properties = (ptls_handshake_properties_t){{{{NULL}}}}; h2o_linklist_init_anchor(&conn->delayed_streams.recv_body_blocked); h2o_linklist_init_anchor(&conn->delayed_streams.req_streaming); h2o_linklist_init_anchor(&conn->delayed_streams.pending); h2o_timer_init(&conn->timeout, run_delayed); memset(&conn->num_streams, 0, sizeof(conn->num_streams)); conn->num_streams_req_streaming = 0; req_scheduler_init(&conn->scheduler.reqs); conn->scheduler.uni.active = 0; conn->scheduler.uni.conn_blocked = 0; conn->datagram_flows = kh_init(stream); conn->_conns = (h2o_linklist_t){}; /* accept connection */ #if PICOTLS_USE_DTRACE unsigned orig_skip_tracing = ptls_default_skip_tracing; ptls_default_skip_tracing = skip_tracing; #endif quicly_conn_t *qconn; int accept_ret = quicly_accept(&qconn, ctx->super.quic, &destaddr->sa, &srcaddr->sa, packet, address_token, &ctx->super.next_cid, &conn->handshake_properties); #if PICOTLS_USE_DTRACE ptls_default_skip_tracing = orig_skip_tracing; #endif if (accept_ret != 0) { h2o_http3_conn_t *ret = NULL; if (accept_ret == QUICLY_ERROR_DECRYPTION_FAILED) ret = (h2o_http3_conn_t *)H2O_QUIC_ACCEPT_CONN_DECRYPTION_FAILED; h2o_http3_dispose_conn(&conn->h3); free(conn); return ret; } ++ctx->super.next_cid.master_id; /* FIXME check overlap */ h2o_linklist_insert(&ctx->accept_ctx->ctx->http3._conns, &conn->_conns); h2o_http3_setup(&conn->h3, qconn); H2O_PROBE_CONN(H3S_ACCEPT, &conn->super, &conn->super, conn->h3.super.quic); h2o_quic_send(&conn->h3.super); return &conn->h3; } void h2o_http3_server_amend_quicly_context(h2o_globalconf_t *conf, quicly_context_t *quic) { quic->transport_params.max_data = conf->http3.active_stream_window_size; /* set to a size that does not block the unblocked request stream */ quic->transport_params.max_streams_uni = 10; quic->transport_params.max_stream_data.bidi_remote = H2O_HTTP3_INITIAL_REQUEST_STREAM_WINDOW_SIZE; quic->transport_params.max_idle_timeout = conf->http3.idle_timeout; quic->transport_params.min_ack_delay_usec = conf->http3.use_delayed_ack ? 0 : UINT64_MAX; quic->transport_params.max_datagram_frame_size = 1500; /* accept DATAGRAM frames; let the sender determine MTU, instead of being * potentially too restrictive */ quic->stream_open = &on_stream_open; quic->stream_scheduler = &scheduler; quic->receive_datagram_frame = &on_receive_datagram_frame; } static void graceful_shutdown_close_stragglers(h2o_timer_t *entry) { h2o_context_t *ctx = H2O_STRUCT_FROM_MEMBER(h2o_context_t, http3._graceful_shutdown_timeout, entry); h2o_linklist_t *node, *next; /* We've sent two GOAWAY frames, close the remaining connections */ for (node = ctx->http3._conns.next; node != &ctx->http3._conns; node = next) { struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, _conns, node); next = node->next; h2o_quic_close_connection(&conn->h3.super, 0, "shutting down"); } ctx->http3._graceful_shutdown_timeout.cb = NULL; } static void graceful_shutdown_resend_goaway(h2o_timer_t *entry) { h2o_context_t *ctx = H2O_STRUCT_FROM_MEMBER(h2o_context_t, http3._graceful_shutdown_timeout, entry); h2o_linklist_t *node; int do_close_stragglers = 0; /* HTTP/3 draft section 5.2.8 -- * "After allowing time for any in-flight requests or pushes to arrive, the endpoint can send another GOAWAY frame * indicating which requests or pushes it might accept before the end of the connection. * This ensures that a connection can be cleanly shut down without losing requests. 
*/ for (node = ctx->http3._conns.next; node != &ctx->http3._conns; node = node->next) { struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, _conns, node); if (conn->h3.state < H2O_HTTP3_CONN_STATE_HALF_CLOSED && quicly_get_state(conn->h3.super.quic) == QUICLY_STATE_CONNECTED) { quicly_stream_id_t next_stream_id = quicly_get_remote_next_stream_id(conn->h3.super.quic, 0 /* == bidi */); /* Section 5.2-1: "This identifier MAY be zero if no requests or pushes were processed."" */ quicly_stream_id_t max_stream_id = next_stream_id < 4 ? 0 /* we haven't received any stream yet */ : next_stream_id - 4; h2o_http3_send_goaway_frame(&conn->h3, max_stream_id); conn->h3.state = H2O_HTTP3_CONN_STATE_HALF_CLOSED; do_close_stragglers = 1; } } /* After waiting a second, we still had active connections. If configured, wait one * final timeout before closing the connections */ if (do_close_stragglers && ctx->globalconf->http3.graceful_shutdown_timeout > 0) { ctx->http3._graceful_shutdown_timeout.cb = graceful_shutdown_close_stragglers; h2o_timer_link(ctx->loop, ctx->globalconf->http3.graceful_shutdown_timeout, &ctx->http3._graceful_shutdown_timeout); } else { ctx->http3._graceful_shutdown_timeout.cb = NULL; } } static void initiate_graceful_shutdown(h2o_context_t *ctx) { h2o_linklist_t *node; /* only doit once */ if (ctx->http3._graceful_shutdown_timeout.cb != NULL) return; ctx->http3._graceful_shutdown_timeout.cb = graceful_shutdown_resend_goaway; for (node = ctx->http3._conns.next; node != &ctx->http3._conns; node = node->next) { struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, _conns, node); /* There is a moment where the control stream is already closed while st_h2o_http3_server_conn_t is not. * Check QUIC connection state to skip sending GOAWAY in such a case. */ if (conn->h3.state < H2O_HTTP3_CONN_STATE_HALF_CLOSED && quicly_get_state(conn->h3.super.quic) == QUICLY_STATE_CONNECTED) { /* advertise the maximum stream ID to indicate that we will no longer accept new requests. * HTTP/3 draft section 5.2.8 -- * "An endpoint that is attempting to gracefully shut down a connection can send a GOAWAY frame with a value set to the * maximum possible value (2^62-4 for servers, 2^62-1 for clients). This ensures that the peer stops creating new * requests or pushes." */ h2o_http3_send_goaway_frame(&conn->h3, (UINT64_C(1) << 62) - 4); } } h2o_timer_link(ctx->loop, 1000, &ctx->http3._graceful_shutdown_timeout); } struct foreach_request_ctx { int (*cb)(h2o_req_t *req, void *cbdata); void *cbdata; }; static int foreach_request_per_conn(void *_ctx, quicly_stream_t *qs) { struct foreach_request_ctx *ctx = _ctx; /* skip if the stream is not a request stream (TODO handle push?) 
*/ if (!(quicly_stream_is_client_initiated(qs->stream_id) && !quicly_stream_is_unidirectional(qs->stream_id))) return 0; struct st_h2o_http3_server_stream_t *stream = qs->data; assert(stream->quic == qs); if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT) return 0; return ctx->cb(&stream->req, ctx->cbdata); } static int foreach_request(h2o_context_t *ctx, int (*cb)(h2o_req_t *req, void *cbdata), void *cbdata) { struct foreach_request_ctx foreach_ctx = {.cb = cb, .cbdata = cbdata}; for (h2o_linklist_t *node = ctx->http3._conns.next; node != &ctx->http3._conns; node = node->next) { struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, _conns, node); quicly_foreach_stream(conn->h3.super.quic, &foreach_ctx, foreach_request_per_conn); } return 0; } const h2o_protocol_callbacks_t H2O_HTTP3_SERVER_CALLBACKS = {initiate_graceful_shutdown, foreach_request}; const h2o_http3_conn_callbacks_t H2O_HTTP3_CONN_CALLBACKS = {{on_h3_destroy}, handle_control_stream_frame};
1
14,987
This approach would work only if there's a guarantee that the stream does not get freed before `establish_tunnel` is called, even though it _may_ enter CLOSE_WAIT state (at which point `stream->tunnel` is set to NULL). I do not think that assumption holds. Hence the alternate approach.
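To make the lifetime concern concrete, here is a minimal, self-contained sketch (not the h2o implementation; the `toy_*` names are invented for illustration). The point it illustrates: the `stream->tunnel == NULL` guard in the deferred callback only helps if the stream object itself is guaranteed to outlive that callback. If the stream can be freed first, the guard itself becomes a use-after-free, which is exactly the assumption being questioned above.

```c
/* Hypothetical sketch, not the h2o code: all `toy_*` names are invented. */
#include <stdio.h>
#include <stdlib.h>

typedef struct toy_tunnel_state {
    int datagram_flow_id;
} toy_tunnel_state_t;

typedef struct toy_stream {
    int close_wait;             /* set once the stream enters CLOSE_WAIT */
    toy_tunnel_state_t *tunnel; /* detached (set to NULL) on CLOSE_WAIT  */
} toy_stream_t;

/* Entering CLOSE_WAIT frees the tunnel state but keeps the stream alive;
 * the whole pattern relies on that second property. */
static void toy_enter_close_wait(toy_stream_t *stream)
{
    stream->close_wait = 1;
    free(stream->tunnel);
    stream->tunnel = NULL;
}

/* Deferred callback: the NULL check is only safe because `stream` is still
 * a live object here. If the stream could already have been freed, this
 * dereference would be a use-after-free -- the objection raised above. */
static void toy_establish_tunnel(toy_stream_t *stream)
{
    if (stream->tunnel == NULL)
        return; /* tunnel was closed in the meantime; nothing to do */
    printf("tunnel established, flow id %d\n", stream->tunnel->datagram_flow_id);
}

int main(void)
{
    toy_stream_t stream = {.tunnel = malloc(sizeof(toy_tunnel_state_t))};
    stream.tunnel->datagram_flow_id = 7;
    toy_enter_close_wait(&stream); /* CLOSE_WAIT happens first...             */
    toy_establish_tunnel(&stream); /* ...so the late callback becomes a no-op */
    return 0;
}
```

Under that assumption the pattern is benign: entering CLOSE_WAIT merely detaches the tunnel state and the late callback degrades to a no-op; without it, checking the pointer proves nothing.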
h2o-h2o
c
@@ -5,8 +5,8 @@
 
 nano::error nano::node_pow_server_config::serialize_toml (nano::tomlconfig & toml) const
 {
-	toml.put ("enable", enable, "Enable or disable starting Nano PoW Server as a child process.\ntype:bool");
-	toml.put ("nano_pow_server_path", pow_server_path, "Path to the nano_pow_server executable.\ntype:string,path");
+	toml.put ("enable", enable, "Enable or disable starting Nano PoW Server as a child process\ntype:bool");
+	toml.put ("nano_pow_server_path", pow_server_path, "Path to the nano_pow_server executable\ntype:string,path");
 
 	return toml.get_error ();
 }
1
#include <nano/lib/config.hpp>
#include <nano/lib/rpcconfig.hpp>
#include <nano/lib/tomlconfig.hpp>
#include <nano/node/node_pow_server_config.hpp>

nano::error nano::node_pow_server_config::serialize_toml (nano::tomlconfig & toml) const
{
	toml.put ("enable", enable, "Enable or disable starting Nano PoW Server as a child process.\ntype:bool");
	toml.put ("nano_pow_server_path", pow_server_path, "Path to the nano_pow_server executable.\ntype:string,path");

	return toml.get_error ();
}

nano::error nano::node_pow_server_config::deserialize_toml (nano::tomlconfig & toml)
{
	toml.get_optional<bool> ("enable", enable);
	toml.get_optional<std::string> ("nano_pow_server_path", pow_server_path);

	return toml.get_error ();
}
1
15,978
Did you mean to remove the periods here while newly adding them in other locations?
nanocurrency-nano-node
cpp
@@ -2014,7 +2014,7 @@ func (bps *blockPutState) removeOtherBps(other *blockPutState) {
 	// slice length.
 	newLen := len(bps.blockStates) - len(other.blockStates)
 	if newLen <= 0 {
-		newLen = 1
+		newLen = 0
 	}
 
 	// Remove any blocks that appear in `other`.
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "fmt" "os" "reflect" "strings" "sync" "time" "github.com/keybase/backoff" "github.com/keybase/client/go/libkb" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/go-framed-msgpack-rpc/rpc" "github.com/keybase/kbfs/kbfsblock" "github.com/keybase/kbfs/kbfscrypto" "github.com/keybase/kbfs/kbfsmd" "github.com/keybase/kbfs/kbfssync" "github.com/keybase/kbfs/tlf" "github.com/pkg/errors" "golang.org/x/net/context" ) // mdReadType indicates whether a read needs identifies. type mdReadType int const ( // A read request that doesn't need an identify to be // performed. mdReadNoIdentify mdReadType = iota // A read request that needs an identify to be performed (if // it hasn't been already). mdReadNeedIdentify ) // mdUpdateType indicates update type. type mdUpdateType int const ( mdWrite mdUpdateType = iota // A rekey request. Doesn't need an identify to be performed, as // a rekey does its own (finer-grained) identifies. mdRekey ) type branchType int const ( standard branchType = iota // an online, read-write branch archive // an online, read-only branch offline // an offline, read-write branch archiveOffline // an offline, read-only branch ) // Constants used in this file. TODO: Make these configurable? const ( // MaxBlockSizeBytesDefault is the default maximum block size for KBFS. // 512K blocks by default, block changes embedded max == 8K. // Block size was chosen somewhat arbitrarily by trying to // minimize the overall size of the history written by a user when // appending 1KB writes to a file, up to a 1GB total file. Here // is the output of a simple script that approximates that // calculation: // // Total history size for 0065536-byte blocks: 1134341128192 bytes // Total history size for 0131072-byte blocks: 618945052672 bytes // Total history size for 0262144-byte blocks: 412786622464 bytes // Total history size for 0524288-byte blocks: 412786622464 bytes // Total history size for 1048576-byte blocks: 618945052672 bytes // Total history size for 2097152-byte blocks: 1134341128192 bytes // Total history size for 4194304-byte blocks: 2216672886784 bytes MaxBlockSizeBytesDefault = 512 << 10 // Maximum number of blocks that can be sent in parallel maxParallelBlockPuts = 100 // Maximum number of blocks that can be fetched in parallel maxParallelBlockGets = 10 // Max response size for a single DynamoDB query is 1MB. maxMDsAtATime = 10 // Cap the number of times we retry after a recoverable error maxRetriesOnRecoverableErrors = 10 // When the number of dirty bytes exceeds this level, force a sync. dirtyBytesThreshold = maxParallelBlockPuts * MaxBlockSizeBytesDefault // The timeout for any background task. backgroundTaskTimeout = 1 * time.Minute // If it's been more than this long since our last update, check // the current head before downloading all of the new revisions. fastForwardTimeThresh = 15 * time.Minute // If there are more than this many new revisions, fast forward // rather than downloading them all. 
fastForwardRevThresh = 50 ) type fboMutexLevel mutexLevel const ( fboMDWriter fboMutexLevel = 1 fboHead fboMutexLevel = 2 fboBlock fboMutexLevel = 3 ) func (o fboMutexLevel) String() string { switch o { case fboMDWriter: return "mdWriterLock" case fboHead: return "headLock" case fboBlock: return "blockLock" default: return fmt.Sprintf("Invalid fboMutexLevel %d", int(o)) } } func fboMutexLevelToString(o mutexLevel) string { return (fboMutexLevel(o)).String() } // Rules for working with lockState in FBO: // // - Every "execution flow" (i.e., program flow that happens // sequentially) needs its own lockState object. This usually means // that each "public" FBO method does: // // lState := makeFBOLockState() // // near the top. // // - Plumb lState through to all functions that hold any of the // relevant locks, or are called under those locks. // // This way, violations of the lock hierarchy will be detected at // runtime. func makeFBOLockState() *lockState { return makeLevelState(fboMutexLevelToString) } // blockLock is just like a sync.RWMutex, but with an extra operation // (DoRUnlockedIfPossible). type blockLock struct { leveledRWMutex locked bool } func (bl *blockLock) Lock(lState *lockState) { bl.leveledRWMutex.Lock(lState) bl.locked = true } func (bl *blockLock) Unlock(lState *lockState) { bl.locked = false bl.leveledRWMutex.Unlock(lState) } // DoRUnlockedIfPossible must be called when r- or w-locked. If // r-locked, r-unlocks, runs the given function, and r-locks after // it's done. Otherwise, just runs the given function. func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) { if !bl.locked { bl.RUnlock(lState) defer bl.RLock(lState) } f(lState) } // headTrustStatus marks whether the head is from a trusted or // untrusted source. When rekeying we get the head MD by folder id // and do not check the tlf handle type headTrustStatus int const ( headUntrusted headTrustStatus = iota headTrusted ) type cachedDirOp struct { dirOp op nodes []Node } // folderBranchOps implements the KBFSOps interface for a specific // branch of a specific folder. It is go-routine safe for operations // within the folder. // // We use locks to protect against multiple goroutines accessing the // same folder-branch. The goal with our locking strategy is maximize // concurrent access whenever possible. See design/state_machine.md // for more details. There are three important locks: // // 1) mdWriterLock: Any "remote-sync" operation (one which modifies the // folder's metadata) must take this lock during the entirety of // its operation, to avoid forking the MD. // // 2) headLock: This is a read/write mutex. It must be taken for // reading before accessing any part of the current head MD. It // should be taken for the shortest time possible -- that means in // general that it should be taken, and the MD copied to a // goroutine-local variable, and then it can be released. // Remote-sync operations should take it for writing after pushing // all of the blocks and MD to the KBFS servers (i.e., all network // accesses), and then hold it until after all notifications have // been fired, to ensure that no concurrent "local" operations ever // see inconsistent state locally. // // 3) blockLock: This too is a read/write mutex. It must be taken for // reading before accessing any blocks in the block cache that // belong to this folder/branch. This includes checking their // dirty status. 
It should be taken for the shortest time possible // -- that means in general it should be taken, and then the blocks // that will be modified should be copied to local variables in the // goroutine, and then it should be released. The blocks should // then be modified locally, and then readied and pushed out // remotely. Only after the blocks have been pushed to the server // should a remote-sync operation take the lock again (this time // for writing) and put/finalize the blocks. Write and Truncate // should take blockLock for their entire lifetime, since they // don't involve writes over the network. Furthermore, if a block // is not in the cache and needs to be fetched, we should release // the mutex before doing the network operation, and lock it again // before writing the block back to the cache. // // We want to allow writes and truncates to a file that's currently // being sync'd, like any good networked file system. The tricky part // is making sure the changes can both: a) be read while the sync is // happening, and b) be applied to the new file path after the sync is // done. // // For now, we just do the dumb, brute force thing for now: if a block // is currently being sync'd, it copies the block and puts it back // into the cache as modified. Then, when the sync finishes, it // throws away the modified blocks and re-applies the change to the // new file path (which might have a completely different set of // blocks, so we can't just reuse the blocks that were modified during // the sync.) type folderBranchOps struct { config Config folderBranch FolderBranch bid BranchID // protected by mdWriterLock bType branchType observers *observerList // these locks, when locked concurrently by the same goroutine, // should only be taken in the following order to avoid deadlock: mdWriterLock leveledMutex // taken by any method making MD modifications dirOps []cachedDirOp // protects access to head, headStatus, latestMergedRevision, // and hasBeenCleared. headLock leveledRWMutex head ImmutableRootMetadata headStatus headTrustStatus // latestMergedRevision tracks the latest heard merged revision on server latestMergedRevision kbfsmd.Revision // Has this folder ever been cleared? hasBeenCleared bool blocks folderBlockOps prepper folderUpdatePrepper // nodeCache itself is goroutine-safe, but this object's use // of it has special requirements: // // - Reads can call PathFromNode() unlocked, since there are // no guarantees with concurrent reads. // // - Operations that takes mdWriterLock always needs the // most up-to-date paths, so those must call // PathFromNode() under mdWriterLock. // // - Block write operations (write/truncate/sync) need to // coordinate. Specifically, sync must make sure that // blocks referenced in a path (including all of the child // blocks) must exist in the cache during calls to // PathFromNode from write/truncate. This means that sync // must modify dirty file blocks only under blockLock, and // write/truncate must call PathFromNode() under // blockLock. // // Furthermore, calls to UpdatePointer() must happen // before the copy-on-write mode induced by Sync() is // finished. nodeCache NodeCache // Whether we've identified this TLF or not. 
	identifyLock sync.Mutex
	identifyDone bool
	identifyTime time.Time

	// The current status summary for this folder
	status *folderBranchStatusKeeper

	// How to log
	log      traceLogger
	deferLog traceLogger

	// Closed on shutdown
	shutdownChan chan struct{}

	// Can be used to turn off notifications for a while (e.g., for testing)
	updatePauseChan chan (<-chan struct{})

	cancelUpdatesLock sync.Mutex
	// Cancels the goroutine currently waiting on TLF MD updates.
	cancelUpdates context.CancelFunc

	// After a shutdown, this channel will be closed when the register
	// goroutine completes.
	updateDoneChan chan struct{}

	// forceSyncChan is read from by the background sync process
	// to know when it should sync immediately.
	forceSyncChan <-chan struct{}

	// syncNeededChan is signalled when a buffered write happens, and
	// lets the background syncer wait rather than waking up all the
	// time.
	syncNeededChan chan struct{}

	// How to resolve conflicts
	cr *ConflictResolver

	// Helper class for archiving and cleaning up the blocks for this TLF
	fbm *folderBlockManager

	rekeyFSM RekeyFSM

	editHistory *TlfEditHistory

	branchChanges      kbfssync.RepeatedWaitGroup
	mdFlushes          kbfssync.RepeatedWaitGroup
	forcedFastForwards kbfssync.RepeatedWaitGroup
	merkleFetches      kbfssync.RepeatedWaitGroup

	muLastGetHead sync.Mutex
	// We record a timestamp every time getHead or getTrustedHead is
	// called, and use this as a heuristic for whether the user is
	// actively using KBFS. If the user has been generating KBFS
	// activity recently, it makes sense to try to reconnect as soon
	// as possible in case a deployment causes a disconnection.
	lastGetHead time.Time
}

var _ KBFSOps = (*folderBranchOps)(nil)

var _ fbmHelper = (*folderBranchOps)(nil)

// newFolderBranchOps constructs a new folderBranchOps object.
func newFolderBranchOps(ctx context.Context, config Config, fb FolderBranch,
	bType branchType) *folderBranchOps {
	var nodeCache NodeCache
	if config.Mode() == InitMinimal {
		// If we're in minimal mode, let the node cache remain nil to
		// ensure that the user doesn't try any data reads or writes.
	} else {
		nodeCache = newNodeCacheStandard(fb)
	}

	// make logger
	branchSuffix := ""
	if fb.Branch != MasterBranch {
		branchSuffix = " " + string(fb.Branch)
	}
	tlfStringFull := fb.Tlf.String()
	// Shorten the TLF ID for the module name. 8 characters should be
	// unique enough for a local node.
	log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8],
		branchSuffix))
	// But print it out once in full, just in case.
log.CInfof(ctx, "Created new folder-branch for %s", tlfStringFull) observers := newObserverList() mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{}) headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{}) blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{}) forceSyncChan := make(chan struct{}) fbo := &folderBranchOps{ config: config, folderBranch: fb, bid: BranchID{}, bType: bType, observers: observers, status: newFolderBranchStatusKeeper(config, nodeCache), mdWriterLock: mdWriterLock, headLock: headLock, blocks: folderBlockOps{ config: config, log: log, folderBranch: fb, observers: observers, forceSyncChan: forceSyncChan, blockLock: blockLock{ leveledRWMutex: blockLockMu, }, dirtyFiles: make(map[BlockPointer]*dirtyFile), deferred: make(map[BlockRef]deferredState), unrefCache: make(map[BlockRef]*syncInfo), deCache: make(map[BlockRef]deCacheEntry), nodeCache: nodeCache, }, nodeCache: nodeCache, log: traceLogger{log}, deferLog: traceLogger{log.CloneWithAddedDepth(1)}, shutdownChan: make(chan struct{}), updatePauseChan: make(chan (<-chan struct{})), forceSyncChan: forceSyncChan, syncNeededChan: make(chan struct{}, 1), } fbo.prepper = folderUpdatePrepper{ config: config, folderBranch: fb, blocks: &fbo.blocks, log: log, } fbo.cr = NewConflictResolver(config, fbo) fbo.fbm = newFolderBlockManager(config, fb, fbo) fbo.editHistory = NewTlfEditHistory(config, fbo, log) fbo.rekeyFSM = NewRekeyFSM(fbo) if config.DoBackgroundFlushes() { go fbo.backgroundFlusher() } return fbo } // markForReIdentifyIfNeeded checks whether this tlf is identified and mark // it for lazy reidentification if it exceeds time limits. func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) { fbo.identifyLock.Lock() defer fbo.identifyLock.Unlock() if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) { fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime) fbo.identifyDone = false } } // Shutdown safely shuts down any background goroutines that may have // been launched by folderBranchOps. func (fbo *folderBranchOps) Shutdown(ctx context.Context) error { if fbo.config.CheckStateOnShutdown() { lState := makeFBOLockState() if fbo.blocks.GetState(lState) == dirtyState { fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state") } else if !fbo.isMasterBranch(lState) { fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged") } else { // Make sure we're up to date first if err := fbo.SyncFromServerForTesting(ctx, fbo.folderBranch, nil); err != nil { return err } // Check the state for consistency before shutting down. sc := NewStateChecker(fbo.config) if err := sc.CheckMergedState(ctx, fbo.id()); err != nil { return err } } } close(fbo.shutdownChan) fbo.merkleFetches.Wait(ctx) fbo.cr.Shutdown() fbo.fbm.shutdown() fbo.editHistory.Shutdown() fbo.rekeyFSM.Shutdown() // Wait for the update goroutine to finish, so that we don't have // any races with logging during test reporting. 
if fbo.updateDoneChan != nil { <-fbo.updateDoneChan } return nil } func (fbo *folderBranchOps) id() tlf.ID { return fbo.folderBranch.Tlf } func (fbo *folderBranchOps) branch() BranchName { return fbo.folderBranch.Branch } func (fbo *folderBranchOps) GetFavorites(ctx context.Context) ( []Favorite, error) { return nil, errors.New("GetFavorites is not supported by folderBranchOps") } func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) { // no-op } func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context, fav Favorite) error { return errors.New("DeleteFavorite is not supported by folderBranchOps") } func (fbo *folderBranchOps) AddFavorite(ctx context.Context, fav Favorite) error { return errors.New("AddFavorite is not supported by folderBranchOps") } func (fbo *folderBranchOps) addToFavorites(ctx context.Context, favorites *Favorites, created bool) (err error) { lState := makeFBOLockState() head := fbo.getTrustedHead(lState) if head == (ImmutableRootMetadata{}) { return OpsCantHandleFavorite{"Can't add a favorite without a handle"} } return fbo.addToFavoritesByHandle(ctx, favorites, head.GetTlfHandle(), created) } func (fbo *folderBranchOps) addToFavoritesByHandle(ctx context.Context, favorites *Favorites, handle *TlfHandle, created bool) (err error) { if _, err := fbo.config.KBPKI().GetCurrentSession(ctx); err != nil { // Can't favorite while not logged in return nil } favorites.AddAsync(ctx, handle.toFavToAdd(created)) return nil } func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context, favorites *Favorites) error { if _, err := fbo.config.KBPKI().GetCurrentSession(ctx); err != nil { // Can't unfavorite while not logged in return nil } lState := makeFBOLockState() head := fbo.getTrustedHead(lState) if head == (ImmutableRootMetadata{}) { // This can happen when identifies fail and the head is never set. return OpsCantHandleFavorite{"Can't delete a favorite without a handle"} } h := head.GetTlfHandle() return favorites.Delete(ctx, h.ToFavorite()) } func (fbo *folderBranchOps) doFavoritesOp(ctx context.Context, favs *Favorites, fop FavoritesOp, handle *TlfHandle) error { switch fop { case FavoritesOpNoChange: return nil case FavoritesOpAdd: if handle != nil { return fbo.addToFavoritesByHandle(ctx, favs, handle, false) } return fbo.addToFavorites(ctx, favs, false) case FavoritesOpAddNewlyCreated: if handle != nil { return fbo.addToFavoritesByHandle(ctx, favs, handle, true) } return fbo.addToFavorites(ctx, favs, true) case FavoritesOpRemove: return fbo.deleteFromFavorites(ctx, favs) default: return InvalidFavoritesOpError{} } } func (fbo *folderBranchOps) updateLastGetHeadTimestamp() { fbo.muLastGetHead.Lock() defer fbo.muLastGetHead.Unlock() fbo.lastGetHead = fbo.config.Clock().Now() } // getTrustedHead should not be called outside of folder_branch_ops.go. // Returns ImmutableRootMetadata{} when the head is not trusted. // See the comment on headTrustedStatus for more information. func (fbo *folderBranchOps) getTrustedHead(lState *lockState) ImmutableRootMetadata { fbo.headLock.RLock(lState) defer fbo.headLock.RUnlock(lState) if fbo.headStatus == headUntrusted { return ImmutableRootMetadata{} } // This triggers any mdserver backoff timer to fast forward. In case of a // deployment, this causes KBFS client to try to reconnect to mdserver // immediately rather than waiting until the random backoff timer is up. // Note that this doesn't necessarily guarantee that the fbo handler that // called this method would get latest MD. 
fbo.config.MDServer().FastForwardBackoff() fbo.updateLastGetHeadTimestamp() return fbo.head } // getHead should not be called outside of folder_branch_ops.go. func (fbo *folderBranchOps) getHead(lState *lockState) ( ImmutableRootMetadata, headTrustStatus) { fbo.headLock.RLock(lState) defer fbo.headLock.RUnlock(lState) // See getTrustedHead for explanation. fbo.config.MDServer().FastForwardBackoff() fbo.updateLastGetHeadTimestamp() return fbo.head, fbo.headStatus } // isMasterBranch should not be called if mdWriterLock is already taken. func (fbo *folderBranchOps) isMasterBranch(lState *lockState) bool { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.bid == NullBranchID } func (fbo *folderBranchOps) isMasterBranchLocked(lState *lockState) bool { fbo.mdWriterLock.AssertLocked(lState) return fbo.bid == NullBranchID } func (fbo *folderBranchOps) setBranchIDLocked(lState *lockState, bid BranchID) { fbo.mdWriterLock.AssertLocked(lState) if fbo.bid != bid { fbo.cr.BeginNewBranch() } fbo.bid = bid if bid == NullBranchID { fbo.status.setCRSummary(nil, nil) } } var errNoFlushedRevisions = errors.New("No flushed MDs yet") var errNoMergedRevWhileStaged = errors.New( "Cannot find most recent merged revision while staged") // getJournalPredecessorRevision returns the revision that precedes // the current journal head if journaling enabled and there are // unflushed MD updates; otherwise it returns // kbfsmd.RevisionUninitialized. If there aren't any flushed MD // revisions, it returns errNoFlushedRevisions. func (fbo *folderBranchOps) getJournalPredecessorRevision(ctx context.Context) ( kbfsmd.Revision, error) { jServer, err := GetJournalServer(fbo.config) if err != nil { // Journaling is disabled entirely. return kbfsmd.RevisionUninitialized, nil } jStatus, err := jServer.JournalStatus(fbo.id()) if err != nil { // Journaling is disabled for this TLF, so use the local head. // TODO: JournalStatus could return other errors (likely // file/disk corruption) that indicate a real problem, so it // might be nice to type those errors so we can distinguish // them. return kbfsmd.RevisionUninitialized, nil } if jStatus.BranchID != NullBranchID.String() { return kbfsmd.RevisionUninitialized, errNoMergedRevWhileStaged } if jStatus.RevisionStart == kbfsmd.RevisionUninitialized { // The journal is empty, so the local head must be the most recent. return kbfsmd.RevisionUninitialized, nil } else if jStatus.RevisionStart == kbfsmd.RevisionInitial { // Nothing has been flushed to the servers yet, so don't // return anything. return kbfsmd.RevisionUninitialized, errNoFlushedRevisions } return jStatus.RevisionStart - 1, nil } // validateHeadLocked validates an untrusted head and sets it as trusted. // see headTrustedState comment for more information. func (fbo *folderBranchOps) validateHeadLocked( ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.headLock.AssertLocked(lState) // Validate fbo against fetched md and discard the fetched one. 
if fbo.head.TlfID() != md.TlfID() { fbo.log.CCriticalf(ctx, "Fake untrusted TLF encountered %v %v %v %v", fbo.head.TlfID(), md.TlfID(), fbo.head.mdID, md.mdID) return MDTlfIDMismatch{fbo.head.TlfID(), md.TlfID()} } fbo.headStatus = headTrusted return nil } func (fbo *folderBranchOps) setHeadLocked( ctx context.Context, lState *lockState, md ImmutableRootMetadata, headStatus headTrustStatus) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) isFirstHead := fbo.head == ImmutableRootMetadata{} wasReadable := false if !isFirstHead { if headStatus == headUntrusted { panic("setHeadLocked: Trying to set an untrusted head over an existing head") } wasReadable = fbo.head.IsReadable() if fbo.headStatus == headUntrusted { err := fbo.validateHeadLocked(ctx, lState, md) if err != nil { return err } if fbo.head.mdID == md.mdID { return nil } } if fbo.head.mdID == md.mdID { panic(errors.Errorf("Re-putting the same MD: %s", md.mdID)) } } fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision()) // If this is the first time the MD is being set, and we are // operating on unmerged data, initialize the state properly and // kick off conflict resolution. if isFirstHead && md.MergedStatus() == Unmerged { fbo.setBranchIDLocked(lState, md.BID()) // Use uninitialized for the merged branch; the unmerged // revision is enough to trigger conflict resolution. fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized) } else if md.MergedStatus() == Merged { journalEnabled := TLFJournalEnabled(fbo.config, fbo.id()) if journalEnabled { if isFirstHead { // If journaling is on, and this is the first head // we're setting, we have to make sure we use the // server's notion of the latest MD, not the one // potentially coming from our journal. If there are // no flushed revisions, it's not a hard error, and we // just leave the latest merged revision // uninitialized. journalPred, err := fbo.getJournalPredecessorRevision(ctx) switch err { case nil: // journalPred will be // kbfsmd.RevisionUninitialized when the journal // is empty. if journalPred >= kbfsmd.RevisionInitial { fbo.setLatestMergedRevisionLocked( ctx, lState, journalPred, false) } else { fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false) } case errNoFlushedRevisions: // The server has no revisions, so leave the // latest merged revision uninitialized. default: return err } } else { // If this isn't the first head, then this is either // an update from the server, or an update just // written by the client. But since journaling is on, // then latter case will be handled by onMDFlush when // the update is properly flushed to the server. So // ignore updates that haven't yet been put to the // server. if md.putToServer { fbo.setLatestMergedRevisionLocked( ctx, lState, md.Revision(), false) } } } else { // This is a merged revision, and journaling is disabled, // so it's definitely the latest revision on the server as // well. fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false) } } // Make sure that any unembedded block changes have been swapped // back in. if fbo.config.Mode() != InitMinimal && md.data.Changes.Info.BlockPointer != zeroPtr && len(md.data.Changes.Ops) == 0 { return errors.New("Must swap in block changes before setting head") } fbo.head = md if isFirstHead && headStatus == headTrusted { fbo.headStatus = headTrusted } fbo.status.setRootMetadata(md) if isFirstHead { // Start registering for updates right away, using this MD // as a starting point. 
For now only the master branch can // get updates if fbo.branch() == MasterBranch && fbo.config.Mode() != InitSingleOp { fbo.updateDoneChan = make(chan struct{}) go fbo.registerAndWaitForUpdates() } } if !wasReadable && md.IsReadable() { // Let any listeners know that this folder is now readable, // which may indicate that a rekey successfully took place. fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification( md.GetTlfHandle(), md.TlfID().Type() == tlf.Public)) } return nil } // setInitialHeadUntrustedLocked is for when the given RootMetadata // was fetched not due to a user action, i.e. via a Rekey // notification, and we don't have a TLF name to check against. func (fbo *folderBranchOps) setInitialHeadUntrustedLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head != (ImmutableRootMetadata{}) { return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked") } return fbo.setHeadLocked(ctx, lState, md, headUntrusted) } // setNewInitialHeadLocked is for when we're creating a brand-new TLF. // This is trusted. func (fbo *folderBranchOps) setNewInitialHeadLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head != (ImmutableRootMetadata{}) { return errors.New("Unexpected non-nil head in setNewInitialHeadLocked") } if md.Revision() != kbfsmd.RevisionInitial { return errors.Errorf("setNewInitialHeadLocked unexpectedly called with revision %d", md.Revision()) } return fbo.setHeadLocked(ctx, lState, md, headTrusted) } // setInitialHeadTrustedLocked is for when the given RootMetadata // was fetched due to a user action, and will be checked against the // TLF name. func (fbo *folderBranchOps) setInitialHeadTrustedLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head != (ImmutableRootMetadata{}) { return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked") } return fbo.setHeadLocked(ctx, lState, md, headTrusted) } // setHeadSuccessorLocked is for when we're applying updates from the // server or when we're applying new updates we created ourselves. func (fbo *folderBranchOps) setHeadSuccessorLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata, rebased bool) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head == (ImmutableRootMetadata{}) { // This can happen in tests via SyncFromServerForTesting(). return fbo.setInitialHeadTrustedLocked(ctx, lState, md) } if !rebased { err := fbo.head.CheckValidSuccessor(fbo.head.mdID, md.ReadOnly()) if err != nil { return err } } oldHandle := fbo.head.GetTlfHandle() newHandle := md.GetTlfHandle() // Newer handles should be equal or more resolved over time. // // TODO: In some cases, they shouldn't, e.g. if we're on an // unmerged branch. Add checks for this. 
resolvesTo, partialResolvedOldHandle, err := oldHandle.ResolvesTo( ctx, fbo.config.Codec(), fbo.config.KBPKI(), *newHandle) if err != nil { return err } oldName := oldHandle.GetCanonicalName() newName := newHandle.GetCanonicalName() if !resolvesTo { return IncompatibleHandleError{ oldName, partialResolvedOldHandle.GetCanonicalName(), newName, } } err = fbo.setHeadLocked(ctx, lState, md, headTrusted) if err != nil { return err } if oldName != newName { fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)", oldName, newName) // If the handle has changed, send out a notification. fbo.observers.tlfHandleChange(ctx, fbo.head.GetTlfHandle()) // Also the folder should be re-identified given the // newly-resolved assertions. func() { fbo.identifyLock.Lock() defer fbo.identifyLock.Unlock() fbo.identifyDone = false }() } return nil } // setHeadPredecessorLocked is for when we're unstaging updates. func (fbo *folderBranchOps) setHeadPredecessorLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head == (ImmutableRootMetadata{}) { return errors.New("Unexpected nil head in setHeadPredecessorLocked") } if fbo.head.Revision() <= kbfsmd.RevisionInitial { return errors.Errorf("setHeadPredecessorLocked unexpectedly called with revision %d", fbo.head.Revision()) } if fbo.head.MergedStatus() != Unmerged { return errors.New("Unexpected merged head in setHeadPredecessorLocked") } err := md.CheckValidSuccessor(md.mdID, fbo.head.ReadOnly()) if err != nil { return err } oldHandle := fbo.head.GetTlfHandle() newHandle := md.GetTlfHandle() // The two handles must be the same, since no rekeying is done // while unmerged. eq, err := oldHandle.Equals(fbo.config.Codec(), *newHandle) if err != nil { return err } if !eq { return errors.Errorf( "head handle %v unexpectedly not equal to new handle = %v", oldHandle, newHandle) } return fbo.setHeadLocked(ctx, lState, md, headTrusted) } // setHeadConflictResolvedLocked is for when we're setting the merged // update with resolved conflicts. func (fbo *folderBranchOps) setHeadConflictResolvedLocked(ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.head.MergedStatus() != Unmerged { return errors.New("Unexpected merged head in setHeadConflictResolvedLocked") } if md.MergedStatus() != Merged { return errors.New("Unexpected unmerged update in setHeadConflictResolvedLocked") } return fbo.setHeadLocked(ctx, lState, md, headTrusted) } func (fbo *folderBranchOps) identifyOnce( ctx context.Context, md ReadOnlyRootMetadata) error { fbo.identifyLock.Lock() defer fbo.identifyLock.Unlock() ei := getExtendedIdentify(ctx) if fbo.identifyDone && !ei.behavior.AlwaysRunIdentify() { // TODO: provide a way for the service to break this cache when identify // state changes on a TLF. For now, we do it this way to make chat work. return nil } h := md.GetTlfHandle() fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath()) kbpki := fbo.config.KBPKI() err := identifyHandle(ctx, kbpki, kbpki, h) if err != nil { fbo.log.CDebugf(ctx, "Identify finished with error: %v", err) // For now, if the identify fails, let the // next function to hit this code path retry. 
return err } if ei.behavior.WarningInsteadOfErrorOnBrokenTracks() && len(ei.getTlfBreakAndClose().Breaks) > 0 { fbo.log.CDebugf(ctx, "Identify finished with no error but broken proof warnings") } else if ei.behavior == keybase1.TLFIdentifyBehavior_CHAT_SKIP { fbo.log.CDebugf(ctx, "Identify skipped") } else { fbo.log.CDebugf(ctx, "Identify finished successfully") fbo.identifyDone = true fbo.identifyTime = fbo.config.Clock().Now() } return nil } // getMDForReadLocked returns an existing md for a read // operation. Note that mds will not be fetched here. func (fbo *folderBranchOps) getMDForReadLocked( ctx context.Context, lState *lockState, rtype mdReadType) ( md ImmutableRootMetadata, err error) { if rtype != mdReadNeedIdentify && rtype != mdReadNoIdentify { panic("Invalid rtype in getMDLockedForRead") } md = fbo.getTrustedHead(lState) if md != (ImmutableRootMetadata{}) { if rtype != mdReadNoIdentify { err = fbo.identifyOnce(ctx, md.ReadOnly()) } return md, err } return ImmutableRootMetadata{}, MDWriteNeededInRequest{} } // getMDForWriteOrRekeyLocked can fetch MDs, identify them and // contains the fancy logic. For reading use getMDLockedForRead. // Here we actually can fetch things from the server. // rekeys are untrusted. func (fbo *folderBranchOps) getMDForWriteOrRekeyLocked( ctx context.Context, lState *lockState, mdType mdUpdateType) ( md ImmutableRootMetadata, err error) { defer func() { if err != nil || mdType == mdRekey { return } err = fbo.identifyOnce(ctx, md.ReadOnly()) }() md = fbo.getTrustedHead(lState) if md != (ImmutableRootMetadata{}) { return md, nil } // MDs coming from from rekey notifications are marked untrusted. // // TODO: Make tests not take this code path. fbo.mdWriterLock.AssertLocked(lState) // Not in cache, fetch from server and add to cache. First, see // if this device has any unmerged commits -- take the latest one. mdops := fbo.config.MDOps() // get the head of the unmerged branch for this device (if any) md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), NullBranchID) if err != nil { return ImmutableRootMetadata{}, err } mergedMD, err := mdops.GetForTLF(ctx, fbo.id(), nil) if err != nil { return ImmutableRootMetadata{}, err } if mergedMD == (ImmutableRootMetadata{}) { return ImmutableRootMetadata{}, errors.WithStack(NoMergedMDError{fbo.id()}) } if md == (ImmutableRootMetadata{}) { // There are no unmerged MDs for this device, so just use the current head. md = mergedMD } else { func() { fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) // We don't need to do this for merged head // because the setHeadLocked() already does // that anyway. fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false) }() } if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) { return ImmutableRootMetadata{}, errors.Errorf("Got undecryptable RMD for %s: initialized=%t, readable=%t", fbo.id(), md.IsInitialized(), md.IsReadable()) } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) headStatus := headTrusted if mdType == mdRekey { // If we already have a head (that has been filled after the initial // check, but before we acquired the lock), then just return it. 
if fbo.head != (ImmutableRootMetadata{}) { return fbo.head, nil } headStatus = headUntrusted } err = fbo.setHeadLocked(ctx, lState, md, headStatus) if err != nil { return ImmutableRootMetadata{}, err } return md, nil } func (fbo *folderBranchOps) getMDForReadHelper( ctx context.Context, lState *lockState, rtype mdReadType) (ImmutableRootMetadata, error) { md, err := fbo.getMDForReadLocked(ctx, lState, rtype) if err != nil { return ImmutableRootMetadata{}, err } if md.TlfID().Type() != tlf.Public { session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return ImmutableRootMetadata{}, err } isReader, err := md.IsReader(ctx, fbo.config.KBPKI(), session.UID) if err != nil { return ImmutableRootMetadata{}, err } if !isReader { return ImmutableRootMetadata{}, NewReadAccessError( md.GetTlfHandle(), session.Name, md.GetTlfHandle().GetCanonicalPath()) } } return md, nil } // getMostRecentFullyMergedMD is a helper method that returns the most // recent merged MD that has been flushed to the server. This could // be different from the current local head if journaling is on. If // the journal is on a branch, it returns an error. func (fbo *folderBranchOps) getMostRecentFullyMergedMD(ctx context.Context) ( ImmutableRootMetadata, error) { mergedRev, err := fbo.getJournalPredecessorRevision(ctx) if err != nil { return ImmutableRootMetadata{}, err } if mergedRev == kbfsmd.RevisionUninitialized { // No unflushed journal entries, so use the local head. lState := makeFBOLockState() return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify) } // Otherwise, use the specified revision. rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID, mergedRev, Merged, nil) if err != nil { return ImmutableRootMetadata{}, err } fbo.log.CDebugf(ctx, "Most recent fully merged revision is %d", mergedRev) return rmd, nil } func (fbo *folderBranchOps) getMDForReadNoIdentify( ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) { return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify) } func (fbo *folderBranchOps) getMDForReadNeedIdentify( ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) { return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify) } // getMDForReadNeedIdentifyOnMaybeFirstAccess should be called by a // code path (like chat) that might be accessing this folder for the // first time. Other folderBranchOps methods like Lookup which know // the folder has already been accessed at least once (to get the root // node, for example) do not need to call this. Unlike other getMD // calls, this one may return a nil ImmutableRootMetadata along with a // nil error, to indicate that there isn't any MD for this TLF yet and // one must be created by the caller. 
func (fbo *folderBranchOps) getMDForReadNeedIdentifyOnMaybeFirstAccess(
	ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
	md, err := fbo.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)

	if _, ok := err.(MDWriteNeededInRequest); ok {
		fbo.mdWriterLock.Lock(lState)
		defer fbo.mdWriterLock.Unlock(lState)
		md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
	}

	if _, noMD := errors.Cause(err).(NoMergedMDError); noMD {
		return ImmutableRootMetadata{}, nil
	}

	if err != nil {
		return ImmutableRootMetadata{}, err
	}

	if md.TlfID().Type() != tlf.Public {
		session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
		if err != nil {
			return ImmutableRootMetadata{}, err
		}
		isReader, err := md.IsReader(ctx, fbo.config.KBPKI(), session.UID)
		if err != nil {
			return ImmutableRootMetadata{}, err
		}
		if !isReader {
			return ImmutableRootMetadata{}, NewReadAccessError(
				md.GetTlfHandle(), session.Name,
				md.GetTlfHandle().GetCanonicalPath())
		}
	}

	return md, nil
}

func (fbo *folderBranchOps) getMDForWriteLockedForFilename(
	ctx context.Context, lState *lockState, filename string) (
	ImmutableRootMetadata, error) {
	fbo.mdWriterLock.AssertLocked(lState)

	md, err := fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	isWriter, err := md.IsWriter(
		ctx, fbo.config.KBPKI(), session.UID, session.VerifyingKey)
	if err != nil {
		return ImmutableRootMetadata{}, err
	}
	if !isWriter {
		return ImmutableRootMetadata{}, NewWriteAccessError(
			md.GetTlfHandle(), session.Name, filename)
	}
	return md, nil
}

func (fbo *folderBranchOps) getSuccessorMDForWriteLockedForFilename(
	ctx context.Context, lState *lockState, filename string) (
	*RootMetadata, error) {
	fbo.mdWriterLock.AssertLocked(lState)

	md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
	if err != nil {
		return nil, err
	}

	// Make a new successor of the current MD to hold the coming
	// writes. The caller must pass this into `finalizeMDWriteLocked`
	// or the changes will be lost.
	return md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
		fbo.config.Codec(), fbo.config.KeyManager(), fbo.config.KBPKI(),
		fbo.config.KBPKI(), md.mdID, true)
}

// getSuccessorMDForWriteLocked returns a new RootMetadata object with
// an incremented version number for modification. If the returned
// object is put to the MDServer (via MDOps), mdWriterLock must be
// held until then. (See comments for mdWriterLock above.)
func (fbo *folderBranchOps) getSuccessorMDForWriteLocked(
	ctx context.Context, lState *lockState) (*RootMetadata, error) {
	return fbo.getSuccessorMDForWriteLockedForFilename(ctx, lState, "")
}

func (fbo *folderBranchOps) getMDForRekeyWriteLocked(
	ctx context.Context, lState *lockState) (
	rmd *RootMetadata, lastWriterVerifyingKey kbfscrypto.VerifyingKey,
	wasRekeySet bool, err error) {
	fbo.mdWriterLock.AssertLocked(lState)

	md, err := fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdRekey)
	if err != nil {
		return nil, kbfscrypto.VerifyingKey{}, false, err
	}

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, kbfscrypto.VerifyingKey{}, false, err
	}

	handle := md.GetTlfHandle()

	// must be a reader or writer (it checks both.)
if !handle.IsReader(session.UID) { return nil, kbfscrypto.VerifyingKey{}, false, NewRekeyPermissionError(md.GetTlfHandle(), session.Name) } newMd, err := md.MakeSuccessor(ctx, fbo.config.MetadataVersion(), fbo.config.Codec(), fbo.config.KeyManager(), fbo.config.KBPKI(), fbo.config.KBPKI(), md.mdID, handle.IsWriter(session.UID)) if err != nil { return nil, kbfscrypto.VerifyingKey{}, false, err } // readers shouldn't modify writer metadata if !handle.IsWriter(session.UID) && !newMd.IsWriterMetadataCopiedSet() { return nil, kbfscrypto.VerifyingKey{}, false, NewRekeyPermissionError(handle, session.Name) } return newMd, md.LastModifyingWriterVerifyingKey(), md.IsRekeySet(), nil } func (fbo *folderBranchOps) nowUnixNano() int64 { return fbo.config.Clock().Now().UnixNano() } func (fbo *folderBranchOps) maybeUnembedAndPutBlocks(ctx context.Context, md *RootMetadata) (*blockPutState, error) { if fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) { return nil, nil } chargedTo, err := chargedToForTLF( ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), md.GetTlfHandle()) if err != nil { return nil, err } bps := newBlockPutState(1) err = fbo.prepper.unembedBlockChanges( ctx, bps, md, &md.data.Changes, chargedTo) if err != nil { return nil, err } defer func() { if err != nil { fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail) } }() ptrsToDelete, err := doBlockPuts(ctx, fbo.config.BlockServer(), fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, fbo.deferLog, md.TlfID(), md.GetTlfHandle().GetCanonicalName(), *bps) if err != nil { return nil, err } if len(ptrsToDelete) > 0 { return nil, errors.Errorf("Unexpected pointers to delete after "+ "unembedding block changes in gc op: %v", ptrsToDelete) } return bps, nil } // ResetRootBlock creates a new empty dir block and sets the given // metadata's root block to it. func ResetRootBlock(ctx context.Context, config Config, rmd *RootMetadata) (Block, BlockInfo, ReadyBlockData, error) { newDblock := NewDirBlock() chargedTo, err := chargedToForTLF( ctx, config.KBPKI(), config.KBPKI(), rmd.GetTlfHandle()) if err != nil { return nil, BlockInfo{}, ReadyBlockData{}, err } info, plainSize, readyBlockData, err := ReadyBlock(ctx, config.BlockCache(), config.BlockOps(), config.Crypto(), rmd.ReadOnly(), newDblock, chargedTo, config.DefaultBlockType()) if err != nil { return nil, BlockInfo{}, ReadyBlockData{}, err } now := config.Clock().Now().UnixNano() rmd.data.Dir = DirEntry{ BlockInfo: info, EntryInfo: EntryInfo{ Type: Dir, Size: uint64(plainSize), Mtime: now, Ctime: now, }, } prevDiskUsage := rmd.DiskUsage() rmd.SetDiskUsage(0) // Redundant, since this is called only for brand-new or // successor RMDs, but leave in to be defensive. rmd.ClearBlockChanges() co := newCreateOpForRootDir() rmd.AddOp(co) rmd.AddRefBlock(rmd.data.Dir.BlockInfo) // Set unref bytes to the previous disk usage, so that the // accounting works out. rmd.AddUnrefBytes(prevDiskUsage) return newDblock, info, readyBlockData, nil } func (fbo *folderBranchOps) initMDLocked( ctx context.Context, lState *lockState, md *RootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return err } handle := md.GetTlfHandle() // make sure we're a writer before rekeying or putting any blocks. 
isWriter, err := md.IsWriter( ctx, fbo.config.KBPKI(), session.UID, session.VerifyingKey) if err != nil { return err } if !isWriter { return NewWriteAccessError( handle, session.Name, handle.GetCanonicalPath()) } var expectedKeyGen KeyGen var tlfCryptKey *kbfscrypto.TLFCryptKey switch md.TlfID().Type() { case tlf.Public: expectedKeyGen = PublicKeyGen case tlf.Private: var rekeyDone bool // create a new set of keys for this metadata rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false) if err != nil { return err } if !rekeyDone { return errors.Errorf("Initial rekey unexpectedly not done for "+ "private TLF %v", md.TlfID()) } expectedKeyGen = FirstValidKeyGen case tlf.SingleTeam: // Teams get their crypt key from the service, no need to // rekey in KBFS. tid, err := handle.FirstResolvedWriter().AsTeam() if err != nil { return err } keys, keyGen, err := fbo.config.KBPKI().GetTeamTLFCryptKeys( ctx, tid, UnspecifiedKeyGen) if err != nil { return err } if keyGen < FirstValidKeyGen { return errors.WithStack( InvalidKeyGenerationError{md.TlfID(), keyGen}) } expectedKeyGen = keyGen md.bareMd.SetLatestKeyGenerationForTeamTLF(keyGen) key, ok := keys[keyGen] if !ok { return errors.WithStack( InvalidKeyGenerationError{md.TlfID(), keyGen}) } tlfCryptKey = &key } keyGen := md.LatestKeyGeneration() if keyGen != expectedKeyGen { return InvalidKeyGenerationError{md.TlfID(), keyGen} } // create a dblock since one doesn't exist yet newDblock, info, readyBlockData, err := ResetRootBlock(ctx, fbo.config, md) if err != nil { return err } // Some other thread got here first, so give up and let it go // before we push anything to the servers. if h, _ := fbo.getHead(lState); h != (ImmutableRootMetadata{}) { fbo.log.CDebugf(ctx, "Head was already set, aborting") return nil } if err = PutBlockCheckLimitErrs(ctx, fbo.config.BlockServer(), fbo.config.Reporter(), md.TlfID(), info.BlockPointer, readyBlockData, md.GetTlfHandle().GetCanonicalName()); err != nil { return err } if err = fbo.config.BlockCache().Put( info.BlockPointer, fbo.id(), newDblock, TransientEntry); err != nil { return err } bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md) if err != nil { return err } err = fbo.finalizeBlocks(bps) if err != nil { return err } // Finally, write out the new metadata. TODO: if journaling is // enabled, we should bypass it here, so we don't have to worry // about delayed conflicts (since this is essentially a rekey, and // we always bypass the journal for rekeys). The caller will have // to intelligently deal with a conflict. 
	irmd, err := fbo.config.MDOps().Put(
		ctx, md, session.VerifyingKey, nil, keybase1.MDPriorityNormal)
	if err != nil {
		return err
	}

	md.loadCachedBlockChanges(ctx, bps, fbo.log)

	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	if fbo.head != (ImmutableRootMetadata{}) {
		return errors.Errorf(
			"%v: Unexpected MD ID during new MD initialization: %v",
			md.TlfID(), fbo.head.mdID)
	}

	err = fbo.setNewInitialHeadLocked(ctx, lState, irmd)
	if err != nil {
		return err
	}

	// cache any new TLF crypt key
	if tlfCryptKey != nil {
		err = fbo.config.KeyCache().PutTLFCryptKey(
			md.TlfID(), keyGen, *tlfCryptKey)
		if err != nil {
			return err
		}
	}

	return nil
}

func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context,
	h *TlfHandle) (keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error) {
	return nil, tlf.ID{}, errors.New("GetTLFCryptKeys is not supported by folderBranchOps")
}

func (fbo *folderBranchOps) GetTLFID(ctx context.Context, h *TlfHandle) (tlf.ID, error) {
	return tlf.ID{}, errors.New("GetTLFID is not supported by folderBranchOps")
}

func (fbo *folderBranchOps) GetOrCreateRootNode(
	ctx context.Context, h *TlfHandle, branch BranchName) (
	node Node, ei EntryInfo, err error) {
	return nil, EntryInfo{}, errors.New("GetOrCreateRootNode is not supported by folderBranchOps")
}

func (fbo *folderBranchOps) GetRootNode(
	ctx context.Context, h *TlfHandle, branch BranchName) (
	node Node, ei EntryInfo, err error) {
	return nil, EntryInfo{}, errors.New("GetRootNode is not supported by folderBranchOps")
}

func (fbo *folderBranchOps) checkNode(node Node) error {
	fb := node.GetFolderBranch()
	if fb != fbo.folderBranch {
		return WrongOpsError{fbo.folderBranch, fb}
	}
	return nil
}

// SetInitialHeadFromServer sets the head to the given
// ImmutableRootMetadata, which must be retrieved from the MD server.
func (fbo *folderBranchOps) SetInitialHeadFromServer(
	ctx context.Context, md ImmutableRootMetadata) (err error) {
	fbo.log.CDebugf(ctx, "SetInitialHeadFromServer, revision=%d (%s)",
		md.Revision(), md.MergedStatus())
	defer func() {
		fbo.deferLog.CDebugf(ctx,
			"SetInitialHeadFromServer, revision=%d (%s) done: %+v",
			md.Revision(), md.MergedStatus(), err)
	}()

	if md.IsReadable() && fbo.config.Mode() != InitMinimal {
		// We `Get` the root block to ensure downstream prefetches occur.
		_ = fbo.config.BlockOps().BlockRetriever().Request(ctx,
			defaultOnDemandRequestPriority, md, md.data.Dir.BlockPointer,
			&DirBlock{}, TransientEntry)
	} else {
		fbo.log.CDebugf(ctx,
			"Setting an unreadable head with revision=%d", md.Revision())
	}

	// Return early if the head is already set. This avoids taking
	// mdWriterLock for no reason, and it also avoids any side effects
	// (e.g., calling `identifyOnce` and downloading the merged
	// head) if head is already set.
	lState := makeFBOLockState()
	head, headStatus := fbo.getHead(lState)
	if headStatus == headTrusted && head != (ImmutableRootMetadata{}) && head.mdID == md.mdID {
		fbo.log.CDebugf(ctx, "Head MD already set to revision %d (%s), no "+
			"need to set initial head again", md.Revision(), md.MergedStatus())
		return nil
	}

	return runUnlessCanceled(ctx, func() error {
		fb := FolderBranch{md.TlfID(), MasterBranch}
		if fb != fbo.folderBranch {
			return WrongOpsError{fbo.folderBranch, fb}
		}

		// Always identify first when trying to initialize the folder,
		// even if we turn out not to be a writer. (We can't rely on
		// the identifyOnce call in getMDLocked, because that isn't
		// called from the initialization code path when the local
		// user is not a valid writer.)
Also, we want to make sure we // fail before we set the head, otherwise future calls will // succeed incorrectly. err = fbo.identifyOnce(ctx, md.ReadOnly()) if err != nil { return err } lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) if md.MergedStatus() == Unmerged { mdops := fbo.config.MDOps() mergedMD, err := mdops.GetForTLF(ctx, fbo.id(), nil) if err != nil { return err } func() { fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false) }() } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) // Only update the head the first time; later it will be // updated either directly via writes or through the // background update processor. if fbo.head == (ImmutableRootMetadata{}) { err = fbo.setInitialHeadTrustedLocked(ctx, lState, md) if err != nil { return err } } else if headStatus == headUntrusted { err = fbo.validateHeadLocked(ctx, lState, md) if err != nil { return err } } return nil }) } // SetInitialHeadToNew creates a brand-new ImmutableRootMetadata // object and sets the head to that. This is trusted. func (fbo *folderBranchOps) SetInitialHeadToNew( ctx context.Context, id tlf.ID, handle *TlfHandle) (err error) { fbo.log.CDebugf(ctx, "SetInitialHeadToNew %s", id) defer func() { fbo.deferLog.CDebugf(ctx, "SetInitialHeadToNew %s done: %+v", id, err) }() rmd, err := makeInitialRootMetadata( fbo.config.MetadataVersion(), id, handle) if err != nil { return err } return runUnlessCanceled(ctx, func() error { fb := FolderBranch{rmd.TlfID(), MasterBranch} if fb != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, fb} } // Always identify first when trying to initialize the folder, // even if we turn out not to be a writer. (We can't rely on // the identifyOnce call in getMDLocked, because that isn't // called from the initialization code path when the local // user is not a valid writer.) Also, we want to make sure we // fail before we set the head, otherwise future calls will // succeed incorrectly. err = fbo.identifyOnce(ctx, rmd.ReadOnly()) if err != nil { return err } lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.initMDLocked(ctx, lState, rmd) }) } func getNodeIDStr(n Node) string { if n == nil { return "NodeID(nil)" } return fmt.Sprintf("NodeID(%v)", n.GetID()) } func (fbo *folderBranchOps) getRootNode(ctx context.Context) ( node Node, ei EntryInfo, handle *TlfHandle, err error) { fbo.log.CDebugf(ctx, "getRootNode") defer func() { fbo.deferLog.CDebugf(ctx, "getRootNode done: %s %+v", getNodeIDStr(node), err) }() lState := makeFBOLockState() var md ImmutableRootMetadata md, err = fbo.getMDForReadLocked(ctx, lState, mdReadNoIdentify) if _, ok := err.(MDWriteNeededInRequest); ok { func() { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite) }() } if err != nil { return nil, EntryInfo{}, nil, err } // we may be an unkeyed client if err := isReadableOrError(ctx, fbo.config.KBPKI(), md.ReadOnly()); err != nil { return nil, EntryInfo{}, nil, err } handle = md.GetTlfHandle() node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer, string(handle.GetCanonicalName()), nil) if err != nil { return nil, EntryInfo{}, nil, err } return node, md.Data().Dir.EntryInfo, handle, nil } type makeNewBlock func() Block // pathFromNodeHelper() shouldn't be called except by the helper // functions below. 
func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) { p := fbo.nodeCache.PathFromNode(n) if !p.isValid() { return path{}, InvalidPathError{p} } return p, nil } // Helper functions to clarify uses of pathFromNodeHelper() (see // nodeCache comments). func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) { return fbo.pathFromNodeHelper(n) } func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked( lState *lockState, n Node) (path, error) { fbo.mdWriterLock.AssertLocked(lState) return fbo.pathFromNodeHelper(n) } func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) ( children map[string]EntryInfo, err error) { fbo.log.CDebugf(ctx, "GetDirChildren %s", getNodeIDStr(dir)) defer func() { fbo.deferLog.CDebugf(ctx, "GetDirChildren %s done, %d entries: %+v", getNodeIDStr(dir), len(children), err) }() err = fbo.checkNode(dir) if err != nil { return nil, err } err = runUnlessCanceled(ctx, func() error { var err error lState := makeFBOLockState() dirPath, err := fbo.pathFromNodeForRead(dir) if err != nil { return err } if fbo.nodeCache.IsUnlinked(dir) { fbo.log.CDebugf(ctx, "Returning an empty children set for "+ "unlinked directory %v", dirPath.tailPointer()) return nil } md, err := fbo.getMDForReadNeedIdentify(ctx, lState) if err != nil { return err } children, err = fbo.blocks.GetDirtyDirChildren( ctx, lState, md.ReadOnly(), dirPath) if err != nil { return err } return nil }) if err != nil { return nil, err } return children, nil } func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) ( node Node, ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "Lookup %s %s", getNodeIDStr(dir), name) defer func() { fbo.deferLog.CDebugf(ctx, "Lookup %s %s done: %v %+v", getNodeIDStr(dir), name, getNodeIDStr(node), err) }() err = fbo.checkNode(dir) if err != nil { return nil, EntryInfo{}, err } var de DirEntry err = runUnlessCanceled(ctx, func() error { if fbo.nodeCache.IsUnlinked(dir) { fbo.log.CDebugf(ctx, "Refusing a lookup for unlinked directory %v", fbo.nodeCache.PathFromNode(dir).tailPointer()) return NoSuchNameError{name} } lState := makeFBOLockState() md, err := fbo.getMDForReadNeedIdentify(ctx, lState) if err != nil { return err } node, de, err = fbo.blocks.Lookup(ctx, lState, md.ReadOnly(), dir, name) if err != nil { return err } return nil }) if err != nil { return nil, EntryInfo{}, err } return node, de.EntryInfo, nil } // statEntry is like Stat, but it returns a DirEntry. This is used by // tests. func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) ( de DirEntry, err error) { err = fbo.checkNode(node) if err != nil { return DirEntry{}, err } lState := makeFBOLockState() nodePath, err := fbo.pathFromNodeForRead(node) if err != nil { return DirEntry{}, err } var md ImmutableRootMetadata if nodePath.hasValidParent() { md, err = fbo.getMDForReadNeedIdentify(ctx, lState) } else { // If nodePath has no valid parent, it's just the TLF // root, so we don't need an identify in this case. md, err = fbo.getMDForReadNoIdentify(ctx, lState) } if err != nil { return DirEntry{}, err } if nodePath.hasValidParent() { de, err = fbo.blocks.GetDirtyEntryEvenIfDeleted( ctx, lState, md.ReadOnly(), nodePath) if err != nil { return DirEntry{}, err } } else { // nodePath is just the root. 
de = md.data.Dir de = fbo.blocks.UpdateDirtyEntry(ctx, lState, de) } return de, nil } var zeroPtr BlockPointer type blockState struct { blockPtr BlockPointer block Block readyBlockData ReadyBlockData syncedCb func() error oldPtr BlockPointer } func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) ( ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "Stat %s", getNodeIDStr(node)) defer func() { fbo.deferLog.CDebugf(ctx, "Stat %s done: %+v", getNodeIDStr(node), err) }() var de DirEntry err = runUnlessCanceled(ctx, func() error { de, err = fbo.statEntry(ctx, node) return err }) if err != nil { return EntryInfo{}, err } return de.EntryInfo, nil } func (fbo *folderBranchOps) GetNodeMetadata(ctx context.Context, node Node) ( res NodeMetadata, err error) { fbo.log.CDebugf(ctx, "GetNodeMetadata %s", getNodeIDStr(node)) defer func() { fbo.deferLog.CDebugf(ctx, "GetNodeMetadata %s done: %+v", getNodeIDStr(node), err) }() var de DirEntry err = runUnlessCanceled(ctx, func() error { de, err = fbo.statEntry(ctx, node) return err }) if err != nil { return res, err } res.BlockInfo = de.BlockInfo id := de.TeamWriter.AsUserOrTeam() if id.IsNil() { id = de.Writer } if id.IsNil() { id = de.Creator } res.LastWriterUnverified, err = fbo.config.KBPKI().GetNormalizedUsername(ctx, id) if err != nil { return res, err } prefetchStatus := fbo.config.PrefetchStatus(ctx, fbo.id(), res.BlockInfo.BlockPointer) res.PrefetchStatus = prefetchStatus.String() return res, nil } // blockPutState is an internal structure to track data when putting blocks type blockPutState struct { blockStates []blockState } func newBlockPutState(length int) *blockPutState { bps := &blockPutState{} bps.blockStates = make([]blockState, 0, length) return bps } // addNewBlock tracks a new block that will be put. If syncedCb is // non-nil, it will be called whenever the put for that block is // complete (whether or not the put resulted in an error). Currently // it will not be called if the block is never put (due to an earlier // error). func (bps *blockPutState) addNewBlock( blockPtr BlockPointer, block Block, readyBlockData ReadyBlockData, syncedCb func() error) { bps.blockStates = append(bps.blockStates, blockState{blockPtr, block, readyBlockData, syncedCb, zeroPtr}) } // saveOldPtr stores the given BlockPointer as the old (pre-readied) // pointer for the most recent blockState. func (bps *blockPutState) saveOldPtr(oldPtr BlockPointer) { bps.blockStates[len(bps.blockStates)-1].oldPtr = oldPtr } func (bps *blockPutState) mergeOtherBps(other *blockPutState) { bps.blockStates = append(bps.blockStates, other.blockStates...) } func (bps *blockPutState) removeOtherBps(other *blockPutState) { if len(other.blockStates) == 0 { return } otherPtrs := make(map[BlockPointer]bool, len(other.blockStates)) for _, bs := range other.blockStates { otherPtrs[bs.blockPtr] = true } // Assume that `other` is a subset of `bps` when initializing the // slice length. newLen := len(bps.blockStates) - len(other.blockStates) if newLen <= 0 { newLen = 1 } // Remove any blocks that appear in `other`. 
newBlockStates := make([]blockState, 0, newLen) for _, bs := range bps.blockStates { if otherPtrs[bs.blockPtr] { continue } newBlockStates = append(newBlockStates, bs) } bps.blockStates = newBlockStates } func (bps *blockPutState) DeepCopy() *blockPutState { newBps := &blockPutState{} newBps.blockStates = make([]blockState, len(bps.blockStates)) copy(newBps.blockStates, bps.blockStates) return newBps } type localBcache map[BlockPointer]*DirBlock // Returns whether the given error is one that shouldn't block the // removal of a file or directory. // // TODO: Consider other errors recoverable, e.g. ones that arise from // present but corrupted blocks? func isRecoverableBlockErrorForRemoval(err error) bool { return isRecoverableBlockError(err) } func isRetriableError(err error, retries int) bool { _, isExclOnUnmergedError := err.(ExclOnUnmergedError) _, isUnmergedSelfConflictError := err.(UnmergedSelfConflictError) recoverable := isExclOnUnmergedError || isUnmergedSelfConflictError || isRecoverableBlockError(err) return recoverable && retries < maxRetriesOnRecoverableErrors } func (fbo *folderBranchOps) finalizeBlocks(bps *blockPutState) error { if bps == nil { return nil } bcache := fbo.config.BlockCache() for _, blockState := range bps.blockStates { newPtr := blockState.blockPtr // only cache this block if we made a brand new block, not if // we just incref'd some other block. if !newPtr.IsFirstRef() { continue } if err := bcache.Put(newPtr, fbo.id(), blockState.block, TransientEntry); err != nil { return err } } return nil } // Returns true if the passed error indicates a revision conflict. func isRevisionConflict(err error) bool { if err == nil { return false } _, isConflictRevision := err.(kbfsmd.ServerErrorConflictRevision) _, isConflictPrevRoot := err.(kbfsmd.ServerErrorConflictPrevRoot) _, isConflictDiskUsage := err.(kbfsmd.ServerErrorConflictDiskUsage) _, isConditionFailed := err.(kbfsmd.ServerErrorConditionFailed) _, isConflictFolderMapping := err.(kbfsmd.ServerErrorConflictFolderMapping) _, isJournal := err.(MDJournalConflictError) return isConflictRevision || isConflictPrevRoot || isConflictDiskUsage || isConditionFailed || isConflictFolderMapping || isJournal } func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context, lState *lockState, md *RootMetadata, bps *blockPutState, excl Excl, notifyFn func(ImmutableRootMetadata) error) ( err error) { fbo.mdWriterLock.AssertLocked(lState) // finally, write out the new metadata mdops := fbo.config.MDOps() doUnmergedPut := true mergedRev := kbfsmd.RevisionUninitialized oldPrevRoot := md.PrevRoot() var irmd ImmutableRootMetadata // This puts on a delay on any cancellations arriving to ctx. It is intended // to work sort of like a critical section, except that there isn't an // explicit call to exit the critical section. The cancellation, if any, is // triggered after a timeout (i.e. // fbo.config.DelayedCancellationGracePeriod()). // // The purpose of trying to avoid cancellation once we start MD write is to // avoid having an unpredictable perceived MD state. That is, when // runUnlessCanceled returns Canceled on cancellation, application receives // an EINTR, and would assume the operation didn't succeed. But the MD write // continues, and there's a chance the write will succeed, meaning the // operation succeeds. This contradicts with the application's perception // through error code and can lead to horrible situations. 
An easily caught // situation is when application calls Create with O_EXCL set, gets an EINTR // while MD write succeeds, retries and gets an EEXIST error. If users hit // Ctrl-C, this might not be a big deal. However, it also happens for other // interrupts. For applications that use signals to communicate, e.g. // SIGALRM and SIGUSR1, this can happen pretty often, which renders broken. if err = EnableDelayedCancellationWithGracePeriod( ctx, fbo.config.DelayedCancellationGracePeriod()); err != nil { return err } // we don't explicitly clean up (by using a defer) CancellationDelayer here // because sometimes fuse makes another call using the same ctx. For example, in // fuse's Create call handler, a dir.Create is followed by an Attr call. If // we do a deferred cleanup here, if an interrupt has been received, it can // cause ctx to be canceled before Attr call finishes, which causes FUSE to // return EINTR for the Create request. But at this point, the request may // have already succeeded. Returning EINTR makes application thinks the file // is not created successfully. err = fbo.finalizeBlocks(bps) if err != nil { return err } session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return err } if fbo.isMasterBranchLocked(lState) { // only do a normal Put if we're not already staged. irmd, err = mdops.Put( ctx, md, session.VerifyingKey, nil, keybase1.MDPriorityNormal) if doUnmergedPut = isRevisionConflict(err); doUnmergedPut { fbo.log.CDebugf(ctx, "Conflict: %v", err) mergedRev = md.Revision() if excl == WithExcl { // If this was caused by an exclusive create, we shouldn't do an // UnmergedPut, but rather try to get newest update from server, and // retry afterwards. err = fbo.getAndApplyMDUpdates(ctx, lState, nil, fbo.applyMDUpdatesLocked) if err != nil { return err } return ExclOnUnmergedError{} } } else if err != nil { return err } } else if excl == WithExcl { return ExclOnUnmergedError{} } doResolve := false resolveMergedRev := mergedRev if doUnmergedPut { // We're out of date, and this is not an exclusive write, so put it as an // unmerged MD. irmd, err = mdops.PutUnmerged(ctx, md, session.VerifyingKey) if isRevisionConflict(err) { // Self-conflicts are retried in `doMDWriteWithRetry`. return UnmergedSelfConflictError{err} } else if err != nil { // If a PutUnmerged fails, we are in a bad situation: if // we fail, but the put succeeded, then dirty data will // remain cached locally and will be re-tried // (non-idempotently) on the next sync call. This should // be a very rare situation when journaling is enabled, so // instead let's pretend it succeeded so that the cached // data is cleared and the nodeCache is updated. If we're // wrong, and the update didn't make it to the server, // then the next call will get an // UnmergedSelfConflictError but fail to find any new // updates and fail the operation, but things will get // fixed up once conflict resolution finally completes. // // TODO: how confused will the kernel cache get if the // pointers are updated but the file system operation // still gets an error returned by the wrapper function // that calls us (in the event of a user cancellation)? 
fbo.log.CInfof(ctx, "Ignoring a PutUnmerged error: %+v", err) err = encryptMDPrivateData( ctx, fbo.config.Codec(), fbo.config.Crypto(), fbo.config.Crypto(), fbo.config.KeyManager(), session.UID, md) if err != nil { return err } mdID, err := kbfsmd.MakeID(fbo.config.Codec(), md.bareMd) if err != nil { return err } irmd = MakeImmutableRootMetadata( md, session.VerifyingKey, mdID, fbo.config.Clock().Now(), true) err = fbo.config.MDCache().Put(irmd) if err != nil { return err } } bid := md.BID() fbo.setBranchIDLocked(lState, bid) doResolve = true } else { fbo.setBranchIDLocked(lState, NullBranchID) if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() { // Queue this folder for rekey if the bit was set and it's not a copy. // This is for the case where we're coming out of conflict resolution. // So why don't we do this in finalizeResolution? Well, we do but we don't // want to block on a rekey so we queue it. Because of that it may fail // due to a conflict with some subsequent write. By also handling it here // we'll always retry if we notice we haven't been successful in clearing // the bit yet. Note that I haven't actually seen this happen but it seems // theoretically possible. defer fbo.config.RekeyQueue().Enqueue(md.TlfID()) } } md.loadCachedBlockChanges(ctx, bps, fbo.log) rebased := (oldPrevRoot != md.PrevRoot()) if rebased { bid := md.BID() fbo.setBranchIDLocked(lState, bid) doResolve = true resolveMergedRev = kbfsmd.RevisionUninitialized } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased) if err != nil { return err } // Archive the old, unref'd blocks if journaling is off. if !TLFJournalEnabled(fbo.config, fbo.id()) { fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly()) } // Call Resolve() after the head is set, to make sure it fetches // the correct unmerged MD range during resolution. if doResolve { fbo.cr.Resolve(ctx, md.Revision(), resolveMergedRev) } if notifyFn != nil { err := notifyFn(irmd) if err != nil { return err } } return nil } func (fbo *folderBranchOps) waitForJournalLocked(ctx context.Context, lState *lockState, jServer *JournalServer) error { fbo.mdWriterLock.AssertLocked(lState) if !TLFJournalEnabled(fbo.config, fbo.id()) { // Nothing to do. return nil } if err := jServer.Wait(ctx, fbo.id()); err != nil { return err } // Make sure everything flushed successfully, since we're holding // the writer lock, no other revisions could have snuck in. jStatus, err := jServer.JournalStatus(fbo.id()) if err != nil { return err } if jStatus.RevisionEnd != kbfsmd.RevisionUninitialized { return errors.Errorf("Couldn't flush all MD revisions; current "+ "revision end for the journal is %d", jStatus.RevisionEnd) } if jStatus.LastFlushErr != "" { return errors.Errorf("Couldn't flush the journal: %s", jStatus.LastFlushErr) } return nil } func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context, lState *lockState, md *RootMetadata, lastWriterVerifyingKey kbfscrypto.VerifyingKey) (err error) { fbo.mdWriterLock.AssertLocked(lState) oldPrevRoot := md.PrevRoot() // Write out the new metadata. If journaling is enabled, we don't // want the rekey to hit the journal and possibly end up on a // conflict branch, so wait for the journal to flush and then push // straight to the server. TODO: we're holding the writer lock // while flushing the journal here (just like for exclusive // writes), which may end up blocking incoming writes for a long // time. 
Rekeys are pretty rare, but if this becomes an issue // maybe we should consider letting these hit the journal and // scrubbing them when converting it to a branch. mdOps := fbo.config.MDOps() if jServer, err := GetJournalServer(fbo.config); err == nil { if err = fbo.waitForJournalLocked(ctx, lState, jServer); err != nil { return err } mdOps = jServer.delegateMDOps } var key kbfscrypto.VerifyingKey if md.IsWriterMetadataCopiedSet() { key = lastWriterVerifyingKey } else { var err error session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return err } key = session.VerifyingKey } irmd, err := mdOps.Put(ctx, md, key, nil, keybase1.MDPriorityNormal) isConflict := isRevisionConflict(err) if err != nil && !isConflict { return err } if isConflict { // Drop this block. We've probably collided with someone also // trying to rekey the same folder but that's not necessarily // the case. We'll queue another rekey just in case. It should // be safe as it's idempotent. We don't want any rekeys present // in unmerged history or that will just make a mess. fbo.config.RekeyQueue().Enqueue(md.TlfID()) return RekeyConflictError{err} } fbo.setBranchIDLocked(lState, NullBranchID) rebased := (oldPrevRoot != md.PrevRoot()) if rebased { bid := md.BID() fbo.setBranchIDLocked(lState, bid) fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized) } md.loadCachedBlockChanges(ctx, nil, fbo.log) fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased) if err != nil { return err } // Explicitly set the latest merged revision, since if journaling // is on, `setHeadLocked` will not do it for us (even though // rekeys bypass the journal). fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false) return nil } func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *GCOp) ( err error) { lState := makeFBOLockState() // Lock the folder so we can get an internally-consistent MD // revision number. fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState) if err != nil { return err } if md.MergedStatus() == Unmerged { return UnexpectedUnmergedPutError{} } md.AddOp(gco) // TODO: if the revision number of this new commit is sequential // with `LatestRev`, we can probably change this to // `gco.LatestRev+1`. md.SetLastGCRevision(gco.LatestRev) bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md) if err != nil { return err } oldPrevRoot := md.PrevRoot() err = fbo.finalizeBlocks(bps) if err != nil { return err } session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return err } // finally, write out the new metadata irmd, err := fbo.config.MDOps().Put( ctx, md, session.VerifyingKey, nil, keybase1.MDPriorityNormal) if err != nil { // Don't allow garbage collection to put us into a conflicting // state; just wait for the next period. 
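// Note: unlike regular writes, a failed GC put is simply dropped here
// rather than retried on an unmerged branch; presumably the next GC
// period picks the work up again with fresh data.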
return err } fbo.setBranchIDLocked(lState, NullBranchID) md.loadCachedBlockChanges(ctx, bps, fbo.log) rebased := (oldPrevRoot != md.PrevRoot()) if rebased { bid := md.BID() fbo.setBranchIDLocked(lState, bid) fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized) } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased) if err != nil { return err } return fbo.notifyBatchLocked(ctx, lState, irmd) } func checkDisallowedPrefixes(name string, mode InitMode) error { if mode == InitSingleOp { // Allow specialized, single-op KBFS programs (like the kbgit // remote helper) to bypass the disallowed prefix check. return nil } for _, prefix := range disallowedPrefixes { if strings.HasPrefix(name, prefix) { return DisallowedPrefixError{name, prefix} } } return nil } func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context, lState *lockState, md ReadOnlyRootMetadata, dirPath path, newName string) error { // Check that the directory isn't past capacity already. var currSize uint64 if dirPath.hasValidParent() { de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath) if err != nil { return err } currSize = de.Size } else { // dirPath is just the root. currSize = md.data.Dir.Size } // Just an approximation since it doesn't include the size of the // directory entry itself, but that's ok -- at worst it'll be an // off-by-one-entry error, and since there's a maximum name length // we can't get in too much trouble. if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() { return DirTooBigError{dirPath, currSize + uint64(len(newName)), fbo.config.MaxDirBytes()} } return nil } // PathType returns path type func (fbo *folderBranchOps) PathType() PathType { switch fbo.folderBranch.Tlf.Type() { case tlf.Public: return PublicPathType case tlf.Private: return PrivatePathType case tlf.SingleTeam: return SingleTeamPathType default: panic(fmt.Sprintf("Unknown TLF type: %s", fbo.folderBranch.Tlf.Type())) } } // canonicalPath returns full canonical path for dir node and name. func (fbo *folderBranchOps) canonicalPath(ctx context.Context, dir Node, name string) (string, error) { dirPath, err := fbo.pathFromNodeForRead(dir) if err != nil { return "", err } return BuildCanonicalPath(fbo.PathType(), dirPath.String(), name), nil } func (fbo *folderBranchOps) signalWrite() { select { case fbo.syncNeededChan <- struct{}{}: // Kick off a merkle root fetch in the background, so that it's // ready by the time we do the SyncAll. fbo.merkleFetches.Add(1) go func() { defer fbo.merkleFetches.Done() newCtx := fbo.ctxWithFBOID(context.Background()) _, err := fbo.config.KBPKI().GetCurrentMerkleRoot(newCtx) if err != nil { fbo.log.CDebugf(newCtx, "Couldn't fetch merkle root: %+v", err) } }() default: } // A local write always means any ongoing CR should be canceled, // because the set of unmerged writes has changed. fbo.cr.ForceCancel() } func (fbo *folderBranchOps) syncDirUpdateOrSignal( ctx context.Context, lState *lockState) error { if fbo.config.BGFlushDirOpBatchSize() == 1 { return fbo.syncAllLocked(ctx, lState, NoExcl) } fbo.signalWrite() return nil } func (fbo *folderBranchOps) checkForUnlinkedDir(dir Node) error { // Disallow directory operations within an unlinked directory. // Shells don't seem to allow it, and it will just pollute the dir // entry cache with unsyncable entries. 
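// The unlinked flag lives in the in-memory node cache, so this check
// is cheap and doesn't touch the block or MD servers.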
if fbo.nodeCache.IsUnlinked(dir) { dirPath := fbo.nodeCache.PathFromNode(dir).String() return errors.WithStack(UnsupportedOpInUnlinkedDirError{dirPath}) } return nil } // entryType must not by Sym. func (fbo *folderBranchOps) createEntryLocked( ctx context.Context, lState *lockState, dir Node, name string, entryType EntryType, excl Excl) (childNode Node, de DirEntry, err error) { fbo.mdWriterLock.AssertLocked(lState) if err := checkDisallowedPrefixes(name, fbo.config.Mode()); err != nil { return nil, DirEntry{}, err } if uint32(len(name)) > fbo.config.MaxNameBytes() { return nil, DirEntry{}, NameTooLongError{name, fbo.config.MaxNameBytes()} } if err := fbo.checkForUnlinkedDir(dir); err != nil { return nil, DirEntry{}, err } filename, err := fbo.canonicalPath(ctx, dir, name) if err != nil { return nil, DirEntry{}, err } // Verify we have permission to write (but don't make a successor yet). md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename) if err != nil { return nil, DirEntry{}, err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return nil, DirEntry{}, err } // We're not going to modify this copy of the dirblock, so just // fetch it for reading. dblock, err := fbo.blocks.GetDirtyDir( ctx, lState, md.ReadOnly(), dirPath, blockRead) if err != nil { return nil, DirEntry{}, err } // does name already exist? if _, ok := dblock.Children[name]; ok { return nil, DirEntry{}, NameExistsError{name} } if err := fbo.checkNewDirSize( ctx, lState, md.ReadOnly(), dirPath, name); err != nil { return nil, DirEntry{}, err } parentPtr := dirPath.tailPointer() co, err := newCreateOp(name, parentPtr, entryType) if err != nil { return nil, DirEntry{}, err } co.setFinalPath(dirPath) // create new data block var newBlock Block if entryType == Dir { newBlock = &DirBlock{ Children: make(map[string]DirEntry), } } else { newBlock = &FileBlock{} } // Cache update and operations until batch happens. Make a new // temporary ID and directory entry. newID, err := fbo.config.cryptoPure().MakeTemporaryBlockID() if err != nil { return nil, DirEntry{}, err } chargedTo, err := chargedToForTLF( ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), md.GetTlfHandle()) if err != nil { return nil, DirEntry{}, err } newPtr := BlockPointer{ ID: newID, KeyGen: md.LatestKeyGeneration(), DataVer: fbo.config.DataVersion(), DirectType: DirectBlock, Context: kbfsblock.MakeFirstContext( chargedTo, fbo.config.DefaultBlockType()), } co.AddRefBlock(newPtr) co.AddSelfUpdate(parentPtr) node, err := fbo.nodeCache.GetOrCreate(newPtr, name, dir) if err != nil { return nil, DirEntry{}, err } err = fbo.config.DirtyBlockCache().Put( fbo.id(), newPtr, fbo.branch(), newBlock) if err != nil { return nil, DirEntry{}, err } now := fbo.nowUnixNano() de = DirEntry{ BlockInfo: BlockInfo{ BlockPointer: newPtr, EncodedSize: 0, }, EntryInfo: EntryInfo{ Type: entryType, Size: 0, Mtime: now, Ctime: now, }, } dirCacheUndoFn := fbo.blocks.AddDirEntryInCache(lState, dirPath, name, de) fbo.dirOps = append(fbo.dirOps, cachedDirOp{co, []Node{dir, node}}) added := fbo.status.addDirtyNode(dir) cleanupFn := func() { if added { fbo.status.rmDirtyNode(dir) } fbo.dirOps = fbo.dirOps[:len(fbo.dirOps)-1] if dirCacheUndoFn != nil { dirCacheUndoFn(lState) } // Delete should never fail. _ = fbo.config.DirtyBlockCache().Delete(fbo.id(), newPtr, fbo.branch()) } defer func() { if err != nil && cleanupFn != nil { cleanupFn() } }() if entryType != Dir { // Dirty the file with a zero-byte write, to ensure the new // block is synced in SyncAll. 
TODO: remove this if we ever // embed 0-byte files in the directory entry itself. err = fbo.blocks.Write( ctx, lState, md.ReadOnly(), node, []byte{}, 0) if err != nil { return nil, DirEntry{}, err } oldCleanupFn := cleanupFn cleanupFn = func() { fbo.blocks.ClearCacheInfo(lState, fbo.nodeCache.PathFromNode(node)) oldCleanupFn() } } // It's safe to notify before we've synced, since it is only // sending invalidation notifications. At worst the upper layer // will just have to refresh its cache needlessly. err = fbo.notifyOneOp(ctx, lState, co, md.ReadOnly(), false) if err != nil { return nil, DirEntry{}, err } if excl == WithExcl { // Sync this change to the server. err := fbo.syncAllLocked(ctx, lState, WithExcl) _, isNoUpdatesWhileDirty := errors.Cause(err).(NoUpdatesWhileDirtyError) if isNoUpdatesWhileDirty { // If an exclusive write hits a conflict, it will try to // update, but won't be able to because of the dirty // directory entries. We need to clean up the dirty // entries here first before trying to apply the updates // again. By returning `ExclOnUnmergedError` below, we // force the caller to retry the whole operation again. fbo.log.CDebugf(ctx, "Clearing dirty entry before applying new "+ "updates for exclusive write") cleanupFn() cleanupFn = nil // Sync anything else that might be buffered (non-exclusively). err = fbo.syncAllLocked(ctx, lState, NoExcl) if err != nil { return nil, DirEntry{}, err } // Now we should be in a clean state, so this should work. err = fbo.getAndApplyMDUpdates( ctx, lState, nil, fbo.applyMDUpdatesLocked) if err != nil { return nil, DirEntry{}, err } return nil, DirEntry{}, ExclOnUnmergedError{} } else if err != nil { return nil, DirEntry{}, err } } else { err = fbo.syncDirUpdateOrSignal(ctx, lState) if err != nil { return nil, DirEntry{}, err } } return node, de, nil } func (fbo *folderBranchOps) maybeWaitForSquash( ctx context.Context, bid BranchID) { if bid != PendingLocalSquashBranchID { return } fbo.log.CDebugf(ctx, "Blocking until squash finishes") // Limit the time we wait to just under the ctx deadline if there // is one, or 10s if there isn't. deadline, ok := ctx.Deadline() if ok { deadline = deadline.Add(-1 * time.Second) } else { // Can't use config.Clock() since context doesn't respect it. deadline = time.Now().Add(10 * time.Second) } ctx, cancel := context.WithDeadline(ctx, deadline) defer cancel() // Wait for CR to finish. Note that if the user is issuing // concurrent writes, the current CR could be canceled, and when // the call belows returns, the branch still won't be squashed. // That's ok, this is just an optimization. err := fbo.cr.Wait(ctx) if err != nil { fbo.log.CDebugf(ctx, "Error while waiting for CR: %+v", err) } } func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context, lState *lockState, fn func(lState *lockState) error) error { doUnlock := false defer func() { if doUnlock { bid := fbo.bid fbo.mdWriterLock.Unlock(lState) // Don't let a pending squash get too big. fbo.maybeWaitForSquash(ctx, bid) } }() for i := 0; ; i++ { fbo.mdWriterLock.Lock(lState) doUnlock = true // Make sure we haven't been canceled before doing anything // too serious. 
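// A non-blocking select on ctx.Done() is enough here: we only want to
// notice a cancellation that has already happened, not wait for one
// while holding mdWriterLock.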
select { case <-ctx.Done(): return ctx.Err() default: } err := fn(lState) if isRetriableError(err, i) { fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err) // Release the lock to give someone else a chance doUnlock = false fbo.mdWriterLock.Unlock(lState) if _, ok := err.(ExclOnUnmergedError); ok { if err = fbo.cr.Wait(ctx); err != nil { return err } } else if _, ok := err.(UnmergedSelfConflictError); ok { // We can only get here if we are already on an // unmerged branch and an errored PutUnmerged did make // it to the mdserver. Let's force sync, with a fresh // context so the observer doesn't ignore the updates // (but tie the cancels together). newCtx := fbo.ctxWithFBOID(context.Background()) newCtx, cancel := context.WithCancel(newCtx) defer cancel() go func() { select { case <-ctx.Done(): cancel() case <-newCtx.Done(): } }() fbo.log.CDebugf(ctx, "Got a revision conflict while unmerged "+ "(%v); forcing a sync", err) err = fbo.getAndApplyNewestUnmergedHead(newCtx, lState) if err != nil { // TODO: we might be stuck at this point if we're // ahead of the unmerged branch on the server, in // which case we might want to just abandon any // cached updates and force a sync to the head. return err } cancel() } continue } else if err != nil { return err } return nil } } func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled( ctx context.Context, fn func(lState *lockState) error) error { return runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() return fbo.doMDWriteWithRetry(ctx, lState, fn) }) } func (fbo *folderBranchOps) CreateDir( ctx context.Context, dir Node, path string) ( n Node, ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "CreateDir %s %s", getNodeIDStr(dir), path) defer func() { fbo.deferLog.CDebugf(ctx, "CreateDir %s %s done: %v %+v", getNodeIDStr(dir), path, getNodeIDStr(n), err) }() err = fbo.checkNode(dir) if err != nil { return nil, EntryInfo{}, err } var retNode Node var retEntryInfo EntryInfo err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { node, de, err := fbo.createEntryLocked(ctx, lState, dir, path, Dir, NoExcl) // Don't set node and ei directly, as that can cause a // race when the Create is canceled. retNode = node retEntryInfo = de.EntryInfo return err }) if err != nil { return nil, EntryInfo{}, err } return retNode, retEntryInfo, nil } func (fbo *folderBranchOps) CreateFile( ctx context.Context, dir Node, path string, isExec bool, excl Excl) ( n Node, ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "CreateFile %s %s isExec=%v Excl=%s", getNodeIDStr(dir), path, isExec, excl) defer func() { fbo.deferLog.CDebugf(ctx, "CreateFile %s %s isExec=%v Excl=%s done: %v %+v", getNodeIDStr(dir), path, isExec, excl, getNodeIDStr(n), err) }() err = fbo.checkNode(dir) if err != nil { return nil, EntryInfo{}, err } var entryType EntryType if isExec { entryType = Exec } else { entryType = File } // If journaling is turned on, an exclusive create may end up on a // conflict branch. if excl == WithExcl && TLFJournalEnabled(fbo.config, fbo.id()) { fbo.log.CDebugf(ctx, "Exclusive create status is being discarded.") excl = NoExcl } if excl == WithExcl { if err = fbo.cr.Wait(ctx); err != nil { return nil, EntryInfo{}, err } } var retNode Node var retEntryInfo EntryInfo err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { // Don't set node and ei directly, as that can cause a // race when the Create is canceled. 
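// The results are staged in retNode/retEntryInfo and only handed back
// to the caller once doMDWriteWithRetryUnlessCanceled completes
// without error.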
node, de, err := fbo.createEntryLocked(ctx, lState, dir, path, entryType, excl) retNode = node retEntryInfo = de.EntryInfo return err }) if err != nil { return nil, EntryInfo{}, err } return retNode, retEntryInfo, nil } // notifyAndSyncOrSignal caches an op in memory and dirties the // relevant node, and then sends a notification for it. If batching // is on, it signals the write; otherwise it syncs the change. It // should only be called as the final instruction that can fail in a // method. func (fbo *folderBranchOps) notifyAndSyncOrSignal( ctx context.Context, lState *lockState, undoFn dirCacheUndoFn, nodesToDirty []Node, op op, md ReadOnlyRootMetadata) (err error) { fbo.dirOps = append(fbo.dirOps, cachedDirOp{op, nodesToDirty}) var addedNodes []Node for _, n := range nodesToDirty { added := fbo.status.addDirtyNode(n) if added { addedNodes = append(addedNodes, n) } } defer func() { if err != nil { for _, n := range addedNodes { fbo.status.rmDirtyNode(n) } fbo.dirOps = fbo.dirOps[:len(fbo.dirOps)-1] if undoFn != nil { undoFn(lState) } } }() // It's safe to notify before we've synced, since it is only // sending invalidation notifications. At worst the upper layer // will just have to refresh its cache needlessly. err = fbo.notifyOneOp(ctx, lState, op, md, false) if err != nil { return err } return fbo.syncDirUpdateOrSignal(ctx, lState) } func (fbo *folderBranchOps) createLinkLocked( ctx context.Context, lState *lockState, dir Node, fromName string, toPath string) (DirEntry, error) { fbo.mdWriterLock.AssertLocked(lState) if err := checkDisallowedPrefixes(fromName, fbo.config.Mode()); err != nil { return DirEntry{}, err } if uint32(len(fromName)) > fbo.config.MaxNameBytes() { return DirEntry{}, NameTooLongError{fromName, fbo.config.MaxNameBytes()} } if err := fbo.checkForUnlinkedDir(dir); err != nil { return DirEntry{}, err } // Verify we have permission to write (but don't make a successor yet). md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "") if err != nil { return DirEntry{}, err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return DirEntry{}, err } // We're not going to modify this copy of the dirblock, so just // fetch it for reading. dblock, err := fbo.blocks.GetDirtyDir( ctx, lState, md.ReadOnly(), dirPath, blockRead) if err != nil { return DirEntry{}, err } // TODO: validate inputs // does name already exist? if _, ok := dblock.Children[fromName]; ok { return DirEntry{}, NameExistsError{fromName} } if err := fbo.checkNewDirSize(ctx, lState, md.ReadOnly(), dirPath, fromName); err != nil { return DirEntry{}, err } parentPtr := dirPath.tailPointer() co, err := newCreateOp(fromName, parentPtr, Sym) if err != nil { return DirEntry{}, err } co.setFinalPath(dirPath) co.AddSelfUpdate(parentPtr) // Nothing below here can fail, so no need to clean up the dir // entry cache on a failure. If this ever panics, we need to add // cleanup code. 
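// Symlinks get no block of their own: the target path is stored
// inline in the directory entry (SymPath), with Size recording its
// length.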
// Create a direntry for the link, and then sync now := fbo.nowUnixNano() de := DirEntry{ EntryInfo: EntryInfo{ Type: Sym, Size: uint64(len(toPath)), SymPath: toPath, Mtime: now, Ctime: now, }, } dirCacheUndoFn := fbo.blocks.AddDirEntryInCache( lState, dirPath, fromName, de) err = fbo.notifyAndSyncOrSignal( ctx, lState, dirCacheUndoFn, []Node{dir}, co, md.ReadOnly()) if err != nil { return DirEntry{}, err } return de, nil } func (fbo *folderBranchOps) CreateLink( ctx context.Context, dir Node, fromName string, toPath string) ( ei EntryInfo, err error) { fbo.log.CDebugf(ctx, "CreateLink %s %s -> %s", getNodeIDStr(dir), fromName, toPath) defer func() { fbo.deferLog.CDebugf(ctx, "CreateLink %s %s -> %s done: %+v", getNodeIDStr(dir), fromName, toPath, err) }() err = fbo.checkNode(dir) if err != nil { return EntryInfo{}, err } var retEntryInfo EntryInfo err = fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { // Don't set ei directly, as that can cause a race when // the Create is canceled. de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath) retEntryInfo = de.EntryInfo return err }) if err != nil { return EntryInfo{}, err } return retEntryInfo, nil } // unrefEntry modifies md to unreference all relevant blocks for the // given entry. func (fbo *folderBranchOps) unrefEntryLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, ro op, dir path, de DirEntry, name string) error { fbo.mdWriterLock.AssertLocked(lState) if de.Type == Sym { return nil } unrefsToAdd := make(map[BlockPointer]bool) fbo.prepper.cacheBlockInfos([]BlockInfo{de.BlockInfo}) unrefsToAdd[de.BlockPointer] = true // construct a path for the child so we can unlink with it. childPath := dir.ChildPath(name, de.BlockPointer) // If this is an indirect block, we need to delete all of its // children as well. NOTE: non-empty directories can't be // removed, so no need to check for indirect directory blocks // here. if de.Type == File || de.Type == Exec { blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos( ctx, lState, kmd, childPath) if isRecoverableBlockErrorForRemoval(err) { msg := fmt.Sprintf("Recoverable block error encountered for unrefEntry(%v); continuing", childPath) fbo.log.CWarningf(ctx, "%s", msg) fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err) } else if err != nil { return err } fbo.prepper.cacheBlockInfos(blockInfos) for _, blockInfo := range blockInfos { unrefsToAdd[blockInfo.BlockPointer] = true } } // Any referenced blocks that were unreferenced since the last // sync can just be forgotten about. Note that any updated // pointers that are unreferenced will be fixed up during syncing. for _, dirOp := range fbo.dirOps { for i := len(dirOp.dirOp.Refs()) - 1; i >= 0; i-- { ref := dirOp.dirOp.Refs()[i] if _, ok := unrefsToAdd[ref]; ok { dirOp.dirOp.DelRefBlock(ref) delete(unrefsToAdd, ref) } } } for unref := range unrefsToAdd { ro.AddUnrefBlock(unref) } return nil } func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context, lState *lockState, md ReadOnlyRootMetadata, dir Node, dirPath path, name string) error { fbo.mdWriterLock.AssertLocked(lState) if err := fbo.checkForUnlinkedDir(dir); err != nil { return err } // We're not going to modify this copy of the dirblock, so just // fetch it for reading. 
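// Using blockRead keeps the parent block clean; the removal itself is
// staged via RemoveDirEntryInCache below and only written out at the
// next sync.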
pblock, err := fbo.blocks.GetDirtyDir(ctx, lState, md, dirPath, blockRead) if err != nil { return err } // make sure the entry exists de, ok := pblock.Children[name] if !ok { return NoSuchNameError{name} } parentPtr := dirPath.tailPointer() ro, err := newRmOp(name, parentPtr) if err != nil { return err } ro.setFinalPath(dirPath) ro.AddSelfUpdate(parentPtr) err = fbo.unrefEntryLocked(ctx, lState, md, ro, dirPath, de, name) if err != nil { return err } dirCacheUndoFn := fbo.blocks.RemoveDirEntryInCache( lState, dirPath, name, de) if de.Type == Dir { removedNode := fbo.nodeCache.Get(de.BlockPointer.Ref()) if removedNode != nil { // If it was a dirty directory, the removed node no longer // counts as dirty (it will never be sync'd). Note that // removed files will still be synced since any data // written to them via a handle stays in memory until the // sync actually happens. removed := fbo.status.rmDirtyNode(removedNode) if removed { oldUndoFn := dirCacheUndoFn dirCacheUndoFn = func(lState *lockState) { oldUndoFn(lState) fbo.status.addDirtyNode(removedNode) } } } } return fbo.notifyAndSyncOrSignal( ctx, lState, dirCacheUndoFn, []Node{dir}, ro, md.ReadOnly()) } func (fbo *folderBranchOps) removeDirLocked(ctx context.Context, lState *lockState, dir Node, dirName string) (err error) { fbo.mdWriterLock.AssertLocked(lState) // Verify we have permission to write (but don't make a successor yet). md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "") if err != nil { return err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return err } pblock, err := fbo.blocks.GetDirtyDir( ctx, lState, md.ReadOnly(), dirPath, blockRead) de, ok := pblock.Children[dirName] if !ok { return NoSuchNameError{dirName} } // construct a path for the child so we can check for an empty dir childPath := dirPath.ChildPath(dirName, de.BlockPointer) childBlock, err := fbo.blocks.GetDirtyDir( ctx, lState, md.ReadOnly(), childPath, blockRead) if isRecoverableBlockErrorForRemoval(err) { msg := fmt.Sprintf("Recoverable block error encountered for removeDirLocked(%v); continuing", childPath) fbo.log.CWarningf(ctx, "%s", msg) fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err) } else if err != nil { return err } else if len(childBlock.Children) > 0 { return DirNotEmptyError{dirName} } return fbo.removeEntryLocked( ctx, lState, md.ReadOnly(), dir, dirPath, dirName) } func (fbo *folderBranchOps) RemoveDir( ctx context.Context, dir Node, dirName string) (err error) { fbo.log.CDebugf(ctx, "RemoveDir %s %s", getNodeIDStr(dir), dirName) defer func() { fbo.deferLog.CDebugf(ctx, "RemoveDir %s %s done: %+v", getNodeIDStr(dir), dirName, err) }() err = fbo.checkNode(dir) if err != nil { return } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { return fbo.removeDirLocked(ctx, lState, dir, dirName) }) } func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node, name string) (err error) { fbo.log.CDebugf(ctx, "RemoveEntry %s %s", getNodeIDStr(dir), name) defer func() { fbo.deferLog.CDebugf(ctx, "RemoveEntry %s %s done: %+v", getNodeIDStr(dir), name, err) }() err = fbo.checkNode(dir) if err != nil { return err } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { // Verify we have permission to write (but no need to make // a successor yet). 
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "") if err != nil { return err } dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir) if err != nil { return err } return fbo.removeEntryLocked( ctx, lState, md.ReadOnly(), dir, dirPath, name) }) } func (fbo *folderBranchOps) renameLocked( ctx context.Context, lState *lockState, oldParent Node, oldName string, newParent Node, newName string) (err error) { fbo.mdWriterLock.AssertLocked(lState) if err := fbo.checkForUnlinkedDir(oldParent); err != nil { return err } if err := fbo.checkForUnlinkedDir(newParent); err != nil { return err } if err := checkDisallowedPrefixes(newName, fbo.config.Mode()); err != nil { return err } oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent) if err != nil { return err } newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent) if err != nil { return err } // Verify we have permission to write (but no need to make a // successor yet). md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "") if err != nil { return err } _, newPBlock, newDe, ro, err := fbo.blocks.PrepRename( ctx, lState, md.ReadOnly(), oldParentPath, oldName, newParentPath, newName) if err != nil { return err } // does name exist? replacedDe, ok := newPBlock.Children[newName] if ok { // Usually higher-level programs check these, but just in case. if replacedDe.Type == Dir && newDe.Type != Dir { return NotDirError{newParentPath.ChildPathNoPtr(newName)} } else if replacedDe.Type != Dir && newDe.Type == Dir { return NotFileError{newParentPath.ChildPathNoPtr(newName)} } if replacedDe.Type == Dir { // The directory must be empty. oldTargetDir, err := fbo.blocks.GetDirBlockForReading(ctx, lState, md.ReadOnly(), replacedDe.BlockPointer, newParentPath.Branch, newParentPath.ChildPathNoPtr(newName)) if err != nil { return err } if len(oldTargetDir.Children) != 0 { fbo.log.CWarningf(ctx, "Renaming over a non-empty directory "+ " (%s/%s) not allowed.", newParentPath, newName) return DirNotEmptyError{newName} } } // Delete the old block pointed to by this direntry. err := fbo.unrefEntryLocked( ctx, lState, md.ReadOnly(), ro, newParentPath, replacedDe, newName) if err != nil { return err } } else { // If the entry doesn't exist yet, see if the new name will // make the new parent directory too big. If the entry is // remaining in the same directory, only check the size // difference. checkName := newName if oldParent == newParent { if extra := len(newName) - len(oldName); extra <= 0 { checkName = "" } else { checkName = newName[:extra] } } if len(checkName) > 0 { if err := fbo.checkNewDirSize( ctx, lState, md.ReadOnly(), newParentPath, checkName); err != nil { return err } } } // Only the ctime changes on the directory entry itself. 
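// The entry's mtime is carried over unchanged; the rename is purely a
// namespace change from the entry's point of view.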
newDe.Ctime = fbo.nowUnixNano() dirCacheUndoFn, err := fbo.blocks.RenameDirEntryInCache( lState, oldParentPath, oldName, newParentPath, newName, newDe, replacedDe) if err != nil { return err } nodesToDirty := []Node{oldParent} if oldParent.GetID() != newParent.GetID() { nodesToDirty = append(nodesToDirty, newParent) } return fbo.notifyAndSyncOrSignal( ctx, lState, dirCacheUndoFn, nodesToDirty, ro, md.ReadOnly()) } func (fbo *folderBranchOps) Rename( ctx context.Context, oldParent Node, oldName string, newParent Node, newName string) (err error) { fbo.log.CDebugf(ctx, "Rename %s/%s -> %s/%s", getNodeIDStr(oldParent), oldName, getNodeIDStr(newParent), newName) defer func() { fbo.deferLog.CDebugf(ctx, "Rename %s/%s -> %s/%s done: %+v", getNodeIDStr(oldParent), oldName, getNodeIDStr(newParent), newName, err) }() err = fbo.checkNode(newParent) if err != nil { return err } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { // only works for paths within the same topdir if oldParent.GetFolderBranch() != newParent.GetFolderBranch() { return RenameAcrossDirsError{} } return fbo.renameLocked(ctx, lState, oldParent, oldName, newParent, newName) }) } func (fbo *folderBranchOps) Read( ctx context.Context, file Node, dest []byte, off int64) ( n int64, err error) { fbo.log.CDebugf(ctx, "Read %s %d %d", getNodeIDStr(file), len(dest), off) defer func() { fbo.deferLog.CDebugf(ctx, "Read %s %d %d (n=%d) done: %+v", getNodeIDStr(file), len(dest), off, n, err) }() err = fbo.checkNode(file) if err != nil { return 0, err } { filePath, err := fbo.pathFromNodeForRead(file) if err != nil { return 0, err } // It seems git isn't handling EINTR from some of its read calls (likely // fread), which causes it to get corrupted data (which leads to coredumps // later) when a read system call on pack files gets interrupted. This // enables delayed cancellation for Read if the file path contains `.git`. // // TODO: get a patch in git, wait for sufficiently long time for people to // upgrade, and remove this. // allow turning this feature off by env var to make life easier when we // try to fix git. if _, isSet := os.LookupEnv("KBFS_DISABLE_GIT_SPECIAL_CASE"); !isSet { for _, n := range filePath.path { if n.Name == ".git" { EnableDelayedCancellationWithGracePeriod(ctx, fbo.config.DelayedCancellationGracePeriod()) break } } } } // Don't let the goroutine below write directly to the return // variable, since if the context is canceled the goroutine might // outlast this function call, and end up in a read/write race // with the caller. var bytesRead int64 err = runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() // verify we have permission to read md, err := fbo.getMDForReadNeedIdentify(ctx, lState) if err != nil { return err } // Read using the `file` Node, not `filePath`, since the path // could change until we take `blockLock` for reading. bytesRead, err = fbo.blocks.Read( ctx, lState, md.ReadOnly(), file, dest, off) return err }) if err != nil { return 0, err } return bytesRead, nil } func (fbo *folderBranchOps) Write( ctx context.Context, file Node, data []byte, off int64) (err error) { fbo.log.CDebugf(ctx, "Write %s %d %d", getNodeIDStr(file), len(data), off) defer func() { fbo.deferLog.CDebugf(ctx, "Write %s %d %d done: %+v", getNodeIDStr(file), len(data), off, err) }() err = fbo.checkNode(file) if err != nil { return err } return runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() // Get the MD for reading. 
We won't modify it; we'll track the // unref changes on the side, and put them into the MD during the // sync. md, err := fbo.getMDForReadLocked(ctx, lState, mdReadNeedIdentify) if err != nil { return err } err = fbo.blocks.Write( ctx, lState, md.ReadOnly(), file, data, off) if err != nil { return err } fbo.status.addDirtyNode(file) fbo.signalWrite() return nil }) } func (fbo *folderBranchOps) Truncate( ctx context.Context, file Node, size uint64) (err error) { fbo.log.CDebugf(ctx, "Truncate %s %d", getNodeIDStr(file), size) defer func() { fbo.deferLog.CDebugf(ctx, "Truncate %s %d done: %+v", getNodeIDStr(file), size, err) }() err = fbo.checkNode(file) if err != nil { return err } return runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() // Get the MD for reading. We won't modify it; we'll track the // unref changes on the side, and put them into the MD during the // sync. md, err := fbo.getMDForReadLocked(ctx, lState, mdReadNeedIdentify) if err != nil { return err } err = fbo.blocks.Truncate( ctx, lState, md.ReadOnly(), file, size) if err != nil { return err } fbo.status.addDirtyNode(file) fbo.signalWrite() return nil }) } func (fbo *folderBranchOps) setExLocked( ctx context.Context, lState *lockState, file Node, ex bool) (err error) { fbo.mdWriterLock.AssertLocked(lState) filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file) if err != nil { return err } // Verify we have permission to write (no need to make a successor yet). md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "") if err != nil { return } de, err := fbo.blocks.GetDirtyEntryEvenIfDeleted( ctx, lState, md.ReadOnly(), filePath) if err != nil { return err } // If the file is a symlink, do nothing (to match ext4 // behavior). if de.Type == Sym || de.Type == Dir { fbo.log.CDebugf(ctx, "Ignoring setex on type %s", de.Type) return nil } if ex && (de.Type == File) { de.Type = Exec } else if !ex && (de.Type == Exec) { de.Type = File } else { // Treating this as a no-op, without updating the ctime, is a // POSIX violation, but it's an important optimization to keep // permissions-preserving rsyncs fast. fbo.log.CDebugf(ctx, "Ignoring no-op setex") return nil } de.Ctime = fbo.nowUnixNano() parentPtr := filePath.parentPath().tailPointer() sao, err := newSetAttrOp(filePath.tailName(), parentPtr, exAttr, filePath.tailPointer()) if err != nil { return err } sao.AddSelfUpdate(parentPtr) // If the node has been unlinked, we can safely ignore this setex. 
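// Even for a removed file, the attribute change is still applied to
// the cached entry below so that stat calls through any still-open
// handle reflect it.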
if fbo.nodeCache.IsUnlinked(file) { fbo.log.CDebugf(ctx, "Skipping setex for a removed file %v", filePath.tailPointer()) fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile( ctx, lState, sao, de) return nil } sao.setFinalPath(filePath) dirCacheUndoFn := fbo.blocks.SetAttrInDirEntryInCache( lState, filePath, de, sao.Attr) return fbo.notifyAndSyncOrSignal( ctx, lState, dirCacheUndoFn, []Node{file}, sao, md.ReadOnly()) } func (fbo *folderBranchOps) SetEx( ctx context.Context, file Node, ex bool) (err error) { fbo.log.CDebugf(ctx, "SetEx %s %t", getNodeIDStr(file), ex) defer func() { fbo.deferLog.CDebugf(ctx, "SetEx %s %t done: %+v", getNodeIDStr(file), ex, err) }() err = fbo.checkNode(file) if err != nil { return } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { return fbo.setExLocked(ctx, lState, file, ex) }) } func (fbo *folderBranchOps) setMtimeLocked( ctx context.Context, lState *lockState, file Node, mtime *time.Time) error { fbo.mdWriterLock.AssertLocked(lState) filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file) if err != nil { return err } // Verify we have permission to write (no need to make a successor yet). md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "") if err != nil { return err } de, err := fbo.blocks.GetDirtyEntryEvenIfDeleted( ctx, lState, md.ReadOnly(), filePath) if err != nil { return err } de.Mtime = mtime.UnixNano() // setting the mtime counts as changing the file MD, so must set ctime too de.Ctime = fbo.nowUnixNano() parentPtr := filePath.parentPath().tailPointer() sao, err := newSetAttrOp(filePath.tailName(), parentPtr, mtimeAttr, filePath.tailPointer()) if err != nil { return err } sao.AddSelfUpdate(parentPtr) // If the node has been unlinked, we can safely ignore this // setmtime. if fbo.nodeCache.IsUnlinked(file) { fbo.log.CDebugf(ctx, "Skipping setmtime for a removed file %v", filePath.tailPointer()) fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile( ctx, lState, sao, de) return nil } sao.setFinalPath(filePath) dirCacheUndoFn := fbo.blocks.SetAttrInDirEntryInCache( lState, filePath, de, sao.Attr) return fbo.notifyAndSyncOrSignal( ctx, lState, dirCacheUndoFn, []Node{file}, sao, md.ReadOnly()) } func (fbo *folderBranchOps) SetMtime( ctx context.Context, file Node, mtime *time.Time) (err error) { fbo.log.CDebugf(ctx, "SetMtime %s %v", getNodeIDStr(file), mtime) defer func() { fbo.deferLog.CDebugf(ctx, "SetMtime %s %v done: %+v", getNodeIDStr(file), mtime, err) }() if mtime == nil { // Can happen on some OSes (e.g. OSX) when trying to set the atime only return nil } err = fbo.checkNode(file) if err != nil { return } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { return fbo.setMtimeLocked(ctx, lState, file, mtime) }) } type cleanupFn func(context.Context, *lockState, []BlockPointer, error) // startSyncLocked readies the blocks and other state needed to sync a // single file. It returns: // // * `doSync`: Whether or not the sync should actually happen. // * `stillDirty`: Whether the file should still be considered dirty when // this function returns. (That is, if `doSync` is false, and `stillDirty` // is true, then the file has outstanding changes but the sync was vetoed for // some other reason.) // * `fblock`: the root file block for the file being sync'd. // * `lbc`: A local block cache consisting of a dirtied version of the parent // directory for this file. // * `bps`: All the blocks that need to be put to the server. 
// * `syncState`: Must be passed to the `FinishSyncLocked` call after the // update completes. // * `cleanupFn`: A function that, if non-nil, must be called after the sync // is done. `cleanupFn` should be passed the set of bad blocks that couldn't // be sync'd (if any), and the error. // * `err`: The best, greatest return value, everyone says it's absolutely // stunning. func (fbo *folderBranchOps) startSyncLocked(ctx context.Context, lState *lockState, md *RootMetadata, node Node, file path) ( doSync, stillDirty bool, fblock *FileBlock, lbc localBcache, bps *blockPutState, syncState fileSyncState, cleanup cleanupFn, err error) { fbo.mdWriterLock.AssertLocked(lState) // if the cache for this file isn't dirty, we're done if !fbo.blocks.IsDirty(lState, file) { return false, false, nil, nil, nil, fileSyncState{}, nil, nil } // If the MD doesn't match the MD expected by the path, that // implies we are using a cached path, which implies the node has // been unlinked. In that case, we can safely ignore this sync. if fbo.nodeCache.IsUnlinked(node) { fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v", file.tailPointer()) // Removing the cached info here is a little sketchy, // since there's no guarantee that this sync comes // from closing the file, and we still want to serve // stat calls accurately if the user still has an open // handle to this file. // // Note in particular that if a file just had a dirty // directory entry cached (due to an attribute change on a // removed file, for example), this will clear that attribute // change. If there's still an open file handle, the user // won't be able to see the change anymore. // // TODO: Hook this in with the node cache GC logic to be // perfectly accurate (but at the same time, we'd then have to // fix up the intentional panic in the background flusher to // be more tolerant of long-lived dirty, removed files). err := fbo.blocks.ClearCacheInfo(lState, file) if err != nil { return false, false, nil, nil, nil, fileSyncState{}, nil, err } fbo.status.rmDirtyNode(node) return false, true, nil, nil, nil, fileSyncState{}, nil, nil } if file.isValidForNotification() { // notify the daemon that a write is being performed fbo.config.Reporter().Notify(ctx, writeNotification(file, false)) defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true)) } fblock, bps, lbc, syncState, err = fbo.blocks.StartSync(ctx, lState, md, file) cleanup = func(ctx context.Context, lState *lockState, blocksToRemove []BlockPointer, err error) { fbo.blocks.CleanupSyncState( ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err) } if err != nil { return false, true, nil, nil, nil, fileSyncState{}, cleanup, err } return true, true, fblock, lbc, bps, syncState, cleanup, nil } func addSelfUpdatesAndParent( p path, op op, parentsToAddChainsFor map[BlockPointer]bool) { for i, pn := range p.path { if i == len(p.path)-1 { op.AddSelfUpdate(pn.BlockPointer) } else { parentsToAddChainsFor[pn.BlockPointer] = true } } } func (fbo *folderBranchOps) syncAllLocked( ctx context.Context, lState *lockState, excl Excl) (err error) { fbo.mdWriterLock.AssertLocked(lState) dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState) dirtyDirs := fbo.blocks.GetDirtyDirBlockRefs(lState) if len(dirtyFiles) == 0 && len(dirtyDirs) == 0 { return nil } ctx = fbo.config.MaybeStartTrace(ctx, "FBO.SyncAll", fmt.Sprintf("%d files, %d dirs", len(dirtyFiles), len(dirtyDirs))) defer func() { fbo.config.MaybeFinishTrace(ctx, err) }() // Verify we have permission to write. 
We do this after the dirty // check because otherwise readers who call syncAll would get an // error. md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState) if err != nil { return err } bps := newBlockPutState(0) resolvedPaths := make(map[BlockPointer]path) lbc := make(localBcache) var cleanups []func(context.Context, *lockState, error) defer func() { for _, cf := range cleanups { cf(ctx, lState, err) } }() fbo.log.LazyTrace(ctx, "Syncing %d dir(s)", len(dirtyDirs)) // First prep all the directories. fbo.log.CDebugf(ctx, "Syncing %d dir(s)", len(dirtyDirs)) for _, ref := range dirtyDirs { node := fbo.nodeCache.Get(ref) if node == nil { continue } dir := fbo.nodeCache.PathFromNode(node) dblock, err := fbo.blocks.GetDirtyDir(ctx, lState, md, dir, blockWrite) if err != nil { return err } lbc[dir.tailPointer()] = dblock if !fbo.nodeCache.IsUnlinked(node) { resolvedPaths[dir.tailPointer()] = dir } // On a successful sync, clean up the cached entries and the // dirty blocks. cleanups = append(cleanups, func(ctx context.Context, lState *lockState, err error) { if err != nil { return } fbo.blocks.ClearCachedDirEntry(lState, dir) fbo.status.rmDirtyNode(node) }) } defer func() { // If the sync is successful, we can clear out all buffered // directory operations. if err == nil { fbo.dirOps = nil } }() fbo.log.LazyTrace(ctx, "Processing %d op(s)", len(fbo.dirOps)) newBlocks := make(map[BlockPointer]bool) fileBlocks := make(fileBlockMap) parentsToAddChainsFor := make(map[BlockPointer]bool) for _, dop := range fbo.dirOps { // Copy the op before modifying it, in case there's an error // and we have to retry with the original ops. newOp := dop.dirOp.deepCopy() md.AddOp(newOp) // Add "updates" for all the op updates, and make chains for // the rest of the parent directories, so they're treated like // updates during the prepping. for _, n := range dop.nodes { p := fbo.nodeCache.PathFromNode(n) if _, ok := newOp.(*setAttrOp); ok { // For a setattr, the node is the file, but that // doesn't get updated, so use the current parent // node. p = *p.parentPath() } addSelfUpdatesAndParent(p, newOp, parentsToAddChainsFor) } var ref BlockRef switch realOp := newOp.(type) { case *createOp: if realOp.Type == Sym { continue } // New files and directories explicitly need // pointer-updating, because the sync process will turn // them into simple refs and will forget about the local, // temporary ID. newNode := dop.nodes[1] newPath := fbo.nodeCache.PathFromNode(newNode) newPointer := newPath.tailPointer() newBlocks[newPointer] = true if realOp.Type != Dir { continue } dblock, ok := lbc[newPointer] if !ok { // New directories that aren't otherwise dirty need to // be added to both the `lbc` and `resolvedPaths` so // they are properly synced. dblock, err = fbo.blocks.GetDirtyDir( ctx, lState, md, newPath, blockWrite) if err != nil { return err } lbc[newPointer] = dblock if !fbo.nodeCache.IsUnlinked(newNode) { resolvedPaths[newPointer] = newPath } } if len(dblock.Children) > 0 { continue } // If the directory is empty, we need to explicitly clean // up its entry after syncing. ref = newPath.tailRef() case *renameOp: ref = realOp.Renamed.Ref() case *setAttrOp: ref = realOp.File.Ref() default: continue } // For create, rename and setattr ops, the target will have a // dirty entry, but may not have any outstanding operations on // it, so it needs to be cleaned up manually. 
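// The deferred cleanup below is a no-op unless the entire sync ends
// up succeeding (err == nil when syncAllLocked returns).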
defer func() { if err != nil { return } wasCleared := fbo.blocks.ClearCachedRef(lState, ref) if wasCleared { node := fbo.nodeCache.Get(ref) if node != nil { fbo.status.rmDirtyNode(node) } } }() } var blocksToRemove []BlockPointer // TODO: find a way to avoid so many dynamic closure dispatches. var afterUpdateFns []func() error afterUpdateFns = append(afterUpdateFns, func() error { // Any new files or directories need their pointers explicitly // updated, because the sync will be treating them as a new // ref, and not an update. for _, bs := range bps.blockStates { if newBlocks[bs.oldPtr] { fbo.blocks.updatePointer( md.ReadOnly(), bs.oldPtr, bs.blockPtr, false) } } return nil }) fbo.log.LazyTrace(ctx, "Syncing %d file(s)", len(dirtyFiles)) fbo.log.CDebugf(ctx, "Syncing %d file(s)", len(dirtyFiles)) fileSyncBlocks := newBlockPutState(1) for _, ref := range dirtyFiles { node := fbo.nodeCache.Get(ref) if node == nil { continue } file := fbo.nodeCache.PathFromNode(node) fbo.log.CDebugf(ctx, "Syncing file %v (%s)", ref, file) // Start the sync for this dirty file. doSync, stillDirty, fblock, newLbc, newBps, syncState, cleanup, err := fbo.startSyncLocked(ctx, lState, md, node, file) if cleanup != nil { // Note: This passes the same `blocksToRemove` into each // cleanup function. That's ok, as only the ones // pertaining to a particular syncing file will be acted // on. cleanups = append(cleanups, func(ctx context.Context, lState *lockState, err error) { cleanup(ctx, lState, blocksToRemove, err) }) } if err != nil { return err } if !doSync { if !stillDirty { fbo.status.rmDirtyNode(node) } continue } // Merge the per-file sync info into the batch sync info. bps.mergeOtherBps(newBps) fileSyncBlocks.mergeOtherBps(newBps) resolvedPaths[file.tailPointer()] = file parent := file.parentPath().tailPointer() if _, ok := fileBlocks[parent]; !ok { fileBlocks[parent] = make(map[string]*FileBlock) } fileBlocks[parent][file.tailName()] = fblock // Collect its `afterUpdateFn` along with all the others, so // they all get invoked under the same lock, to avoid any // weird races. afterUpdateFns = append(afterUpdateFns, func() error { // This will be called after the node cache is updated, so // this newPath will be correct. newPath := fbo.nodeCache.PathFromNode(node) stillDirty, err := fbo.blocks.FinishSyncLocked( ctx, lState, file, newPath, md.ReadOnly(), syncState, fbo.fbm) if !stillDirty { fbo.status.rmDirtyNode(node) } return err }) // Add an "update" for all the parent directory updates, and // make a chain for the file itself, so they're treated like // updates during the prepping. lastOp := md.Data().Changes.Ops[len(md.Data().Changes.Ops)-1] addSelfUpdatesAndParent(file, lastOp, parentsToAddChainsFor) // Update the combined local block cache with this file's // dirty entry. parentPtr := file.parentPath().tailPointer() if _, ok := lbc[parentPtr]; ok { lbc[parentPtr].Children[file.tailName()] = newLbc[parentPtr].Children[file.tailName()] } else { lbc[parentPtr] = newLbc[parentPtr] } } session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return err } tempIRMD := ImmutableRootMetadata{ ReadOnlyRootMetadata: md.ReadOnly(), lastWriterVerifyingKey: session.VerifyingKey, } fbo.log.LazyTrace(ctx, "Prepping update") // Create a set of chains for this batch, a succinct summary of // the file and directory blocks that need to change during this // sync. 
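// The chains are built from a single temporary ImmutableRootMetadata
// wrapping the in-progress md, reusing the same chain machinery that
// conflict resolution uses for real unmerged branches.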
syncChains, err := newCRChains( ctx, fbo.config.Codec(), []chainMetadata{tempIRMD}, &fbo.blocks, false) if err != nil { return err } for ptr := range parentsToAddChainsFor { syncChains.addNoopChain(ptr) } // All originals never made it to the server, so don't unmerged // them. syncChains.doNotUnrefPointers = syncChains.createdOriginals head, _ := fbo.getHead(lState) dummyHeadChains := newCRChainsEmpty() dummyHeadChains.mostRecentChainMDInfo = mostRecentChainMetadataInfo{ head, head.Data().Dir.BlockInfo} // Squash the batch of updates together into a set of blocks and // ready `md` for putting to the server. md.AddOp(newResolutionOp()) _, newBps, blocksToDelete, err := fbo.prepper.prepUpdateForPaths( ctx, lState, md, syncChains, dummyHeadChains, tempIRMD, head, resolvedPaths, lbc, fileBlocks, fbo.config.DirtyBlockCache(), prepFolderDontCopyIndirectFileBlocks) if err != nil { return err } if len(blocksToDelete) > 0 { return errors.Errorf("Unexpectedly found unflushed blocks to delete "+ "during syncAllLocked: %v", blocksToDelete) } bps.mergeOtherBps(newBps) defer func() { if err != nil { // Remove any blocks that are covered by file syncs -- // those might get reused upon sync retry. All other // blocks are fair game for cleanup though. bps.removeOtherBps(fileSyncBlocks) fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail) } }() // Put all the blocks. blocksToRemove, err = doBlockPuts(ctx, fbo.config.BlockServer(), fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, fbo.deferLog, md.TlfID(), md.GetTlfHandle().GetCanonicalName(), *bps) if err != nil { return err } // Call this under the same blockLock as when the pointers are // updated, so there's never any point in time where a read or // write might slip in after the pointers are updated, but before // the deferred writes are re-applied. afterUpdateFn := func() error { var errs []error for _, auf := range afterUpdateFns { err := auf() if err != nil { errs = append(errs, err) } } if len(errs) == 1 { return errs[0] } else if len(errs) > 1 { return errors.Errorf("Got errors %+v", errs) } return nil } return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, excl, func(md ImmutableRootMetadata) error { // Just update the pointers using the resolutionOp, all // the ops have already been notified. err = fbo.blocks.UpdatePointers( md, lState, md.data.Changes.Ops[0], false, afterUpdateFn) if err != nil { return err } fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md}) return nil }) } func (fbo *folderBranchOps) syncAllUnlocked( ctx context.Context, lState *lockState) error { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) select { case <-ctx.Done(): // We've already been canceled, possibly because we're a CR // and a write just called cr.ForceCancel. Don't allow the // SyncAll to complete, because if no other writes happen // we'll get stuck forever (see KBFS-2505). Instead, wait for // the next `SyncAll` to trigger. return ctx.Err() default: } return fbo.syncAllLocked(ctx, lState, NoExcl) } // SyncAll implements the KBFSOps interface for folderBranchOps. 
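// It flushes all buffered directory operations and dirty file writes
// for this folder-branch as one batched MD revision.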
func (fbo *folderBranchOps) SyncAll( ctx context.Context, folderBranch FolderBranch) (err error) { fbo.log.CDebugf(ctx, "SyncAll") defer func() { fbo.deferLog.CDebugf(ctx, "SyncAll done: %+v", err) }() if folderBranch != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, folderBranch} } return fbo.doMDWriteWithRetryUnlessCanceled(ctx, func(lState *lockState) error { return fbo.syncAllLocked(ctx, lState, NoExcl) }) } func (fbo *folderBranchOps) FolderStatus( ctx context.Context, folderBranch FolderBranch) ( fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) { fbo.log.CDebugf(ctx, "Status") defer func() { fbo.deferLog.CDebugf(ctx, "Status done: %+v", err) }() if folderBranch != fbo.folderBranch { return FolderBranchStatus{}, nil, WrongOpsError{fbo.folderBranch, folderBranch} } return fbo.status.getStatus(ctx, &fbo.blocks) } func (fbo *folderBranchOps) Status( ctx context.Context) ( fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) { return KBFSStatus{}, nil, InvalidOpError{} } // RegisterForChanges registers a single Observer to receive // notifications about this folder/branch. func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error { // It's the caller's responsibility to make sure // RegisterForChanges isn't called twice for the same Observer fbo.observers.add(obs) return nil } // UnregisterFromChanges stops an Observer from getting notifications // about the folder/branch. func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error { fbo.observers.remove(obs) return nil } // notifyBatchLocked sends out a notification for all the ops in md. func (fbo *folderBranchOps) notifyBatchLocked( ctx context.Context, lState *lockState, md ImmutableRootMetadata) error { fbo.headLock.AssertLocked(lState) for _, op := range md.data.Changes.Ops { err := fbo.notifyOneOpLocked(ctx, lState, op, md.ReadOnly(), false) if err != nil { return err } } fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md}) return nil } // searchForNode tries to figure out the path to the given // blockPointer, using only the block updates that happened as part of // a given MD update operation. func (fbo *folderBranchOps) searchForNode(ctx context.Context, ptr BlockPointer, md ReadOnlyRootMetadata) (Node, error) { // Record which pointers are new to this update, and thus worth // searching. 
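// Both the new halves of each update (op.allUpdates) and any freshly
// referenced blocks (op.Refs) count as pointers worth searching.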
newPtrs := make(map[BlockPointer]bool) for _, op := range md.data.Changes.Ops { for _, update := range op.allUpdates() { newPtrs[update.Ref] = true } for _, ref := range op.Refs() { newPtrs[ref] = true } } nodeMap, _, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache, []BlockPointer{ptr}, newPtrs, md, md.data.Dir.BlockPointer) if err != nil { return nil, err } n, ok := nodeMap[ptr] if !ok { return nil, NodeNotFoundError{ptr} } return n, nil } func (fbo *folderBranchOps) getUnlinkPathBeforeUpdatingPointers( ctx context.Context, lState *lockState, md ReadOnlyRootMetadata, op op) ( unlinkPath path, unlinkDe DirEntry, toUnlink bool, err error) { fbo.mdWriterLock.AssertLocked(lState) if len(md.data.Changes.Ops) == 0 { return path{}, DirEntry{}, false, errors.New("md needs at least one op") } var node Node var childName string requireResFix := false switch realOp := op.(type) { case *rmOp: if realOp.Dir.Ref == realOp.Dir.Unref { requireResFix = true } node = fbo.nodeCache.Get(realOp.Dir.Unref.Ref()) childName = realOp.OldName case *renameOp: if realOp.NewDir.Unref != zeroPtr { // moving to a new dir if realOp.NewDir.Ref == realOp.NewDir.Unref { requireResFix = true } node = fbo.nodeCache.Get(realOp.NewDir.Unref.Ref()) } else { // moving to the same dir if realOp.OldDir.Ref == realOp.OldDir.Unref { requireResFix = true } node = fbo.nodeCache.Get(realOp.OldDir.Unref.Ref()) } childName = realOp.NewName } if node == nil { return path{}, DirEntry{}, false, nil } p, err := fbo.pathFromNodeForRead(node) if err != nil { return path{}, DirEntry{}, false, err } // If the first op in this MD update is a resolutionOp, we need to // inspect it to look for the *real* original pointer for this // node. Though only do that if the op we're processing is // actually a part of this MD object; if it's the latest cached // dirOp, then the resOp we're looking at belongs to a previous // revision. if resOp, ok := md.data.Changes.Ops[0].(*resolutionOp); ok && (len(fbo.dirOps) == 0 || op != fbo.dirOps[len(fbo.dirOps)-1].dirOp) { for _, update := range resOp.allUpdates() { if update.Ref == p.tailPointer() { fbo.log.CDebugf(ctx, "Backing up ptr %v in op %s to original pointer %v", p.tailPointer(), op, update.Unref) p.path[len(p.path)-1].BlockPointer = update.Unref requireResFix = false break } } } if requireResFix { // If we didn't fix up the pointer using a resolutionOp, the // directory was likely created during this md update, and so // no unlinking is needed. fbo.log.CDebugf(ctx, "Ignoring unlink when resolutionOp never fixed up %v", p.tailPointer()) return path{}, DirEntry{}, false, nil } // If the original (clean) parent block is already GC'd from the // server, this might not work, but hopefully we'd be // fast-forwarding in that case anyway. dblock, err := fbo.blocks.GetDir(ctx, lState, md, p, blockRead) if err != nil { fbo.log.CDebugf(ctx, "Couldn't get the dir entry for %s in %v: %+v", childName, p.tailPointer(), err) return path{}, DirEntry{}, false, nil } de, ok := dblock.Children[childName] if !ok { return path{}, DirEntry{}, false, nil } childPath := p.ChildPath(childName, de.BlockPointer) return childPath, de, true, nil } func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context, lState *lockState, op op, md ReadOnlyRootMetadata, shouldPrefetch bool) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) if fbo.config.Mode() == InitMinimal { // There is no node cache in minimal mode, so there's nothing // to update. 
return nil } // We need to get unlinkPath before calling UpdatePointers so that // nodeCache.Unlink can properly update cachedPath. unlinkPath, unlinkDe, toUnlink, err := fbo.getUnlinkPathBeforeUpdatingPointers(ctx, lState, md, op) if err != nil { return err } err = fbo.blocks.UpdatePointers(md, lState, op, shouldPrefetch, nil) if err != nil { return err } var changes []NodeChange switch realOp := op.(type) { default: fbo.log.CDebugf(ctx, "Unknown op: %s", op) return nil case *createOp: node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref()) if node == nil { return nil // Nothing to do. } fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %s", realOp.NewName, getNodeIDStr(node)) changes = append(changes, NodeChange{ Node: node, DirUpdated: []string{realOp.NewName}, }) case *rmOp: node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref()) if node == nil { return nil // Nothing to do. } fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %s", realOp.OldName, getNodeIDStr(node)) changes = append(changes, NodeChange{ Node: node, DirUpdated: []string{realOp.OldName}, }) // If this node exists, then the child node might exist too, // and we need to unlink it in the node cache. if toUnlink { _ = fbo.nodeCache.Unlink(unlinkDe.Ref(), unlinkPath, unlinkDe) } case *renameOp: oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.Ref()) if oldNode != nil { changes = append(changes, NodeChange{ Node: oldNode, DirUpdated: []string{realOp.OldName}, }) } var newNode Node if realOp.NewDir.Ref != zeroPtr { newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.Ref()) if newNode != nil { changes = append(changes, NodeChange{ Node: newNode, DirUpdated: []string{realOp.NewName}, }) } } else { newNode = oldNode if oldNode != nil { // Add another name to the existing NodeChange. changes[len(changes)-1].DirUpdated = append(changes[len(changes)-1].DirUpdated, realOp.NewName) } } if oldNode != nil { fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%s to %s/%s", realOp.Renamed, realOp.OldName, getNodeIDStr(oldNode), realOp.NewName, getNodeIDStr(newNode)) if newNode == nil { if childNode := fbo.nodeCache.Get(realOp.Renamed.Ref()); childNode != nil { // if the childNode exists, we still have to update // its path to go through the new node. That means // creating nodes for all the intervening paths. // Unfortunately we don't have enough information to // know what the newPath is; we have to guess it from // the updates. var err error newNode, err = fbo.searchForNode(ctx, realOp.NewDir.Ref, md) if newNode == nil { fbo.log.CErrorf(ctx, "Couldn't find the new node: %v", err) } } } if newNode != nil { if toUnlink { _ = fbo.nodeCache.Unlink( unlinkDe.Ref(), unlinkPath, unlinkDe) } _, err := fbo.nodeCache.Move( realOp.Renamed.Ref(), newNode, realOp.NewName) if err != nil { return err } } } case *syncOp: node := fbo.nodeCache.Get(realOp.File.Ref.Ref()) if node == nil { return nil // Nothing to do. } fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %s", len(realOp.Writes), getNodeIDStr(node)) changes = append(changes, NodeChange{ Node: node, FileUpdated: realOp.Writes, }) case *setAttrOp: node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref()) if node == nil { return nil // Nothing to do. } fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %s", realOp.Attr, realOp.Name, getNodeIDStr(node)) p, err := fbo.pathFromNodeForRead(node) if err != nil { return err } childNode, err := fbo.blocks.UpdateCachedEntryAttributes( ctx, lState, md, p, realOp) if err != nil { return err } if childNode == nil { return nil // Nothing to do. 
} changes = append(changes, NodeChange{ Node: childNode, }) case *GCOp: // Unreferenced blocks in a GCOp mean that we shouldn't cache // them anymore fbo.log.CDebugf(ctx, "notifyOneOp: GCOp with latest rev %d and %d unref'd blocks", realOp.LatestRev, len(realOp.Unrefs())) bcache := fbo.config.BlockCache() idsToDelete := make([]kbfsblock.ID, 0, len(realOp.Unrefs())) for _, ptr := range realOp.Unrefs() { idsToDelete = append(idsToDelete, ptr.ID) if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil { fbo.log.CDebugf(ctx, "Couldn't delete transient entry for %v: %v", ptr, err) } } diskCache := fbo.config.DiskBlockCache() if diskCache != nil { go diskCache.Delete(ctx, idsToDelete) } case *resolutionOp: // If there are any unrefs of blocks that have a node, this is an // implied rmOp (see KBFS-1424). reverseUpdates := make(map[BlockPointer]BlockPointer) for _, unref := range op.Unrefs() { node := fbo.nodeCache.Get(unref.Ref()) if node == nil { // TODO: even if we don't have the node that was // unreferenced, we might have its parent, and that // parent might need an invalidation. continue } // If there is a node, unlink and invalidate. p, err := fbo.pathFromNodeForRead(node) if err != nil { fbo.log.CErrorf(ctx, "Couldn't get path: %v", err) continue } if !p.hasValidParent() { fbo.log.CErrorf(ctx, "Removed node %s has no parent", p) continue } parentPath := p.parentPath() parentNode := fbo.nodeCache.Get(parentPath.tailRef()) if parentNode != nil { changes = append(changes, NodeChange{ Node: parentNode, DirUpdated: []string{p.tailName()}, }) } fbo.log.CDebugf(ctx, "resolutionOp: remove %s, node %s", p.tailPointer(), getNodeIDStr(node)) // Revert the path back to the original BlockPointers, // before the updates were applied. if len(reverseUpdates) == 0 { for _, update := range op.allUpdates() { reverseUpdates[update.Ref] = update.Unref } } for i, pNode := range p.path { if oldPtr, ok := reverseUpdates[pNode.BlockPointer]; ok { p.path[i].BlockPointer = oldPtr } } de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, p) if err != nil { fbo.log.CDebugf(ctx, "Couldn't get the dir entry for %s/%v: %+v", p, p.tailPointer(), err) } _ = fbo.nodeCache.Unlink(p.tailRef(), p, de) } if len(changes) == 0 { return nil } } fbo.observers.batchChanges(ctx, changes) return nil } func (fbo *folderBranchOps) notifyOneOp(ctx context.Context, lState *lockState, op op, md ReadOnlyRootMetadata, shouldPrefetch bool) error { fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) return fbo.notifyOneOpLocked(ctx, lState, op, md, shouldPrefetch) } func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) kbfsmd.Revision { fbo.headLock.AssertAnyLocked(lState) if fbo.head != (ImmutableRootMetadata{}) { return fbo.head.Revision() } return kbfsmd.RevisionUninitialized } func (fbo *folderBranchOps) getCurrMDRevision( lState *lockState) kbfsmd.Revision { fbo.headLock.RLock(lState) defer fbo.headLock.RUnlock(lState) return fbo.getCurrMDRevisionLocked(lState) } type applyMDUpdatesFunc func(context.Context, *lockState, []ImmutableRootMetadata) error func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context, lState *lockState, rmds []ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) // If there's anything in the journal, don't apply these MDs. // Wait for CR to happen. 
if fbo.isMasterBranchLocked(lState) { mergedRev, err := fbo.getJournalPredecessorRevision(ctx) if err == errNoFlushedRevisions { // If the journal is still on the initial revision, ignore // the error and fall through to ignore CR. mergedRev = kbfsmd.RevisionInitial } else if err != nil { return err } if mergedRev != kbfsmd.RevisionUninitialized { if len(rmds) > 0 { // We should update our view of the merged master though, // to avoid re-registering for the same updates again. func() { fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) fbo.setLatestMergedRevisionLocked( ctx, lState, rmds[len(rmds)-1].Revision(), false) }() } fbo.log.CDebugf(ctx, "Ignoring fetched revisions while MDs are in journal") return nil } } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) // if we have staged changes, ignore all updates until conflict // resolution kicks in. TODO: cache these for future use. if !fbo.isMasterBranchLocked(lState) { if len(rmds) > 0 { latestMerged := rmds[len(rmds)-1] // Don't trust un-put updates here because they might have // come from our own journal before the conflict was // detected. Assume we'll hear about the conflict via // callbacks from the journal. if !latestMerged.putToServer { return UnmergedError{} } // setHeadLocked takes care of merged case fbo.setLatestMergedRevisionLocked( ctx, lState, latestMerged.Revision(), false) unmergedRev := kbfsmd.RevisionUninitialized if fbo.head != (ImmutableRootMetadata{}) { unmergedRev = fbo.head.Revision() } fbo.cr.Resolve(ctx, unmergedRev, latestMerged.Revision()) } return UnmergedError{} } // Don't allow updates while we're in the dirty state; the next // sync will put us into an unmerged state anyway and we'll // require conflict resolution. if fbo.blocks.GetState(lState) != cleanState { return errors.WithStack(NoUpdatesWhileDirtyError{}) } appliedRevs := make([]ImmutableRootMetadata, 0, len(rmds)) for _, rmd := range rmds { // check that we're applying the expected MD revision if rmd.Revision() <= fbo.getCurrMDRevisionLocked(lState) { // Already caught up! continue } if err := isReadableOrError(ctx, fbo.config.KBPKI(), rmd.ReadOnly()); err != nil { return err } err := fbo.setHeadSuccessorLocked(ctx, lState, rmd, false) if err != nil { return err } // No new operations in these. if rmd.IsWriterMetadataCopiedSet() { continue } for _, op := range rmd.data.Changes.Ops { err := fbo.notifyOneOpLocked(ctx, lState, op, rmd.ReadOnly(), true) if err != nil { return err } } if rmd.IsRekeySet() { // One might have concern that a MD update written by the device // itself can slip in here, for example during the rekey after // setting paper prompt, and the event may cause the paper prompt // to be unset. This is not a problem because 1) the revision check // above shouldn't allow MD update written by this device to reach // here; 2) the rekey FSM doesn't touch anything if it has the // paper prompt set and is in scheduled state. fbo.rekeyFSM.Event(NewRekeyRequestEvent()) } else { fbo.rekeyFSM.Event(NewRekeyNotNeededEvent()) } appliedRevs = append(appliedRevs, rmd) } if len(appliedRevs) > 0 { fbo.editHistory.UpdateHistory(ctx, appliedRevs) } return nil } func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context, lState *lockState, rmds []ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) // Don't allow updates while we're in the dirty state; the next // sync will put us into an unmerged state anyway and we'll // require conflict resolution. 
if fbo.blocks.GetState(lState) != cleanState { return NotPermittedWhileDirtyError{} } // go backwards through the updates for i := len(rmds) - 1; i >= 0; i-- { rmd := rmds[i] // on undo, it's ok to re-apply the current revision since you // need to invert all of its ops. // // This duplicates a check in // fbo.setHeadPredecessorLocked. TODO: Remove this // duplication. if rmd.Revision() != fbo.getCurrMDRevisionLocked(lState) && rmd.Revision() != fbo.getCurrMDRevisionLocked(lState)-1 { return MDUpdateInvertError{rmd.Revision(), fbo.getCurrMDRevisionLocked(lState)} } // TODO: Check that the revisions are equal only for // the first iteration. if rmd.Revision() < fbo.getCurrMDRevisionLocked(lState) { err := fbo.setHeadPredecessorLocked(ctx, lState, rmd) if err != nil { return err } } // iterate the ops in reverse and invert each one ops := rmd.data.Changes.Ops for j := len(ops) - 1; j >= 0; j-- { io, err := invertOpForLocalNotifications(ops[j]) if err != nil { fbo.log.CWarningf(ctx, "got error %v when invert op %v; "+ "skipping. Open file handles "+ "may now be in an invalid "+ "state, which can be fixed by "+ "either closing them all or "+ "restarting KBFS.", err, ops[j]) continue } err = fbo.notifyOneOpLocked(ctx, lState, io, rmd.ReadOnly(), false) if err != nil { return err } } } // TODO: update the edit history? return nil } func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context, lState *lockState, rmds []ImmutableRootMetadata) error { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.applyMDUpdatesLocked(ctx, lState, rmds) } func (fbo *folderBranchOps) getLatestMergedRevision(lState *lockState) kbfsmd.Revision { fbo.headLock.RLock(lState) defer fbo.headLock.RUnlock(lState) return fbo.latestMergedRevision } // caller should have held fbo.headLock func (fbo *folderBranchOps) setLatestMergedRevisionLocked(ctx context.Context, lState *lockState, rev kbfsmd.Revision, allowBackward bool) { fbo.headLock.AssertLocked(lState) if rev == kbfsmd.RevisionUninitialized { panic("Cannot set latest merged revision to an uninitialized value") } if fbo.latestMergedRevision < rev || allowBackward { fbo.latestMergedRevision = rev fbo.log.CDebugf(ctx, "Updated latestMergedRevision to %d.", rev) } else { fbo.log.CDebugf(ctx, "Local latestMergedRevision (%d) is higher than "+ "the new revision (%d); won't update.", fbo.latestMergedRevision, rev) } } // Assumes all necessary locking is either already done by caller, or // is done by applyFunc. func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context, lState *lockState, lockBeforeGet *keybase1.LockID, applyFunc applyMDUpdatesFunc) error { // first look up all MD revisions newer than my current head start := fbo.getLatestMergedRevision(lState) + 1 rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), start, lockBeforeGet) if err != nil { return err } err = applyFunc(ctx, lState, rmds) if err != nil { return err } return nil } func (fbo *folderBranchOps) getAndApplyNewestUnmergedHead(ctx context.Context, lState *lockState) error { fbo.log.CDebugf(ctx, "Fetching the newest unmerged head") bid := func() BranchID { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.bid }() // We can only ever be at most one revision behind, so fetch the // latest unmerged revision and apply it as a successor. md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), bid) if err != nil { return err } if md == (ImmutableRootMetadata{}) { // There is no unmerged revision, oops! 
return errors.New("Couldn't find an unmerged head") } fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) if fbo.bid != bid { // The branches switched (apparently CR completed), so just // try again. fbo.log.CDebugf(ctx, "Branches switched while fetching unmerged head") return nil } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) if err := fbo.setHeadSuccessorLocked(ctx, lState, md, false); err != nil { return err } if err := fbo.notifyBatchLocked(ctx, lState, md); err != nil { return err } return fbo.config.MDCache().Put(md) } // getUnmergedMDUpdates returns a slice of the unmerged MDs for this // TLF's current unmerged branch and unmerged branch, between the // merge point for the branch and the current head. The returned MDs // are the same instances that are stored in the MD cache, so they // should be modified with care. func (fbo *folderBranchOps) getUnmergedMDUpdates( ctx context.Context, lState *lockState) ( kbfsmd.Revision, []ImmutableRootMetadata, error) { // acquire mdWriterLock to read the current branch ID. bid := func() BranchID { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.bid }() return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(), bid, fbo.getCurrMDRevision(lState)) } func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked( ctx context.Context, lState *lockState) ( kbfsmd.Revision, []ImmutableRootMetadata, error) { fbo.mdWriterLock.AssertLocked(lState) return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(), fbo.bid, fbo.getCurrMDRevision(lState)) } // Returns a list of block pointers that were created during the // staged era. func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked( ctx context.Context, lState *lockState) ([]BlockPointer, error) { fbo.mdWriterLock.AssertLocked(lState) currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState) if err != nil { return nil, err } err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds) if err != nil { return nil, err } // We have arrived at the branch point. The new root is // the previous revision from the current head. Find it // and apply. TODO: somehow fake the current head into // being currHead-1, so that future calls to // applyMDUpdates will fetch this along with the rest of // the updates. 
fbo.setBranchIDLocked(lState, NullBranchID) rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID, currHead, Merged, nil) if err != nil { return nil, err } err = func() error { fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) err = fbo.setHeadPredecessorLocked(ctx, lState, rmd) if err != nil { return err } fbo.setLatestMergedRevisionLocked(ctx, lState, rmd.Revision(), true) return nil }() if err != nil { return nil, err } // Return all new refs var unmergedPtrs []BlockPointer for _, rmd := range unmergedRmds { for _, op := range rmd.data.Changes.Ops { for _, ptr := range op.Refs() { if ptr != zeroPtr { unmergedPtrs = append(unmergedPtrs, ptr) } } for _, update := range op.allUpdates() { if update.Ref != zeroPtr { unmergedPtrs = append(unmergedPtrs, update.Ref) } } } } return unmergedPtrs, nil } func (fbo *folderBranchOps) unstageLocked(ctx context.Context, lState *lockState) error { fbo.mdWriterLock.AssertLocked(lState) // fetch all of my unstaged updates, and undo them one at a time bid, wasMasterBranch := fbo.bid, fbo.isMasterBranchLocked(lState) unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState) if err != nil { return err } // let the server know we no longer have need if !wasMasterBranch { err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), bid) if err != nil { return err } } // now go forward in time, if possible err = fbo.getAndApplyMDUpdates(ctx, lState, nil, fbo.applyMDUpdatesLocked) if err != nil { return err } md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState) if err != nil { return err } // Finally, create a resolutionOp with the newly-unref'd pointers. resOp := newResolutionOp() for _, ptr := range unmergedPtrs { resOp.AddUnrefBlock(ptr) } md.AddOp(resOp) bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md) if err != nil { return err } return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl, func(md ImmutableRootMetadata) error { return fbo.notifyBatchLocked(ctx, lState, md) }) } // TODO: remove once we have automatic conflict resolution func (fbo *folderBranchOps) UnstageForTesting( ctx context.Context, folderBranch FolderBranch) (err error) { fbo.log.CDebugf(ctx, "UnstageForTesting") defer func() { fbo.deferLog.CDebugf(ctx, "UnstageForTesting done: %+v", err) }() if folderBranch != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, folderBranch} } return runUnlessCanceled(ctx, func() error { lState := makeFBOLockState() if fbo.isMasterBranch(lState) { // no-op return nil } if fbo.blocks.GetState(lState) != cleanState { return NotPermittedWhileDirtyError{} } // launch unstaging in a new goroutine, because we don't want to // use the provided context because upper layers might ignore our // notifications if we do. But we still want to wait for the // context to cancel. c := make(chan error, 1) freshCtx, cancel := fbo.newCtxWithFBOID() defer cancel() fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting") go func() { lState := makeFBOLockState() c <- fbo.doMDWriteWithRetry(ctx, lState, func(lState *lockState) error { return fbo.unstageLocked(freshCtx, lState) }) }() select { case err := <-c: return err case <-ctx.Done(): return ctx.Err() } }) } // mdWriterLock must be taken by the caller. 
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context, lState *lockState, promptPaper bool) (res RekeyResult, err error) { fbo.log.CDebugf(ctx, "rekeyLocked") defer func() { fbo.deferLog.CDebugf(ctx, "rekeyLocked done: %+v %+v", res, err) }() fbo.mdWriterLock.AssertLocked(lState) if !fbo.isMasterBranchLocked(lState) { return RekeyResult{}, errors.New("can't rekey while staged") } // untrusted head is ok here. head, _ := fbo.getHead(lState) if head != (ImmutableRootMetadata{}) { // If we already have a cached revision, make sure we're // up-to-date with the latest revision before inspecting the // metadata, since Rekey doesn't let us go into CR mode, and // we don't actually get folder update notifications when the // rekey bit is set, just a "folder needs rekey" update. if err := fbo.getAndApplyMDUpdates( ctx, lState, nil, fbo.applyMDUpdatesLocked); err != nil { if applyErr, ok := err.(MDRevisionMismatch); !ok || applyErr.rev != applyErr.curr { return RekeyResult{}, err } } } md, lastWriterVerifyingKey, rekeyWasSet, err := fbo.getMDForRekeyWriteLocked(ctx, lState) if err != nil { return RekeyResult{}, err } currKeyGen := md.LatestKeyGeneration() rekeyDone, tlfCryptKey, err := fbo.config.KeyManager(). Rekey(ctx, md, promptPaper) stillNeedsRekey := false switch err.(type) { case nil: // TODO: implement a "forced" option that rekeys even when the // devices haven't changed? if !rekeyDone { fbo.log.CDebugf(ctx, "No rekey necessary") return RekeyResult{ DidRekey: false, NeedsPaperKey: false, }, nil } // Clear the rekey bit if any. md.clearRekeyBit() session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return RekeyResult{}, err } // Readers can't clear the last revision, because: // 1) They don't have access to the writer metadata, so can't clear the // block changes. // 2) Readers need the MetadataFlagWriterMetadataCopied bit set for // MDServer to authorize the write. // Without this check, MDServer returns an Unauthorized error. if md.GetTlfHandle().IsWriter(session.UID) { md.clearLastRevision() } case RekeyIncompleteError: if !rekeyDone && rekeyWasSet { // The rekey bit was already set, and there's nothing else // we can to do, so don't put any new revisions. fbo.log.CDebugf(ctx, "No further rekey possible by this user.") return RekeyResult{ DidRekey: false, NeedsPaperKey: false, }, nil } // Rekey incomplete, fallthrough without early exit, to ensure // we write the metadata with any potential changes fbo.log.CDebugf(ctx, "Rekeyed reader devices, but still need writer rekey") case NeedOtherRekeyError, NeedSelfRekeyError: stillNeedsRekey = true default: if err == context.DeadlineExceeded { fbo.log.CDebugf(ctx, "Paper key prompt timed out") // Reschedule the prompt in the timeout case. stillNeedsRekey = true } else { return RekeyResult{}, err } } if stillNeedsRekey { fbo.log.CDebugf(ctx, "Device doesn't have access to rekey") // If we didn't have read access, then we don't have any // unlocked paper keys. Wait for some time, and then if we // still aren't rekeyed, try again but this time prompt the // user for any known paper keys. We do this even if the // rekey bit is already set, since we may have restarted since // the previous rekey attempt, before prompting for the paper // key. Only schedule this as a one-time event, since direct // folder accesses from the user will also cause a // rekeyWithPrompt. 
if rekeyWasSet { // Devices not yet keyed shouldn't set the rekey bit again fbo.log.CDebugf(ctx, "Rekey bit already set") return RekeyResult{ DidRekey: rekeyDone, NeedsPaperKey: true, }, nil } // This device hasn't been keyed yet, fall through to set the rekey bit } // add an empty operation to satisfy assumptions elsewhere md.AddOp(newRekeyOp()) // we still let readers push a new md block that we validate against reader // permissions err = fbo.finalizeMDRekeyWriteLocked( ctx, lState, md, lastWriterVerifyingKey) if err != nil { return RekeyResult{ DidRekey: rekeyDone, NeedsPaperKey: stillNeedsRekey, }, err } // cache any new TLF crypt key if tlfCryptKey != nil { keyGen := md.LatestKeyGeneration() err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey) if err != nil { return RekeyResult{ DidRekey: rekeyDone, NeedsPaperKey: stillNeedsRekey, }, err } } // send rekey finish notification handle := md.GetTlfHandle() if currKeyGen >= FirstValidKeyGen && rekeyDone { fbo.config.Reporter().Notify(ctx, rekeyNotification(ctx, fbo.config, handle, true)) } return RekeyResult{ DidRekey: rekeyDone, NeedsPaperKey: stillNeedsRekey, }, nil } func (fbo *folderBranchOps) RequestRekey(_ context.Context, tlf tlf.ID) { fb := FolderBranch{tlf, MasterBranch} if fb != fbo.folderBranch { // TODO: log instead of panic? panic(WrongOpsError{fbo.folderBranch, fb}) } fbo.rekeyFSM.Event(NewRekeyRequestEvent()) } func (fbo *folderBranchOps) SyncFromServerForTesting(ctx context.Context, folderBranch FolderBranch, lockBeforeGet *keybase1.LockID) (err error) { fbo.log.CDebugf(ctx, "SyncFromServerForTesting") defer func() { fbo.deferLog.CDebugf(ctx, "SyncFromServerForTesting done: %+v", err) }() if folderBranch != fbo.folderBranch { return WrongOpsError{fbo.folderBranch, folderBranch} } lState := makeFBOLockState() // Make sure everything outstanding syncs to disk at least. if err := fbo.syncAllUnlocked(ctx, lState); err != nil { return err } // A journal flush before CR, if needed. if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log); err != nil { return err } if err := fbo.mdFlushes.Wait(ctx); err != nil { return err } if err := fbo.branchChanges.Wait(ctx); err != nil { return err } // Loop until we're fully updated on the master branch. for { if !fbo.isMasterBranch(lState) { if err := fbo.cr.Wait(ctx); err != nil { return err } // If we are still staged after the wait, then we have a problem. if !fbo.isMasterBranch(lState) { return errors.Errorf("Conflict resolution didn't take us out " + "of staging.") } } dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState) if len(dirtyFiles) > 0 { for _, ref := range dirtyFiles { fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref) } return errors.New("can't sync from server while dirty") } // A journal flush after CR, if needed. if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log); err != nil { return err } if err := fbo.mdFlushes.Wait(ctx); err != nil { return err } if err := fbo.branchChanges.Wait(ctx); err != nil { return err } if err := fbo.getAndApplyMDUpdates( ctx, lState, lockBeforeGet, fbo.applyMDUpdates); err != nil { if applyErr, ok := err.(MDRevisionMismatch); ok { if applyErr.rev == applyErr.curr { fbo.log.CDebugf(ctx, "Already up-to-date with server") return nil } } if _, isUnmerged := err.(UnmergedError); isUnmerged { continue } else if err == errNoMergedRevWhileStaged { continue } return err } break } // Wait for all the asynchronous block archiving and quota // reclamation to hit the block server. 
if err := fbo.fbm.waitForArchives(ctx); err != nil { return err } if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil { return err } if err := fbo.editHistory.Wait(ctx); err != nil { return err } if err := fbo.fbm.waitForQuotaReclamations(ctx); err != nil { return err } // A second journal flush if needed, to clear out any // archive/remove calls caused by the above operations. return WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log) } // CtxFBOTagKey is the type used for unique context tags within folderBranchOps type CtxFBOTagKey int const ( // CtxFBOIDKey is the type of the tag for unique operation IDs // within folderBranchOps. CtxFBOIDKey CtxFBOTagKey = iota ) // CtxFBOOpID is the display name for the unique operation // folderBranchOps ID tag. const CtxFBOOpID = "FBOID" func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context { return CtxWithRandomIDReplayable(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log) } func (fbo *folderBranchOps) newCtxWithFBOID() (context.Context, context.CancelFunc) { // No need to call NewContextReplayable since ctxWithFBOID calls // ctxWithRandomIDReplayable, which attaches replayably. ctx := fbo.ctxWithFBOID(context.Background()) ctx, cancelFunc := context.WithCancel(ctx) ctx, err := NewContextWithCancellationDelayer(ctx) if err != nil { panic(err) } return ctx, cancelFunc } // Run the passed function with a context that's canceled on shutdown. func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error { ctx, cancelFunc := fbo.newCtxWithFBOID() defer cancelFunc() errChan := make(chan error, 1) go func() { errChan <- fn(ctx) }() select { case err := <-errChan: return err case <-fbo.shutdownChan: return ShutdownHappenedError{} } } func (fbo *folderBranchOps) doFastForwardLocked(ctx context.Context, lState *lockState, currHead ImmutableRootMetadata) error { fbo.mdWriterLock.AssertLocked(lState) fbo.headLock.AssertLocked(lState) fbo.log.CDebugf(ctx, "Fast-forwarding from rev %d to rev %d", fbo.latestMergedRevision, currHead.Revision()) changes, err := fbo.blocks.FastForwardAllNodes( ctx, lState, currHead.ReadOnly()) if err != nil { return err } err = fbo.setHeadSuccessorLocked(ctx, lState, currHead, true /*rebase*/) if err != nil { return err } // Invalidate all the affected nodes. if len(changes) > 0 { fbo.observers.batchChanges(ctx, changes) } // Reset the edit history. TODO: notify any listeners that we've // done this. fbo.editHistory.Shutdown() fbo.editHistory = NewTlfEditHistory(fbo.config, fbo, fbo.log) return nil } func (fbo *folderBranchOps) maybeFastForward(ctx context.Context, lState *lockState, lastUpdate time.Time, currUpdate time.Time) ( fastForwardDone bool, err error) { // Has it been long enough to try fast-forwarding? if currUpdate.Before(lastUpdate.Add(fastForwardTimeThresh)) || !fbo.isMasterBranch(lState) { return false, nil } fbo.log.CDebugf(ctx, "Checking head for possible "+ "fast-forwarding (last update time=%s)", lastUpdate) currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id(), nil) if err != nil { return false, err } fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision()) fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) // If the journal has anything in it, don't fast-forward since we // haven't finished flushing yet. If there was really a remote // update on the server, we'll end up in CR eventually. 
mergedRev, err := fbo.getJournalPredecessorRevision(ctx) if err != nil { return false, err } if mergedRev != kbfsmd.RevisionUninitialized { return false, nil } if !fbo.isMasterBranchLocked(lState) { // Don't update if we're staged. return false, nil } fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) if currHead.Revision() < fbo.latestMergedRevision+fastForwardRevThresh { // Might as well fetch all the revisions. return false, nil } err = fbo.doFastForwardLocked(ctx, lState, currHead) if err != nil { return false, err } return true, nil } func (fbo *folderBranchOps) locallyFinalizeTLF(ctx context.Context) { lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) if fbo.head == (ImmutableRootMetadata{}) { return } // It's safe to give this a finalized number of 1 and a fake user // name. The whole point here is to move the old finalized TLF // name away to a new name, where the user won't be able to access // it anymore, and if there's a conflict with a previously-moved // TLF that shouldn't matter. now := fbo.config.Clock().Now() finalizedInfo, err := tlf.NewHandleExtension( tlf.HandleExtensionFinalized, 1, libkb.NormalizedUsername("<unknown>"), now) if err != nil { fbo.log.CErrorf(ctx, "Couldn't make finalized info: %+v", err) return } fakeSignedHead := &RootMetadataSigned{MD: fbo.head.bareMd} finalRmd, err := fakeSignedHead.MakeFinalCopy( fbo.config.Codec(), now, finalizedInfo) if err != nil { fbo.log.CErrorf(ctx, "Couldn't finalize MD: %+v", err) return } // Construct the data needed to fake a new head. mdID, err := kbfsmd.MakeID(fbo.config.Codec(), finalRmd.MD) if err != nil { fbo.log.CErrorf(ctx, "Couldn't get finalized MD ID: %+v", err) return } bareHandle, err := finalRmd.MD.MakeBareTlfHandle(fbo.head.Extra()) if err != nil { fbo.log.CErrorf(ctx, "Couldn't get finalized bare handle: %+v", err) return } handle, err := MakeTlfHandle(ctx, bareHandle, fbo.config.KBPKI()) if err != nil { fbo.log.CErrorf(ctx, "Couldn't get finalized handle: %+v", err) return } finalBrmd, ok := finalRmd.MD.(MutableBareRootMetadata) if !ok { fbo.log.CErrorf(ctx, "Couldn't get finalized mutable bare MD: %+v", err) return } // We don't have a way to sign this with a valid key (and we might // be logged out anyway), so just directly make the md immutable. finalIrmd := ImmutableRootMetadata{ ReadOnlyRootMetadata: makeRootMetadata( finalBrmd, fbo.head.Extra(), handle).ReadOnly(), mdID: mdID, } // This will trigger the handle change notification to observers. err = fbo.setHeadSuccessorLocked(ctx, lState, finalIrmd, false) if err != nil { fbo.log.CErrorf(ctx, "Couldn't set finalized MD: %+v", err) return } } func (fbo *folderBranchOps) registerAndWaitForUpdates() { defer close(fbo.updateDoneChan) childDone := make(chan struct{}) var lastUpdate time.Time err := fbo.runUnlessShutdown(func(ctx context.Context) error { defer close(childDone) // If we fail to register for or process updates, try again // with an exponential backoff, so we don't overwhelm the // server or ourselves with too many attempts in a hopeless // situation. expBackoff := backoff.NewExponentialBackOff() // Never give up hope until we shut down expBackoff.MaxElapsedTime = 0 // Register and wait in a loop unless we hit an unrecoverable error fbo.cancelUpdatesLock.Lock() if fbo.cancelUpdates != nil { // It should be impossible to get here without having // already called the cancel function, but just in case // call it here again. 
fbo.cancelUpdates() } ctx, fbo.cancelUpdates = context.WithCancel(ctx) fbo.cancelUpdatesLock.Unlock() for { err := backoff.RetryNotifyWithContext(ctx, func() error { // Replace the FBOID one with a fresh id for every attempt newCtx := fbo.ctxWithFBOID(ctx) updateChan, err := fbo.registerForUpdates(newCtx) if err != nil { select { case <-ctx.Done(): // Shortcut the retry, we're done. return nil default: return err } } currUpdate, err := fbo.waitForAndProcessUpdates( newCtx, lastUpdate, updateChan) switch errors.Cause(err).(type) { case UnmergedError: // skip the back-off timer and continue directly to next // registerForUpdates return nil case NewMetadataVersionError: fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+ "read the newest metadata: %+v", err) fbo.status.setPermErr(err) // No need to lock here, since `cancelUpdates` is // only set within this same goroutine. fbo.cancelUpdates() return context.Canceled case kbfsmd.ServerErrorCannotReadFinalizedTLF: fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+ "read the finalized metadata for this TLF: %+v", err) fbo.status.setPermErr(err) // Locally finalize the TLF so new accesses // through to the old folder name will find the // new folder. fbo.locallyFinalizeTLF(newCtx) // No need to lock here, since `cancelUpdates` is // only set within this same goroutine. fbo.cancelUpdates() return context.Canceled } select { case <-ctx.Done(): // Shortcut the retry, we're done. return nil default: if err == nil { lastUpdate = currUpdate } return err } }, expBackoff, func(err error, nextTime time.Duration) { fbo.log.CDebugf(ctx, "Retrying registerForUpdates in %s due to err: %v", nextTime, err) }) if err != nil { return err } } }) if err != nil && err != context.Canceled { fbo.log.CWarningf(context.Background(), "registerAndWaitForUpdates failed unexpectedly with an error: %v", err) } <-childDone } func (fbo *folderBranchOps) registerForUpdatesShouldFireNow() bool { fbo.muLastGetHead.Lock() defer fbo.muLastGetHead.Unlock() return fbo.config.Clock().Now().Sub(fbo.lastGetHead) < registerForUpdatesFireNowThreshold } func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) ( updateChan <-chan error, err error) { lState := makeFBOLockState() currRev := fbo.getLatestMergedRevision(lState) fireNow := false if fbo.registerForUpdatesShouldFireNow() { ctx = rpc.WithFireNow(ctx) fireNow = true } fbo.log.CDebugf(ctx, "Registering for updates (curr rev = %d, fire now = %v)", currRev, fireNow) defer func() { fbo.deferLog.CDebugf(ctx, "Registering for updates (curr rev = %d, fire now = %v) done: %+v", currRev, fireNow, err) }() // RegisterForUpdate will itself retry on connectivity issues return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(), currRev) } func (fbo *folderBranchOps) waitForAndProcessUpdates( ctx context.Context, lastUpdate time.Time, updateChan <-chan error) (currUpdate time.Time, err error) { // successful registration; now, wait for an update or a shutdown fbo.log.CDebugf(ctx, "Waiting for updates") defer func() { fbo.deferLog.CDebugf(ctx, "Waiting for updates done: %+v", err) }() lState := makeFBOLockState() for { select { case err := <-updateChan: fbo.log.CDebugf(ctx, "Got an update: %v", err) if err != nil { return time.Time{}, err } // Getting and applying the updates requires holding // locks, so make sure it doesn't take too long. 
ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout) defer cancel() currUpdate := fbo.config.Clock().Now() ffDone, err := fbo.maybeFastForward(ctx, lState, lastUpdate, currUpdate) if err != nil { return time.Time{}, err } if ffDone { return currUpdate, nil } err = fbo.getAndApplyMDUpdates(ctx, lState, nil, fbo.applyMDUpdates) if err != nil { fbo.log.CDebugf(ctx, "Got an error while applying "+ "updates: %v", err) return time.Time{}, err } return currUpdate, nil case unpause := <-fbo.updatePauseChan: fbo.log.CInfof(ctx, "Updates paused") // wait to be unpaused select { case <-unpause: fbo.log.CInfof(ctx, "Updates unpaused") case <-ctx.Done(): return time.Time{}, ctx.Err() } case <-ctx.Done(): return time.Time{}, ctx.Err() } } } func (fbo *folderBranchOps) getCachedDirOpsCount(lState *lockState) int { fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return len(fbo.dirOps) } func (fbo *folderBranchOps) backgroundFlusher() { lState := makeFBOLockState() var prevDirtyFileMap map[BlockRef]bool sameDirtyFileCount := 0 for { doSelect := true if fbo.blocks.GetState(lState) == dirtyState && fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) && sameDirtyFileCount < 10 { // We have dirty files, and the system has a full buffer, // so don't bother waiting for a signal, just get right to // the main attraction. doSelect = false } else if fbo.getCachedDirOpsCount(lState) >= fbo.config.BGFlushDirOpBatchSize() { doSelect = false } if doSelect { // Wait until we really have a write waiting. doWait := true select { case <-fbo.syncNeededChan: if fbo.getCachedDirOpsCount(lState) >= fbo.config.BGFlushDirOpBatchSize() { doWait = false } case <-fbo.forceSyncChan: doWait = false case <-fbo.shutdownChan: return } if doWait { timer := time.NewTimer(fbo.config.BGFlushPeriod()) // Loop until either a tick's worth of time passes, // the batch size of directory ops is full, a sync is // forced, or a shutdown happens. loop: for { select { case <-timer.C: break loop case <-fbo.syncNeededChan: if fbo.getCachedDirOpsCount(lState) >= fbo.config.BGFlushDirOpBatchSize() { break loop } case <-fbo.forceSyncChan: break loop case <-fbo.shutdownChan: return } } } } dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState) dirOpsCount := fbo.getCachedDirOpsCount(lState) if len(dirtyFiles) == 0 && dirOpsCount == 0 { sameDirtyFileCount = 0 continue } // Make sure we are making some progress currDirtyFileMap := make(map[BlockRef]bool) for _, ref := range dirtyFiles { currDirtyFileMap[ref] = true } if reflect.DeepEqual(currDirtyFileMap, prevDirtyFileMap) { sameDirtyFileCount++ } else { sameDirtyFileCount = 0 } prevDirtyFileMap = currDirtyFileMap fbo.runUnlessShutdown(func(ctx context.Context) (err error) { // Denote that these are coming from a background // goroutine, not directly from any user. ctx = NewContextReplayable(ctx, func(ctx context.Context) context.Context { return context.WithValue(ctx, CtxBackgroundSyncKey, "1") }) fbo.log.CDebugf(ctx, "Background sync triggered: %d dirty files, "+ "%d dir ops in batch", len(dirtyFiles), dirOpsCount) if sameDirtyFileCount >= 100 { // If the local journal is full, we might not be able to // make progress until more data is flushed to the // servers, so just warn here rather than just an outright // panic. fbo.log.CWarningf(ctx, "Making no Sync progress on dirty "+ "files after %d attempts: %v", sameDirtyFileCount, dirtyFiles) } // Just in case network access or a bug gets stuck for a // long time, time out the sync eventually. 
longCtx, longCancel := context.WithTimeout(ctx, backgroundTaskTimeout) defer longCancel() err = fbo.SyncAll(longCtx, fbo.folderBranch) if err != nil { // Just log the warning and keep trying to // sync the rest of the dirty files. fbo.log.CWarningf(ctx, "Couldn't sync all: %+v", err) } return nil }) } } func (fbo *folderBranchOps) blockUnmergedWrites(lState *lockState) { fbo.mdWriterLock.Lock(lState) } func (fbo *folderBranchOps) unblockUnmergedWrites(lState *lockState) { fbo.mdWriterLock.Unlock(lState) } func (fbo *folderBranchOps) finalizeResolutionLocked(ctx context.Context, lState *lockState, md *RootMetadata, bps *blockPutState, newOps []op, blocksToDelete []kbfsblock.ID) error { fbo.mdWriterLock.AssertLocked(lState) // Put the blocks into the cache so that, even if we fail below, // future attempts may reuse the blocks. err := fbo.finalizeBlocks(bps) if err != nil { return err } // Last chance to get pre-empted. select { case <-ctx.Done(): return ctx.Err() default: } session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return err } irmd, err := fbo.config.MDOps().ResolveBranch(ctx, fbo.id(), fbo.bid, blocksToDelete, md, session.VerifyingKey) doUnmergedPut := isRevisionConflict(err) if doUnmergedPut { fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR") return err } if err != nil { return err } // Queue a rekey if the bit was set. if md.IsRekeySet() { defer fbo.config.RekeyQueue().Enqueue(md.TlfID()) } md.loadCachedBlockChanges(ctx, bps, fbo.log) // Set the head to the new MD. fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) err = fbo.setHeadConflictResolvedLocked(ctx, lState, irmd) if err != nil { fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+ "successful put: %v", err) return err } fbo.setBranchIDLocked(lState, NullBranchID) // Archive the old, unref'd blocks if journaling is off. if !TLFJournalEnabled(fbo.config, fbo.id()) { fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly()) } mdCopyWithLocalOps, err := md.deepCopy(fbo.config.Codec()) if err != nil { return err } mdCopyWithLocalOps.data.Changes.Ops = newOps // notifyOneOp for every fixed-up merged op. for _, op := range newOps { err := fbo.notifyOneOpLocked( ctx, lState, op, mdCopyWithLocalOps.ReadOnly(), false) if err != nil { return err } } fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{irmd}) return nil } // finalizeResolution caches all the blocks, and writes the new MD to // the merged branch, failing if there is a conflict. It also sends // out the given newOps notifications locally. This is used for // completing conflict resolution. func (fbo *folderBranchOps) finalizeResolution(ctx context.Context, lState *lockState, md *RootMetadata, bps *blockPutState, newOps []op, blocksToDelete []kbfsblock.ID) error { // Take the writer lock. fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) return fbo.finalizeResolutionLocked( ctx, lState, md, bps, newOps, blocksToDelete) } func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context, lState *lockState) error { // Take the writer lock. fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) // Last chance to get pre-empted. select { case <-ctx.Done(): return ctx.Err() default: } // We don't want context cancellation after this point, so use a linked // context. There is no race since the linked context has an independent // Done channel. 
// // Generally we don't want to have any errors in unstageLocked since and // this solution is chosen because: // * If the error is caused by a cancelled context then the recovery (archiving) // would need to use a separate context anyways. // * In such cases we would have to be very careful where the error occurs // and what to archive, making that solution much more complicated. // * The other "common" error case is losing server connection and after // detecting that we won't have much luck archiving things anyways. ctx = newLinkedContext(ctx) fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure", fbo.bid) return fbo.unstageLocked(ctx, lState) } func (fbo *folderBranchOps) handleTLFBranchChange(ctx context.Context, newBID BranchID) { lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) fbo.log.CDebugf(ctx, "Journal branch change: %s", newBID) if !fbo.isMasterBranchLocked(lState) { if fbo.bid == newBID { fbo.log.CDebugf(ctx, "Already on branch %s", newBID) return } panic(fmt.Sprintf("Cannot switch to branch %s while on branch %s", newBID, fbo.bid)) } md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), newBID) if err != nil { fbo.log.CWarningf(ctx, "No unmerged head on journal branch change (bid=%s)", newBID) return } if md == (ImmutableRootMetadata{}) || md.MergedStatus() != Unmerged || md.BID() != newBID { // This can happen if CR got kicked off in some other way and // completed before we took the lock to process this // notification. fbo.log.CDebugf(ctx, "Ignoring stale branch change: md=%v, newBID=%d", md, newBID) return } // Everything we thought we knew about quota reclamation is now // called into question. fbo.fbm.clearLastQRData() // Kick off conflict resolution and set the head to the correct branch. fbo.setBranchIDLocked(lState, newBID) fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized) fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) err = fbo.setHeadSuccessorLocked(ctx, lState, md, true /*rebased*/) if err != nil { fbo.log.CWarningf(ctx, "Could not set head on journal branch change: %v", err) return } } func (fbo *folderBranchOps) onTLFBranchChange(newBID BranchID) { fbo.branchChanges.Add(1) go func() { defer fbo.branchChanges.Done() ctx, cancelFunc := fbo.newCtxWithFBOID() defer cancelFunc() // This only happens on a `PruneBranch` call, in which case we // would have already updated fbo's local view of the branch/head. if newBID == NullBranchID { fbo.log.CDebugf(ctx, "Ignoring branch change back to master") return } fbo.handleTLFBranchChange(ctx, newBID) }() } func (fbo *folderBranchOps) handleMDFlush(ctx context.Context, bid BranchID, rev kbfsmd.Revision) { fbo.log.CDebugf(ctx, "Considering archiving references for flushed MD revision %d", rev) lState := makeFBOLockState() func() { fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) fbo.setLatestMergedRevisionLocked(ctx, lState, rev, false) }() // Get that revision. 
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID, rev, Merged, nil) if err != nil { fbo.log.CWarningf(ctx, "Couldn't get revision %d for archiving: %v", rev, err) return } if err := isArchivableMDOrError(rmd.ReadOnly()); err != nil { fbo.log.CDebugf( ctx, "Skipping archiving references for flushed MD revision %d: %s", rev, err) return } fbo.fbm.archiveUnrefBlocks(rmd.ReadOnly()) } func (fbo *folderBranchOps) onMDFlush(bid BranchID, rev kbfsmd.Revision) { fbo.mdFlushes.Add(1) go func() { defer fbo.mdFlushes.Done() ctx, cancelFunc := fbo.newCtxWithFBOID() defer cancelFunc() if bid != NullBranchID { fbo.log.CDebugf(ctx, "Ignoring MD flush on branch %v for "+ "revision %d", bid, rev) return } fbo.handleMDFlush(ctx, bid, rev) }() } // TeamNameChanged implements the KBFSOps interface for folderBranchOps func (fbo *folderBranchOps) TeamNameChanged( ctx context.Context, tid keybase1.TeamID) { ctx, cancelFunc := fbo.newCtxWithFBOID() defer cancelFunc() fbo.log.CDebugf(ctx, "Starting name change for team %s", tid) newName, err := fbo.config.KBPKI().GetNormalizedUsername( ctx, tid.AsUserOrTeam()) if err != nil { fbo.log.CWarningf(ctx, "Error getting new team name: %+v", err) return } lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) if fbo.head == (ImmutableRootMetadata{}) { fbo.log.CWarningf(ctx, "No head to update") return } oldHandle := fbo.head.GetTlfHandle() if string(oldHandle.GetCanonicalName()) == string(newName) { fbo.log.CDebugf(ctx, "Name didn't change: %s", newName) return } if oldHandle.FirstResolvedWriter() != tid.AsUserOrTeam() { fbo.log.CWarningf(ctx, "Old handle doesn't include changed team ID: %s", oldHandle.FirstResolvedWriter()) return } // Make a copy of `head` with the new handle. 
newHandle := oldHandle.deepCopy() newHandle.name = CanonicalTlfName(newName) newHandle.resolvedWriters[tid.AsUserOrTeam()] = newName newHead, err := fbo.head.deepCopy(fbo.config.Codec()) if err != nil { fbo.log.CWarningf(ctx, "Error copying head: %+v", err) return } newHead.tlfHandle = newHandle fbo.log.CDebugf(ctx, "Team name changed from %s to %s", oldHandle.GetCanonicalName(), newHandle.GetCanonicalName()) fbo.head = MakeImmutableRootMetadata( newHead, fbo.head.lastWriterVerifyingKey, fbo.head.mdID, fbo.head.localTimestamp, fbo.head.putToServer) if err != nil { fbo.log.CWarningf(ctx, "Error setting head: %+v", err) return } fbo.observers.tlfHandleChange(ctx, newHandle) } // GetUpdateHistory implements the KBFSOps interface for folderBranchOps func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) (history TLFUpdateHistory, err error) { fbo.log.CDebugf(ctx, "GetUpdateHistory") defer func() { fbo.deferLog.CDebugf(ctx, "GetUpdateHistory done: %+v", err) }() if folderBranch != fbo.folderBranch { return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch} } rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), kbfsmd.RevisionInitial, nil) if err != nil { return TLFUpdateHistory{}, err } if len(rmds) > 0 { rmd := rmds[len(rmds)-1] history.ID = rmd.TlfID().String() history.Name = rmd.GetTlfHandle().GetCanonicalPath() } history.Updates = make([]UpdateSummary, 0, len(rmds)) writerNames := make(map[keybase1.UID]string) for _, rmd := range rmds { writer, ok := writerNames[rmd.LastModifyingWriter()] if !ok { name, err := fbo.config.KBPKI().GetNormalizedUsername( ctx, rmd.LastModifyingWriter().AsUserOrTeam()) if err != nil { return TLFUpdateHistory{}, err } writer = string(name) writerNames[rmd.LastModifyingWriter()] = writer } updateSummary := UpdateSummary{ Revision: rmd.Revision(), Date: rmd.localTimestamp, Writer: writer, LiveBytes: rmd.DiskUsage(), Ops: make([]OpSummary, 0, len(rmd.data.Changes.Ops)), } for _, op := range rmd.data.Changes.Ops { opSummary := OpSummary{ Op: op.String(), Refs: make([]string, 0, len(op.Refs())), Unrefs: make([]string, 0, len(op.Unrefs())), Updates: make(map[string]string), } for _, ptr := range op.Refs() { opSummary.Refs = append(opSummary.Refs, ptr.String()) } for _, ptr := range op.Unrefs() { opSummary.Unrefs = append(opSummary.Unrefs, ptr.String()) } for _, update := range op.allUpdates() { opSummary.Updates[update.Unref.String()] = update.Ref.String() } updateSummary.Ops = append(updateSummary.Ops, opSummary) } history.Updates = append(history.Updates, updateSummary) } return history, nil } // GetEditHistory implements the KBFSOps interface for folderBranchOps func (fbo *folderBranchOps) GetEditHistory(ctx context.Context, folderBranch FolderBranch) (edits TlfWriterEdits, err error) { fbo.log.CDebugf(ctx, "GetEditHistory") defer func() { fbo.deferLog.CDebugf(ctx, "GetEditHistory done: %+v", err) }() if folderBranch != fbo.folderBranch { return nil, WrongOpsError{fbo.folderBranch, folderBranch} } lState := makeFBOLockState() head, err := fbo.getMDForReadNeedIdentify(ctx, lState) if err != nil { return nil, err } return fbo.editHistory.GetComplete(ctx, head) } // PushStatusChange forces a new status be fetched by status listeners. func (fbo *folderBranchOps) PushStatusChange() { fbo.config.KBFSOps().PushStatusChange() } // ClearPrivateFolderMD implements the KBFSOps interface for // folderBranchOps. 
func (fbo *folderBranchOps) ClearPrivateFolderMD(ctx context.Context) { if fbo.folderBranch.Tlf.Type() == tlf.Public { return } lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) if fbo.head == (ImmutableRootMetadata{}) { // Nothing to clear. return } fbo.log.CDebugf(ctx, "Clearing folder MD") // First cancel the background goroutine that's registered for // updates, because the next time we set the head in this FBO // we'll launch another one. fbo.cancelUpdatesLock.Lock() defer fbo.cancelUpdatesLock.Unlock() if fbo.cancelUpdates != nil { fbo.cancelUpdates() select { case <-fbo.updateDoneChan: case <-ctx.Done(): fbo.log.CDebugf( ctx, "Context canceled before updater was canceled") return } fbo.config.MDServer().CancelRegistration(ctx, fbo.id()) } fbo.head = ImmutableRootMetadata{} fbo.headStatus = headUntrusted fbo.latestMergedRevision = kbfsmd.RevisionUninitialized fbo.hasBeenCleared = true } // ForceFastForward implements the KBFSOps interface for // folderBranchOps. func (fbo *folderBranchOps) ForceFastForward(ctx context.Context) { lState := makeFBOLockState() fbo.headLock.RLock(lState) defer fbo.headLock.RUnlock(lState) if fbo.head != (ImmutableRootMetadata{}) { // We're already up to date. return } if !fbo.hasBeenCleared { // No reason to fast-forward here if it hasn't ever been // cleared. return } fbo.forcedFastForwards.Add(1) go func() { defer fbo.forcedFastForwards.Done() ctx, cancelFunc := fbo.newCtxWithFBOID() defer cancelFunc() fbo.log.CDebugf(ctx, "Forcing a fast-forward") currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id(), nil) if err != nil { fbo.log.CDebugf(ctx, "Fast-forward failed: %v", err) return } if currHead == (ImmutableRootMetadata{}) { fbo.log.CDebugf(ctx, "No MD yet") return } fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision()) lState := makeFBOLockState() fbo.mdWriterLock.Lock(lState) defer fbo.mdWriterLock.Unlock(lState) fbo.headLock.Lock(lState) defer fbo.headLock.Unlock(lState) if fbo.head != (ImmutableRootMetadata{}) { // We're already up to date. fbo.log.CDebugf(ctx, "Already up-to-date: %v", err) return } err = fbo.doFastForwardLocked(ctx, lState, currHead) if err != nil { fbo.log.CDebugf(ctx, "Fast-forward failed: %v", err) } }() } // PushConnectionStatusChange pushes human readable connection status changes. func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) { fbo.config.KBFSOps().PushConnectionStatusChange(service, newStatus) }
1
18,172
the check can now be `if newLen < 0`
keybase-kbfs
go
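The review note in the record above ("the check can now be `if newLen < 0`") refers to a length guard in the kbfs patch that isn't visible in this excerpt. As a purely hypothetical sketch of the simplification it suggests — once the value is computed with signed arithmetic, the guard reduces to a plain sign check (the names `shrinkBy`/`newLen` here are illustrative, not from the kbfs code):

```go
package main

import (
	"errors"
	"fmt"
)

// shrinkBy computes the length left after removing n bytes. With signed
// arithmetic the "would this underflow?" guard is just a sign check,
// which is the simplification the reviewer is pointing at.
func shrinkBy(curLen, n int64) (int64, error) {
	newLen := curLen - n
	if newLen < 0 {
		return 0, errors.New("cannot shrink below zero")
	}
	return newLen, nil
}

func main() {
	fmt.Println(shrinkBy(10, 3))  // 7 <nil>
	fmt.Println(shrinkBy(10, 12)) // 0 cannot shrink below zero
}
```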
@@ -8,18 +8,13 @@ import ( "os" "time" - "github.com/google/uuid" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/pflag" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/tools/leaderelection" - "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/klog" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "sigs.k8s.io/controller-runtime/pkg/client/config"
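The diff above strips the hand-rolled client-go leader election (uuid, ConfigMapLock, leaderelection/resourcelock) out of the operator's main. Whether this particular patch swaps in a replacement isn't shown here, but the common alternative in controller-runtime based operators is to let the manager own the lock. A minimal sketch under that assumption; the lock ID and namespace values are illustrative, not from hive:

```go
package main

import (
	log "github.com/sirupsen/logrus"

	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

func main() {
	cfg, err := config.GetConfig()
	if err != nil {
		log.Fatal(err)
	}

	// The manager acquires and renews the leader lock itself; no uuid,
	// ConfigMapLock, or RunOrDie wiring is needed in main.
	mgr, err := manager.New(cfg, manager.Options{
		MetricsBindAddress:      ":2112",
		LeaderElection:          true,
		LeaderElectionID:        "hive-operator-leader", // lock object name (illustrative)
		LeaderElectionNamespace: "hive",                 // illustrative namespace
	})
	if err != nil {
		log.Fatal(err)
	}

	// Controllers registered on mgr only run while this process holds the lease.
	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		log.Fatal(err)
	}
}
```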
1
package main import ( "context" "flag" golog "log" "net/http" "os" "time" "github.com/google/uuid" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/pflag" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/klog" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" oappsv1 "github.com/openshift/api/apps/v1" orbacv1 "github.com/openshift/api/authorization/v1" _ "github.com/openshift/generic-admission-server/pkg/cmd" "github.com/openshift/hive/apis" "github.com/openshift/hive/pkg/operator" "github.com/openshift/hive/pkg/operator/hive" utillogrus "github.com/openshift/hive/pkg/util/logrus" "github.com/openshift/hive/pkg/version" ) const ( defaultLogLevel = "info" leaderElectionConfigMap = "hive-operator-leader" leaderElectionLeaseDuration = "120s" leaderElectionRenewDeadline = "90s" leaderElectionRetryPeriod = "30s" ) type controllerManagerOptions struct { LogLevel string } func newRootCommand() *cobra.Command { opts := &controllerManagerOptions{} cmd := &cobra.Command{ Use: "hive-operator", Short: "OpenShift Hive Operator", Run: func(cmd *cobra.Command, args []string) { // Set log level level, err := log.ParseLevel(opts.LogLevel) if err != nil { log.WithError(err).Fatal("Cannot parse log level") } log.SetLevel(level) log.Infof("Version: %s", version.String()) log.Debug("debug logging enabled") // Parse leader election options leaseDuration, err := time.ParseDuration(leaderElectionLeaseDuration) if err != nil { log.WithError(err).Fatal("Cannot parse lease duration") } renewDeadline, err := time.ParseDuration(leaderElectionRenewDeadline) if err != nil { log.WithError(err).Fatal("Cannot parse renew deadline") } retryPeriod, err := time.ParseDuration(leaderElectionRetryPeriod) if err != nil { log.WithError(err).Fatal("Cannot parse retry period") } // Get a config to talk to the apiserver cfg, err := config.GetConfig() if err != nil { log.Fatal(err) } // We must be provided an env var indicating where the hive-operator is running. Normally // passed by the hive-operator Deployment using the kube downward API. 
operatorNS := os.Getenv(hive.HiveOperatorNamespaceEnvVar) if operatorNS == "" { log.Fatalf("%s env var is unset, unable to determine namespace operator is running in", hive.HiveOperatorNamespaceEnvVar) } // Create and start liveness and readiness probe endpoints http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) http.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) log.Info("Starting /healthz and /readyz endpoints") go http.ListenAndServe(":8080", nil) // use a Go context so we can tell the leaderelection code when we want to step down ctx, cancel := context.WithCancel(context.Background()) defer cancel() run := func(ctx context.Context) { // Create a new Cmd to provide shared dependencies and start components mgr, err := manager.New(cfg, manager.Options{ MetricsBindAddress: ":2112", Logger: utillogrus.NewLogr(log.StandardLogger()), }) if err != nil { log.Fatal(err) } log.Info("Registering Components.") // Setup Scheme for all resources if err := apis.AddToScheme(mgr.GetScheme()); err != nil { log.Fatal(err) } if err := apiregistrationv1.AddToScheme(mgr.GetScheme()); err != nil { log.Fatal(err) } if err := apiextv1.AddToScheme(mgr.GetScheme()); err != nil { log.Fatal(err) } if err := oappsv1.Install(mgr.GetScheme()); err != nil { log.Fatal(err) } if err := orbacv1.Install(mgr.GetScheme()); err != nil { log.Fatal(err) } // Setup all Controllers if err := operator.AddToOperator(mgr); err != nil { log.Fatal(err) } log.Info("Starting the Cmd.") // Start the Cmd err = mgr.Start(signals.SetupSignalHandler()) if err != nil { log.WithError(err).Error("error running manager") } // Canceling the leader election context cancel() } // Leader election code based on: // https://github.com/kubernetes/kubernetes/blob/f7e3bcdec2e090b7361a61e21c20b3dbbb41b7f0/staging/src/k8s.io/client-go/examples/leader-election/main.go#L92-L154 // This gives us ReleaseOnCancel which is not presently exposed in controller-runtime. 
id := uuid.New().String() leLog := log.WithField("id", id) leLog.Info("generated leader election ID") lock := &resourcelock.ConfigMapLock{ ConfigMapMeta: metav1.ObjectMeta{ Namespace: operatorNS, Name: leaderElectionConfigMap, }, Client: kubernetes.NewForConfigOrDie(cfg).CoreV1(), LockConfig: resourcelock.ResourceLockConfig{ Identity: id, }, } // start the leader election code loop leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ Lock: lock, ReleaseOnCancel: true, LeaseDuration: leaseDuration, RenewDeadline: renewDeadline, RetryPeriod: retryPeriod, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { run(ctx) }, OnStoppedLeading: func() { // we can do cleanup here if necessary leLog.Infof("leader lost") os.Exit(0) }, OnNewLeader: func(identity string) { if identity == id { // We just became the leader leLog.Info("became leader") return } log.Infof("current leader: %s", identity) }, }, }) }, } cmd.PersistentFlags().StringVar(&opts.LogLevel, "log-level", defaultLogLevel, "Log level (debug,info,warn,error,fatal)") cmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) initializeKlog(cmd.PersistentFlags()) flag.CommandLine.Parse([]string{}) return cmd } func initializeKlog(flags *pflag.FlagSet) { golog.SetOutput(klogWriter{}) // Redirect all regular go log output to klog golog.SetFlags(0) go wait.Forever(klog.Flush, 5*time.Second) // Periodically flush logs f := flags.Lookup("logtostderr") // Default to logging to stderr if f != nil { f.Value.Set("true") } } type klogWriter struct{} func (writer klogWriter) Write(data []byte) (n int, err error) { klog.Info(string(data)) return len(data), nil } func main() { defer klog.Flush() cmd := newRootCommand() err := cmd.Execute() if err != nil { log.Fatal(err) } }
1
20,955
If we're going to clean up this package, can we also remove those two `_` imports that appear to serve no purpose? (FWIW, they're also in the manager package too...)
openshift-hive
go
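Context for the review comment in the record above: a `_` (blank) import in Go exists only for its package's init() side effects — for example, the `_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"` import in this file registers the GCP auth provider — so whether the two flagged imports are truly purposeless depends on whether anything relies on that registration. A small standard-library illustration of the pattern (not hive code):

```go
package main

import (
	"bytes"
	"fmt"
	"image"
	"image/png" // needed here for Encode; decode-only code would use `_ "image/png"`
)

func main() {
	// Encode a 1x1 image, then feed it to the format-agnostic image.Decode.
	var buf bytes.Buffer
	if err := png.Encode(&buf, image.NewRGBA(image.Rect(0, 0, 1, 1))); err != nil {
		panic(err)
	}

	// image.Decode recognizes PNG only because importing image/png ran its
	// init(), which calls image.RegisterFormat. That side effect is all a
	// blank import buys you -- and why `_` imports often can't be dropped
	// just because nothing references them by name.
	_, format, err := image.Decode(&buf)
	fmt.Println(format, err) // png <nil>
}
```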
@@ -28,14 +28,13 @@ typedef boost::tokenizer<boost::char_separator<char> > tokenizer; namespace ForceFields { namespace MMFF { -class std::unique_ptr<MMFFAromCollection> MMFFAromCollection::ds_instance = nullptr; - extern const std::uint8_t defaultMMFFArom[]; MMFFAromCollection *MMFFAromCollection::getMMFFArom( const std::uint8_t *mmffArom) { - if (!ds_instance || mmffArom) { - ds_instance.reset(new MMFFAromCollection(mmffArom)); + static std::unique_ptr<MMFFAromCollection> ds_instance(new MMFFAromCollection(nullptr)); + if(mmffArom){ + return new MMFFAromCollection(mmffArom); } return ds_instance.get(); }
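The diff above replaces RDKit's class-level static `ds_instance` (which any call with custom data would reset, clobbering the shared default) with a function-local static for the default table plus a fresh heap object for caller-supplied data. A rough analogue of that design, written in Go to match the other sketches in this section — type names and parameter values are illustrative, not RDKit code:

```go
package main

import (
	"fmt"
	"sync"
)

// AromCollection stands in for MMFFAromCollection; the values below are
// illustrative, not the real MMFF table.
type AromCollection struct {
	params []uint8
}

var (
	defaultOnce sync.Once
	defaultArom *AromCollection
)

// GetArom mirrors the refactored getMMFFArom: the built-in table is a lazily
// initialized shared singleton, while caller-supplied data yields a fresh,
// caller-owned instance instead of overwriting the shared one.
func GetArom(custom []uint8) *AromCollection {
	if custom != nil {
		return &AromCollection{params: append([]uint8(nil), custom...)}
	}
	defaultOnce.Do(func() {
		defaultArom = &AromCollection{params: []uint8{37, 38, 39, 44, 58, 59}}
	})
	return defaultArom
}

func main() {
	fmt.Println(GetArom(nil) == GetArom(nil))               // true: shared default
	fmt.Println(GetArom([]uint8{1}) == GetArom([]uint8{2})) // false: independent copies
}
```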
1
// $Id$ // // Copyright (C) 2013 Paolo Tosco // // Copyright (C) 2004-2006 Rational Discovery LLC // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #ifdef WIN32 #define _USE_MATH_DEFINES #endif #include <cmath> #include "Params.h" #include <iostream> #include <sstream> #include <RDGeneral/StreamOps.h> #include <boost/lexical_cast.hpp> #include <boost/tokenizer.hpp> #include <Geometry/point.h> typedef boost::tokenizer<boost::char_separator<char> > tokenizer; namespace ForceFields { namespace MMFF { class std::unique_ptr<MMFFAromCollection> MMFFAromCollection::ds_instance = nullptr; extern const std::uint8_t defaultMMFFArom[]; MMFFAromCollection *MMFFAromCollection::getMMFFArom( const std::uint8_t *mmffArom) { if (!ds_instance || mmffArom) { ds_instance.reset(new MMFFAromCollection(mmffArom)); } return ds_instance.get(); } MMFFAromCollection::MMFFAromCollection(const std::uint8_t *mmffArom) { if (!mmffArom) { mmffArom = defaultMMFFArom; } for (unsigned int i = 0; i < sizeof(mmffArom) / sizeof(std::uint8_t); ++i) { d_params.push_back(mmffArom[i]); } } const std::uint8_t defaultMMFFArom[] = {37, 38, 39, 44, 58, 59, 63, 64, 65, 66, 69, 76, 78, 79, 80, 81, 82}; class std::unique_ptr<MMFFDefCollection> MMFFDefCollection::ds_instance = nullptr; extern const std::string defaultMMFFDef; MMFFDefCollection *MMFFDefCollection::getMMFFDef(const std::string &mmffDef) { if (!ds_instance || !mmffDef.empty()) { ds_instance.reset(new MMFFDefCollection(mmffDef)); } return ds_instance.get(); } MMFFDefCollection::MMFFDefCollection(std::string mmffDef) { if (mmffDef.empty()) { mmffDef = defaultMMFFDef; } std::istringstream inStream(mmffDef); std::string inLine = RDKit::getLine(inStream); unsigned int oldAtomType = 0; unsigned int atomType; while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFDef mmffDefObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); // skip first token ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int atomType = boost::lexical_cast<unsigned int>(*token); #else atomType = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); #endif ++token; // Level 2 (currently = Level 1, see MMFF.I page 513) mmffDefObj.eqLevel[0] = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; // Level 3 mmffDefObj.eqLevel[1] = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; // Level 4 mmffDefObj.eqLevel[2] = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; // Level 5 mmffDefObj.eqLevel[3] = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; if (atomType != oldAtomType) { #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[atomType] = mmffDefObj; #else d_params.push_back(mmffDefObj); #endif oldAtomType = atomType; } } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFDef = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* PRIMARY\n" "* MMFF MMFF \n" "*SYMBOL TYPE DEFAULT TYPES DEFINITION \n" "*\n" "CR 1 1 1 1 0 ALKYL CARBON \n" "C=C 2 2 2 1 0 VINYLIC \n" "* CSP2 2 2 2 1 0 GENERIC CSP2 \n" "* CGD 2 2 2 1 0 GUANIDINE CARBON " "\n" "C=O 3 3 3 1 0 GENERAL CARBONYL " "C \n" "* C=N 3 3 3 1 0 C=N \n" "* C=OR 3 3 3 1 0 KETONE OR " "ALDEHYDE CO \n" "* C=ON 3 3 3 1 0 AMIDE CARBONYL " "\n" "* COO 3 3 3 1 0 CARBOXYLIC ACID " "OF EST \n" "* 
COON 3 3 3 1 0 CARBAMATE " "CARBONYL \n" "* COOO 3 3 3 1 0 CARBONIC ACID " "OR ESTER \n" "* C=OS 3 3 3 1 0 THIOESTER, C=O " "\n" "* C=S 3 3 3 1 0 THIOESTER, C=S " "\n" "* C=SN 3 3 3 1 0 THIOAMIDE \n" "* CSO2 3 3 3 1 0 CARBON IN >C=SO2 " "\n" "CSP 4 4 4 1 0 ACETYLENIC C " "\n" "* =C= 4 4 4 1 0 ALLENIC C \n" "HC 5 5 5 5 0 H-C \n" "* HSI 5 5 5 5 0 H-SI \n" "* HP 5 5 5 5 0 H-P \n" "OR 6 6 6 6 0 O-CSP3 \n" "* OH2 6 6 6 6 0 OXYGEN IN H2O " "\n" "* OC=O 6 6 6 6 0 ESTER OR ACID " "-O- \n" "* OC=C 6 6 6 6 0 ENOL OR " "PHEMOLIC O \n" "* OC=N 6 6 6 6 0 OXYGEN IN -O-C=N " "MOIETY\n" "* OSO3 6 6 6 6 0 DIVALENT O " "IN SULFATE\n" "* OSO2 6 6 6 6 0 DIVALENT O " "IN SULFITE\n" "* OSO 6 6 6 6 0 PAIR OF " "DIVALENT O ON S\n" "* -OS 6 6 6 6 0 OTHER DIVALENT " "O ON S\n" "* OPO3 6 6 6 6 0 DIVALENT O " "IN PHOSPHATE\n" "* OPO2 6 6 6 6 0 DIVALENT O " "IN PHOSPHITE\n" "* OPO 6 6 6 6 0 PAIR OF " "DIVALENT O ON P\n" "* -OP 6 6 6 6 0 OTHER DIVALENT " "O ON P\n" "* -O- 6 6 6 6 0 GENERAL DIVALENT " "OX \n" "O=C 7 7 7 6 0 O=C, GENERIC \n" "* O=CN 7 7 7 6 0 O=C IN AMIDES " "\n" "* O=CR 7 7 7 6 0 O=C IN KET, " "ALD \n" "* O=CO 7 7 7 6 0 O=C IN ACIDS, " "ESTERS \n" "* O=S 7 7 7 6 0 TERMINAL O " "SULFOXIDES \n" "* O=N 7 7 7 6 0 NITROSO-GROUP OXYGEN " "\n" "NR 8 8 8 8 0 AMINE N \n" "N=C 9 9 9 8 0 N=C, IMINES \n" "* N=N 9 9 9 8 0 N=N, AZO " "COMPOUNDS \n" "NC=O 10 10 10 8 0 N-C=O, AMIDES \n" "* NC=S 10 10 10 8 0 N-C=S (DELOC LP) " "\n" "* NN=C 10 10 10 8 0 N-N=C (DELOC LP) " "\n" "* NN=N 10 10 10 8 0 N-N=N (DELOC LP) " "\n" "F 11 11 11 11 0 FLUORINE \n" "CL 12 12 12 12 0 CHLORINE \n" "BR 13 13 13 13 0 BROMINE \n" "I 14 14 14 14 0 IODINE \n" "S 15 15 15 15 0 THIOL, SULFIDE \n" "S=C 16 16 16 15 0 S DOUBLY BONDED " "TO C \n" "S=O 17 17 17 15 0 SULFOXIDE S " "\n" "SO2 18 18 18 15 0 SULFONE S \n" "* SO2N 18 18 18 15 0 SULFONAMIDE S " "\n" "* SO3 18 18 18 15 0 SULFONATE S " "\n" "* =SO2 18 18 18 15 0 OXYGENATED SULFONE " "S \n" "* SNO 18 18 18 15 0 NITROGEN ANALOG " "OF SO2 \n" "SI 19 19 19 19 0 SILICON \n" "CR4R 20 20 1 1 0 C IN " "CYCLOBUTYL \n" "HOR 21 21 21 5 0 H-O, ALCOHOLS " "\n" "* HO 21 21 21 5 0 GENERAL H ON " "O \n" "CR3R 22 22 22 1 0 C IN " "CYCLOPROPLY \n" "HNR 23 23 23 5 0 H-N, AMINES \n" "* H3N 23 23 23 5 0 H, AMMONIA \n" "* HPYL 23 23 23 5 0 H-N IN PYRROLE " "\n" "* HN 23 23 23 5 0 GENERAL H-N \n" "HOCO 24 24 24 5 0 H-O, ACIDS \n" "* HOP 24 21 21 5 0 H-O-P, PHOS ACIDS " "\n" "PO4 25 25 25 25 0 PHOSPHODIESTER \n" "* PO3 25 25 25 25 0 TETRACRD P, " "3 OXYGENS \n" "* PO2 25 25 25 25 0 TETRACRD P, " "2 OXYGENS \n" "* PO 25 25 25 25 0 TETRACRD P, " "2 OXYGENS \n" "* PTET 25 25 25 25 0 GENERAL TETRACRD " "P \n" "P 26 26 26 25 0 TRICOORDINATE P \n" "HN=C 27 27 28 5 0 IMINE N-H \n" "* HN=N 27 27 28 5 0 AZO N-H \n" "HNCO 28 28 28 5 0 H-N, AMIDES \n" "* HNCC 28 28 28 5 0 H-N, ENAMINES " "\n" "* HNCS 28 28 28 5 0 H-N, THIOAMIDES " "\n" "* HNCN 28 28 28 5 0 H-N, HN-C=N \n" "* HNNC 28 28 28 5 0 H-N, HN-N=C \n" "* HNNN 28 28 28 5 0 H-N, HN-N=N \n" "* HSP2 28 28 28 5 0 GENERAL H ON " "SP2 N \n" "HOCC 29 29 29 5 0 H-O, ENOLS, PHENOLS " "\n" "* HOCN 29 29 29 5 0 H-O IN HO-C=N " "\n" "CE4R 30 30 2 1 0 C=C IN 4-RING " "\n" "HOH 31 31 31 31 0 H-OH \n" "O2CM 32 32 7 6 0 O, CARBOXYLATE " "ANION \n" "* OXN 32 32 7 6 0 OXIDE ON " "NITROHGEN \n" "* O2N 32 32 7 6 0 NITRO-GROUP OXYGEN " "\n" "* O2NO 32 32 7 6 0 NITRO-GROUP IN " "NITRATE \n" "* O3N 32 32 7 6 0 NITRATE ANION OXYGEN " "\n" "* O-S 32 32 7 6 0 SINGLE TERM O " "ON TET S \n" "* O2S 32 32 7 6 0 SULFONES, " "SULFONAMIDES \n" "* O3S 32 32 7 6 0 SULFONATES, TERM " "OX \n" "* O4S 32 32 
7 6 0 SO4(3-) \n" "* OSMS 32 32 7 6 0 THIOSULFINATE O " "(-1/2) \n" "* OP 32 32 7 6 0 TERMINAL O, " "O-P \n" "* O2P 32 32 7 6 0 TERMINAL O, " "O2P GROUP \n" "* O3P 32 32 7 6 0 TERMINAL O, " "O3P GROUP \n" "* O4P 32 32 7 6 0 TERMINAL O, " "PO4(-3) \n" "* O4CL 32 32 7 6 0 TERMINAL O " "IN CLO4(-) \n" "HOS 33 33 21 5 0 H-O-S, SULF ACIDS " "\n" "NR+ 34 34 8 8 0 N+, QUATERNARY " "N \n" "OM 35 35 6 6 0 OXIDE OXYGEN ON " "SP3 C \n" "* OM2 35 35 6 6 0 OXIDE OXYGEN ON " "SP2 C \n" "HNR+ 36 36 36 5 0 H-N+ \n" "* HNN+ 36 36 36 5 0 H ON " "IMIDAZOLIUM N \n" "* HNC+ 36 36 36 5 0 H ON " "PROTONATED N+=C-N \n" "* HGD+ 36 36 36 5 0 H ON " "GUANIDINIUM N \n" "CB 37 37 2 1 0 AROMATIC C \n" "NPYD 38 38 9 8 0 AROMATIC N, " "PYRIDINE \n" "NPYL 39 39 10 8 0 AROMATIC N, " "PYRROLE \n" "NC=C 40 40 10 8 0 N-C=C (DELOC LP) " "\n" "* NC=N 40 40 10 8 0 N-C=N (DELOC LP) " "\n" "CO2M 41 41 3 1 0 C IN CO2- " "ANION \n" "* CS2M 41 41 3 1 0 THIOCARBOXYLATE C " "\n" "NSP 42 42 42 8 0 N TRIPLE BONDED " "\n" "NSO2 43 43 10 8 0 N, SULFONAMIDES " "\n" "STHI 44 44 16 15 0 S IN " "THIOPHENE \n" "NO2 45 45 10 8 0 NITRO GROUP N " "\n" "* NO3 45 45 10 8 0 NITRATE GROUP N " "\n" "N=O 46 46 9 8 0 NITROSO GROUP N " "\n" "NAZT 47 47 42 8 0 TERMINAL N, " "AZIDE \n" "NSO 48 48 9 8 0 DIVAL. N IN " "S(N)(O) GP \n" "O+ 49 49 6 6 0 OXONIUM (TRICOORD) " "O \n" "HO+ 50 50 21 5 0 H ON OXONIUM " "OXYGEN \n" "O=+ 51 51 7 6 0 OXENIUM OXYGEN+ \n" "HO=+ 52 52 21 5 0 H ON OXENIUM " "O+ \n" "=N= 53 53 42 8 0 N TWICE DOUBLE " "BONDED \n" "N+=C 54 54 9 8 0 IMINIUM NITROGEN " "\n" "* N+=N 54 54 9 8 0 AZONIUM NITROGEN " "\n" "NCN+ 55 55 10 8 0 N IN +N=C-N: " "; Q=1/2 \n" "NGD+ 56 56 10 8 0 GUANIDINIUM N; " "Q=1/3 \n" "CGD+ 57 57 2 1 0 GUANIDINIUM CARBON " "\n" "* CNN+ 57 57 2 1 0 C IN +N=C-N " "RESONANCE \n" "NPD+ 58 58 10 8 0 N PYRIDINIUM " "ION \n" "OFUR 59 59 6 6 0 AROMATIC O, " "FURAN \n" "C% 60 60 4 1 0 ISONITRILE CARBON \n" "NR% 61 61 42 8 0 ISONITRILE N " "\n" "NM 62 62 10 8 0 SULFONAMIDE N- \n" "C5A 63 63 2 1 0 ALPHA AROM 5-RING " "C \n" "C5B 64 64 2 1 0 BETA AROM 5-RING " "C \n" "N5A 65 65 9 8 0 ALPHA AROM 5-RING " "N \n" "N5B 66 66 9 8 0 ALPHA AROM 5-RING " "N \n" "N2OX 67 67 9 8 0 NITROGEN IN " "N-OXIDE \n" "N3OX 68 68 8 8 0 NITROGEN IN " "N-OXIDE \n" "NPOX 69 69 9 8 0 NITROGEN IN " "N-OXIDE \n" "OH2 70 70 70 70 70 OXYGEN IN WATER " "\n" "HS 71 71 5 5 0 H-S \n" "S2CM 72 72 16 15 0 THIOCARBOXYLATE S " "\n" "* S-P 72 72 16 15 0 TERMINAL SULFUR " "ON P \n" "* SM 72 72 16 15 0 TERMINAL SULFUR " "ON C \n" "* SSMO 72 72 16 15 0 TERM S, " "THIOSULFINATE \n" "SO2M 73 73 18 15 0 SULFUR IN " "SULFINATE \n" "* SSOM 73 73 18 15 0 SULFUR, THIOSULFINATE " "\n" "=S=O 74 74 17 15 0 SULFINYL SULFUR, " "C=S=O \n" "-P=C 75 75 26 25 0 P DOUBLY BONDED " "TO C\n" "N5M 76 76 9 8 0 NEG N IN " "TETRAZOLE AN \n" "CLO4 77 77 12 12 0 CHLORINE IN " "CLO4(-) \n" "C5 78 78 2 1 0 GENERAL AROM 5-RING " "C \n" "N5 79 79 9 8 0 GENERAL AROM 5-RING " "N \n" "CIM+ 80 80 2 1 0 C IN N-C-N, " "IM+ ION \n" "NIM+ 81 81 10 8 0 N IN N-C-N, " "IM+ ION \n" "N5AX 82 82 9 8 0 5R NITROGEN " "IN N-OXIDE \n" "* N5BX 82 82 9 8 0 5R NITROGEN " "IN N-OXIDE \n" "* N5OX 82 82 9 8 0 5R NITROGEN " "IN N-OXIDE \n" "FE+2 87 87 87 87 87 IRON +2 " "CATION\n" "FE+3 88 88 88 88 88 IRON +3 " "CATION\n" "F- 89 89 89 89 89 FLUORIDE ANION\n" "CL- 90 90 90 90 90 CHLORIDE ANION\n" "BR- 91 91 91 91 91 BROMIDE ANION\n" "LI+ 92 92 92 92 92 LITHIUM CATION \n" "NA+ 93 93 93 93 93 SODIUM CATION \n" "K+ 94 94 94 94 94 POTASSIUM CATION \n" "ZN+2 95 95 95 95 95 DIPOSITIVE ZINC " "CATION \n" "* ZINC 95 95 95 95 95 
DIPOSITIVE ZINC " "CATION \n" "CA+2 96 96 96 96 96 DIPOSITIVE CALCIUM " "CATION\n" "CU+1 97 97 97 97 97 MONOPOSITIVE COPPER " "CATION\n" "CU+2 98 98 98 98 98 DIPOSITIVE COPPER " "CATION\n" "MG+2 99 99 99 99 99 DIPOSITIVE " "MAGNESIUM CATION\n"; class std::unique_ptr<MMFFPropCollection> MMFFPropCollection::ds_instance = nullptr; extern const std::string defaultMMFFProp; MMFFPropCollection *MMFFPropCollection::getMMFFProp( const std::string &mmffProp) { if (!ds_instance || !mmffProp.empty()) { ds_instance.reset(new MMFFPropCollection(mmffProp)); } return ds_instance.get(); } MMFFPropCollection::MMFFPropCollection(std::string mmffProp) { if (mmffProp.empty()) { mmffProp = defaultMMFFProp; } std::istringstream inStream(mmffProp); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFProp mmffPropObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int atomType = boost::lexical_cast<unsigned int>(*token); #else d_iAtomType.push_back( (std::uint8_t)boost::lexical_cast<unsigned int>(*token)); #endif ++token; mmffPropObj.atno = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; mmffPropObj.crd = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; mmffPropObj.val = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; mmffPropObj.pilp = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; mmffPropObj.mltb = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; mmffPropObj.arom = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; mmffPropObj.linh = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; mmffPropObj.sbmb = (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)); ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[atomType] = mmffPropObj; #else d_params.push_back(mmffPropObj); #endif } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFProp = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* MMFFPROP - MMFF atom-type properties\n" "*\n" "* atype aspec crd val pilp mltb arom lin sbmb\n" "1 6 4 4 0 0 0 0 0\n" "2 6 3 4 0 2 0 0 1\n" "3 6 3 4 0 2 0 0 1\n" "4 6 2 4 0 3 0 1 1\n" "5 1 1 1 0 0 0 0 0\n" "6 8 2 2 1 0 0 0 0\n" "7 8 1 2 0 2 0 0 0\n" "8 7 3 3 1 0 0 0 0\n" "9 7 2 3 0 2 0 0 1\n" "10 7 3 3 1 1 0 0 0\n" "11 9 1 1 1 0 0 0 0\n" "12 17 1 1 1 0 0 0 0\n" "13 35 1 1 1 0 0 0 0\n" "14 53 1 1 1 0 0 0 0\n" "15 16 2 2 1 0 0 0 0\n" "16 16 1 2 0 2 0 0 0\n" "17 16 3 4 0 2 0 0 0\n" "18 16 4 4 0 0 0 0 0\n" "19 14 4 4 0 0 0 0 0\n" "20 6 4 4 0 0 0 0 0\n" "21 1 1 1 0 0 0 0 0\n" "22 6 4 4 0 0 0 0 0\n" "23 1 1 1 0 0 0 0 0\n" "24 1 1 1 0 0 0 0 0\n" "25 15 4 4 0 0 0 0 0\n" "26 15 3 3 1 0 0 0 0\n" "27 1 1 1 0 0 0 0 0\n" "28 1 1 1 0 0 0 0 0\n" "29 1 1 1 0 0 0 0 0\n" "30 6 3 4 0 2 0 0 1\n" "31 1 1 1 0 0 0 0 0\n" "32 8 1 12 1 1 0 0 0\n" "33 1 1 1 0 0 0 0 0\n" "34 7 4 4 0 0 0 0 0\n" "35 8 1 1 1 1 0 0 0\n" "36 1 1 1 0 0 0 0 0\n" "37 6 3 4 0 2 1 0 1\n" "38 7 2 3 0 2 1 0 0\n" "39 7 3 3 1 1 1 0 1\n" "40 7 3 3 1 0 0 0 0\n" "41 6 3 4 0 1 0 0 0\n" "42 7 1 3 0 3 0 0 0\n" "43 7 3 3 1 0 0 0 0\n" "44 16 2 2 1 1 1 0 0\n" "45 7 3 4 0 2 0 0 0\n" "46 7 2 3 0 2 0 0 0\n" "47 7 1 2 0 2 0 0 0\n" "48 7 2 2 0 0 0 0 0\n" "49 8 3 3 0 0 0 0 0\n" "50 1 1 1 0 0 0 0 0\n" "51 8 2 3 0 2 0 0 0\n" "52 1 1 1 0 0 0 0 0\n" "53 7 2 4 0 2 0 1 0\n" "54 7 3 4 0 2 0 0 1\n" "55 7 3 34 0 1 0 0 0\n" "56 7 3 34 0 1 0 0 
0\n" "57 6 3 4 0 2 0 0 1\n" "58 7 3 4 0 1 1 0 1\n" "59 8 2 2 1 1 1 0 0\n" "60 6 1 3 0 3 0 0 0\n" "61 7 2 4 0 3 0 1 0\n" "62 7 2 2 1 0 0 0 0\n" "63 6 3 4 0 2 1 0 1\n" "64 6 3 4 0 2 1 0 1\n" "65 7 2 3 0 2 1 0 0\n" "66 7 2 3 0 2 1 0 0\n" "67 7 3 4 0 2 0 0 1\n" "68 7 4 4 0 0 0 0 0\n" "69 7 3 4 0 1 1 0 0\n" "70 8 2 2 1 0 0 0 0\n" "71 1 1 1 0 0 0 0 0\n" "72 16 1 1 1 1 0 0 0\n" "73 16 3 3 0 0 0 0 0\n" "74 16 2 4 0 2 0 0 0\n" "75 15 2 3 0 2 0 0 1\n" "76 7 2 2 1 0 0 0 0\n" "77 17 4 4 0 0 0 0 0\n" "78 6 3 4 0 2 1 0 1\n" "79 7 2 3 0 2 1 0 0\n" "80 6 3 4 0 2 0 0 1\n" "81 7 3 4 0 1 1 0 1\n" "82 7 3 4 0 1 1 0 0\n" "87 26 0 0 0 0 0 0 0\n" "88 26 0 0 0 0 0 0 0\n" "89 9 0 0 0 0 0 0 0\n" "90 17 0 0 0 0 0 0 0\n" "91 35 0 0 0 0 0 0 0\n" "92 3 0 0 0 0 0 0 0\n" "93 11 0 0 0 0 0 0 0\n" "94 19 0 0 0 0 0 0 0\n" "95 30 0 0 0 0 0 0 0\n" "96 20 0 0 0 0 0 0 0\n" "97 29 0 0 0 0 0 0 0\n" "98 29 0 0 0 0 0 0 0\n" "99 12 0 0 0 0 0 0 0\n"; class std::unique_ptr<MMFFPBCICollection> MMFFPBCICollection::ds_instance = nullptr; extern const std::string defaultMMFFPBCI; MMFFPBCICollection *MMFFPBCICollection::getMMFFPBCI( const std::string &mmffPBCI) { if (!ds_instance || !mmffPBCI.empty()) { ds_instance.reset(new MMFFPBCICollection(mmffPBCI)); } return ds_instance.get(); } MMFFPBCICollection::MMFFPBCICollection(std::string mmffPBCI) { if (mmffPBCI.empty()) { mmffPBCI = defaultMMFFPBCI; } std::istringstream inStream(mmffPBCI); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFPBCI mmffPBCIObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP // IMPORTANT: skip the first field ++token; unsigned int atomType = boost::lexical_cast<unsigned int>(*token); #else // IMPORTANT: skip the first two fields ++token; #endif ++token; mmffPBCIObj.pbci = boost::lexical_cast<double>(*token); ++token; mmffPBCIObj.fcadj = boost::lexical_cast<double>(*token); ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[atomType] = mmffPBCIObj; #else d_params.push_back(mmffPBCIObj); #endif } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFPBCI = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* MMFF Partial Bond Charge Incs and Formal-Charge Adj. 
Factors: " "19-MAY-1994\n" "*\n" "* type pbci fcadj Origin/Comment\n" "0 1 0.000 0.000 E94\n" "0 2 -0.135 0.000 E94\n" "0 3 -0.095 0.000 E94\n" "0 4 -0.200 0.000 E94\n" "0 5 -0.023 0.000 E94\n" "0 6 -0.243 0.000 E94\n" "0 7 -0.687 0.000 E94\n" "0 8 -0.253 0.000 E94\n" "0 9 -0.306 0.000 E94\n" "0 10 -0.244 0.000 E94\n" "0 11 -0.317 0.000 E94\n" "0 12 -0.304 0.000 E94\n" "0 13 -0.238 0.000 E94\n" "0 14 -0.208 0.000 E94\n" "0 15 -0.236 0.000 E94\n" "0 16 -0.475 0.000 E94\n" "0 17 -0.191 0.000 E94\n" "0 18 -0.118 0.000 E94\n" "0 19 0.094 0.000 E94\n" "0 20 -0.019 0.000 E94\n" "0 21 0.157 0.000 E94\n" "0 22 -0.095 0.000 E94\n" "0 23 0.193 0.000 E94\n" "0 24 0.257 0.000 E94\n" "0 25 0.012 0.000 E94\n" "0 26 -0.142 0.000 E94\n" "0 27 0.094 0.000 E94\n" "0 28 0.058 0.000 E94\n" "0 29 0.207 0.000 E94\n" "0 30 -0.166 0.000 E94\n" "0 31 0.161 0.000 E94\n" "0 32 -0.732 0.500 E94\n" "0 33 0.257 0.000 E94\n" "0 34 -0.491 0.000 E94\n" "0 35 -0.456 0.500 E94\n" "0 36 -0.031 0.000 E94\n" "0 37 -0.127 0.000 E94\n" "0 38 -0.437 0.000 E94\n" "0 39 -0.104 0.000 E94\n" "0 40 -0.264 0.000 E94\n" "0 41 0.052 0.000 E94\n" "0 42 -0.757 0.000 E94\n" "0 43 -0.326 0.000 E94\n" "0 44 -0.237 0.000 E94\n" "0 45 -0.260 0.000 E94\n" "0 46 -0.429 0.000 E94\n" "0 47 -0.418 0.000 E94\n" "0 48 -0.525 0.000 E94\n" "0 49 -0.283 0.000 E94\n" "0 50 0.284 0.000 E94\n" "0 51 -1.046 0.000 E94\n" "0 52 -0.546 0.000 E94\n" "0 53 -0.048 0.000 E94\n" "0 54 -0.424 0.000 E94\n" "0 55 -0.476 0.000 E94\n" "0 56 -0.438 0.000 E94\n" "0 57 -0.105 0.000 E94\n" "0 58 -0.488 0.000 E94\n" "0 59 -0.337 0.000 E94\n" "0 60 -0.635 0.000 E94\n" "0 61 -0.265 0.000 E94\n" "0 62 -0.125 0.250 E94\n" "0 63 -0.180 0.000 E94\n" "0 64 -0.181 0.000 E94\n" "0 65 -0.475 0.000 E94\n" "0 66 -0.467 0.000 E94\n" "0 67 -0.099 0.000 == 69\n" "0 68 -0.135 0.000 E94\n" "0 69 -0.099 0.000 E94\n" "0 70 -0.269 0.000 E94\n" "0 71 -0.071 0.000 E94\n" "0 72 -0.580 0.500 E94\n" "0 73 -0.200 0.000 E94\n" "0 74 -0.301 0.000 E94\n" "0 75 -0.255 0.000 E94\n" "0 76 -0.568 0.250 E94\n" "0 77 -0.282 0.000 E94\n" "0 78 -0.168 0.000 E94\n" "0 79 -0.471 0.000 == (65+66)/2\n" "0 80 -0.144 0.000 E94\n" "0 81 -0.514 0.000 E94\n" "0 82 -0.099 0.000 == 69\n" "0 83 0.000 0.000 Unused\n" "0 84 0.000 0.000 Unused\n" "0 85 0.000 0.000 Unused\n" "0 86 0.000 0.000 Unused\n" "0 87 2.000 0.000 Ionic charge\n" "0 88 3.000 0.000 Ionic charge\n" "0 89 -1.000 0.000 Ionic charge\n" "0 90 -1.000 0.000 Ionic charge\n" "0 91 -1.000 0.000 Ionic charge\n" "0 92 1.000 0.000 Ionic charge\n" "0 93 1.000 0.000 Ionic charge\n" "0 94 1.000 0.000 Ionic charge\n" "0 95 2.000 0.000 Ionic charge\n" "0 96 2.000 0.000 Ionic charge\n" "0 97 1.000 0.000 Ionic charge\n" "0 98 2.000 0.000 Ionic charge\n" "0 99 2.000 0.000 Ionic charge\n"; class std::unique_ptr<MMFFChgCollection> MMFFChgCollection::ds_instance = nullptr; extern const std::string defaultMMFFChg; MMFFChgCollection *MMFFChgCollection::getMMFFChg(const std::string &mmffChg) { if (!ds_instance || !mmffChg.empty()) { ds_instance.reset(new MMFFChgCollection(mmffChg)); } return ds_instance.get(); } MMFFChgCollection::MMFFChgCollection(std::string mmffChg) { if (mmffChg.empty()) { mmffChg = defaultMMFFChg; } std::istringstream inStream(mmffChg); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFChg mmffChgObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int bondType = boost::lexical_cast<unsigned 
int>(*token); #else d_bondType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int iAtomType = boost::lexical_cast<unsigned int>(*token); #else d_iAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int jAtomType = boost::lexical_cast<unsigned int>(*token); #else d_jAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; mmffChgObj.bci = boost::lexical_cast<double>(*token); ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[bondType][iAtomType][jAtomType] = mmffChgObj; #else d_params.push_back(mmffChgObj); #endif } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFChg = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* MMFF BOND-CHARGE INCREMENTS - Rev: 26-OCT-94 Source: MMFF\n" "* C94 - CORE MMFF parameter, obtained from fit to dipole moments\n" "* #C94 - CORE MMFF parameter, either fixed or adjusted to fit \n" "* HF/6-31G* interaction energies\n" "* X94 - EXTD MMFF parameter, obtained from fit to dipole moments\n" "* #X94 - EXTD MMFF parameter, fixed in the fit to dipole moments\n" "* E94 - derived from partial bond charge increments (empirical rule)\n" "*\n" "* types bci Source\n" "0 1 1 0.0000 #C94\n" "0 1 2 -0.1382 C94\n" "0 1 3 -0.0610 #C94\n" "0 1 4 -0.2000 #X94\n" "0 1 5 0.0000 #C94\n" "0 1 6 -0.2800 #C94\n" "0 1 8 -0.2700 #C94\n" "0 1 9 -0.2460 #C94\n" "0 1 10 -0.3001 C94\n" "0 1 11 -0.3400 #C94\n" "0 1 12 -0.2900 #C94\n" "0 1 13 -0.2300 #X94\n" "0 1 14 -0.1900 #X94\n" "0 1 15 -0.2300 #C94\n" "0 1 17 -0.1935 X94\n" "0 1 18 -0.1052 X94\n" "0 1 19 0.0805 X94\n" "0 1 20 0.0000 #C94\n" "0 1 22 -0.0950 E94\n" "0 1 25 0.0000 #X94\n" "0 1 26 -0.1669 X94\n" "0 1 34 -0.5030 C94\n" "0 1 35 -0.4274 X94\n" "0 1 37 -0.1435 C94\n" "0 1 39 -0.2556 C94\n" "0 1 40 -0.3691 C94\n" "0 1 41 0.1060 #C94\n" "0 1 43 -0.3557 X94\n" "0 1 45 -0.2402 X94\n" "0 1 46 -0.3332 X94\n" "0 1 54 -0.3461 C94\n" "0 1 55 -0.4895 C94\n" "0 1 56 -0.3276 C94\n" "0 1 57 -0.1050 E94\n" "0 1 58 -0.4880 E94\n" "0 1 61 -0.2657 X94\n" "0 1 62 -0.2000 #X94\n" "0 1 63 -0.1800 E94\n" "0 1 64 -0.1810 E94\n" "0 1 67 -0.0990 E94\n" "0 1 68 -0.2560 #C94\n" "0 1 72 -0.5500 #X94\n" "0 1 73 -0.0877 X94\n" "0 1 75 -0.2550 E94\n" "0 1 78 -0.1680 E94\n" "0 1 80 -0.1440 E94\n" "0 1 81 -0.5140 E94\n" "0 2 2 0.0000 #C94\n" "1 2 2 0.0000 #C94\n" "1 2 3 -0.0144 C94\n" "0 2 4 -0.0650 E94\n" "1 2 4 -0.0650 E94\n" "0 2 5 0.1500 #C94\n" "0 2 6 -0.0767 C94\n" "1 2 9 -0.1710 E94\n" "0 2 10 -0.1090 E94\n" "0 2 11 -0.1495 X94\n" "0 2 12 -0.1400 #X94\n" "0 2 13 -0.1100 #X94\n" "0 2 14 -0.0900 #X94\n" "0 2 15 -0.1010 E94\n" "0 2 17 -0.0560 E94\n" "0 2 18 0.0170 E94\n" "0 2 19 0.2290 E94\n" "0 2 20 0.1160 E94\n" "0 2 22 0.0400 E94\n" "0 2 25 0.1470 E94\n" "0 2 30 -0.0310 E94\n" "0 2 34 -0.3560 E94\n" "0 2 35 -0.3500 #X94\n" "1 2 37 0.0284 C94\n" "1 2 39 0.0310 E94\n" "0 2 40 -0.1000 #C94\n" "0 2 41 0.2500 #C94\n" "0 2 43 -0.1910 E94\n" "0 2 45 -0.2044 X94\n" "0 2 46 -0.2940 E94\n" "0 2 55 -0.3410 E94\n" "0 2 56 -0.3030 E94\n" "0 2 62 -0.0500 #X94\n" "1 2 63 -0.0450 E94\n" "1 2 64 -0.0460 E94\n" "1 2 67 0.0360 E94\n" "0 2 72 -0.4500 #X94\n" "1 2 81 -0.3790 E94\n" "1 3 3 0.0000 #C94\n" "1 3 4 -0.1050 E94\n" "0 3 5 0.0600 #C94\n" "0 3 6 -0.1500 #C94\n" "0 3 7 -0.5700 #C94\n" "0 3 9 -0.4500 #C94\n" "1 3 9 -0.2110 E94\n" "0 3 10 -0.0600 C94\n" "0 3 11 -0.2220 E94\n" "0 3 12 
-0.2090 E94\n" "0 3 15 -0.1410 E94\n" "0 3 16 -0.3800 #X94\n" "0 3 17 -0.0960 E94\n" "0 3 18 -0.0230 E94\n" "0 3 20 0.0530 #C94\n" "0 3 22 0.0000 E94\n" "0 3 25 0.1070 E94\n" "1 3 30 -0.0710 E94\n" "0 3 35 -0.3610 E94\n" "1 3 37 0.0862 C94\n" "1 3 39 -0.0090 E94\n" "0 3 40 -0.0500 #C94\n" "0 3 41 0.1470 E94\n" "0 3 43 -0.2363 X94\n" "0 3 45 -0.1650 E94\n" "0 3 48 -0.4300 E94\n" "0 3 51 -0.9500 #X94\n" "0 3 53 -0.0134 X94\n" "0 3 54 -0.4000 #C94\n" "1 3 54 -0.3290 E94\n" "0 3 55 -0.3810 E94\n" "0 3 56 -0.3430 E94\n" "1 3 57 -0.0100 E94\n" "1 3 58 -0.3930 E94\n" "0 3 62 -0.0300 E94\n" "1 3 63 -0.0850 E94\n" "1 3 64 -0.0860 E94\n" "0 3 67 -0.0040 E94\n" "0 3 74 -0.3190 X94\n" "0 3 75 -0.2474 X94\n" "1 3 78 -0.0730 E94\n" "1 3 80 -0.0490 E94\n" "0 4 5 0.1770 E94\n" "0 4 6 -0.0430 E94\n" "0 4 7 -0.4870 E94\n" "0 4 9 -0.3000 E94\n" "1 4 9 -0.1060 E94\n" "0 4 10 -0.0440 E94\n" "0 4 15 -0.0360 E94\n" "0 4 20 0.1810 E94\n" "0 4 22 0.1050 E94\n" "0 4 30 0.0340 E94\n" "1 4 37 0.0730 E94\n" "0 4 40 -0.0640 E94\n" "0 4 42 -0.5571 X94\n" "0 4 43 -0.1260 E94\n" "1 4 63 0.0200 E94\n" "1 4 64 0.0190 E94\n" "0 5 19 0.2000 #X94\n" "0 5 20 0.0000 #C94\n" "0 5 22 -0.1000 #C94\n" "0 5 30 -0.1500 #C94\n" "0 5 37 -0.1500 #C94\n" "0 5 41 0.2203 C94\n" "0 5 57 -0.1500 #C94\n" "0 5 63 -0.1500 #C94\n" "0 5 64 -0.1500 #C94\n" "0 5 78 -0.1500 #C94\n" "0 5 80 -0.1500 #C94\n" "0 6 6 0.0000 #C94\n" "0 6 8 -0.1000 #C94\n" "0 6 9 -0.0630 E94\n" "0 6 10 0.0355 C94\n" "0 6 15 0.0070 E94\n" "0 6 17 0.0520 E94\n" "0 6 18 0.1837 X94\n" "0 6 19 0.2974 X94\n" "0 6 20 0.2579 C94\n" "0 6 21 0.4000 #C94\n" "0 6 22 0.1480 E94\n" "0 6 24 0.5000 #C94\n" "0 6 25 0.2712 X94\n" "0 6 26 0.1010 E94\n" "0 6 29 0.4500 #C94\n" "0 6 30 0.0770 E94\n" "0 6 33 0.5000 #X94\n" "0 6 37 0.0825 C94\n" "0 6 39 0.1390 E94\n" "0 6 40 -0.0210 E94\n" "0 6 41 0.2950 E94\n" "0 6 43 -0.0830 E94\n" "0 6 45 -0.0090 X94\n" "0 6 54 -0.1810 E94\n" "0 6 55 -0.2330 E94\n" "0 6 57 0.1380 E94\n" "0 6 58 -0.2450 E94\n" "0 6 63 0.0630 E94\n" "0 6 64 0.0620 E94\n" "0 7 17 0.5000 #X94\n" "0 7 46 0.1618 X94\n" "0 7 74 0.5000 #X94\n" "0 8 8 0.0000 #C94\n" "0 8 9 -0.0530 E94\n" "0 8 10 0.0090 E94\n" "0 8 12 -0.0510 E94\n" "0 8 15 0.0170 E94\n" "0 8 17 0.0620 E94\n" "0 8 19 0.3470 E94\n" "0 8 20 0.2096 C94\n" "0 8 22 0.1580 E94\n" "0 8 23 0.3600 #C94\n" "0 8 25 0.2679 X94\n" "0 8 26 0.1110 E94\n" "0 8 34 -0.2380 E94\n" "0 8 39 0.1490 E94\n" "0 8 40 -0.0110 E94\n" "0 8 43 -0.0730 E94\n" "0 8 45 -0.0070 E94\n" "0 8 46 -0.1760 E94\n" "0 8 55 -0.2230 E94\n" "0 8 56 -0.1850 E94\n" "0 9 9 0.0000 #C94\n" "0 9 10 0.0620 E94\n" "0 9 12 0.0020 E94\n" "0 9 15 0.0700 E94\n" "0 9 18 0.1880 E94\n" "0 9 19 0.4000 E94\n" "0 9 20 0.2870 E94\n" "0 9 25 0.3180 E94\n" "0 9 27 0.4000 #C94\n" "0 9 34 -0.1850 E94\n" "0 9 35 -0.1500 E94\n" "1 9 37 0.1790 E94\n" "1 9 39 0.2020 E94\n" "0 9 40 0.0420 E94\n" "0 9 41 0.3580 E94\n" "0 9 45 0.0460 E94\n" "0 9 53 0.3179 X94\n" "0 9 54 -0.1180 E94\n" "0 9 55 -0.1700 E94\n" "0 9 56 -0.1320 E94\n" "1 9 57 0.2010 E94\n" "0 9 62 0.1810 E94\n" "1 9 63 0.1260 E94\n" "1 9 64 0.1250 E94\n" "0 9 67 0.2070 E94\n" "1 9 78 0.1380 E94\n" "1 9 81 -0.2080 E94\n" "0 10 10 0.0000 #C94\n" "0 10 13 0.0060 E94\n" "0 10 14 0.0360 E94\n" "0 10 15 0.0080 E94\n" "0 10 17 0.0530 E94\n" "0 10 20 0.2250 E94\n" "0 10 22 0.1490 E94\n" "0 10 25 0.2560 E94\n" "0 10 26 0.1020 E94\n" "0 10 28 0.3700 #C94\n" "0 10 34 -0.2470 E94\n" "0 10 35 -0.2120 E94\n" "0 10 37 0.1170 E94\n" "0 10 39 0.1400 E94\n" "0 10 40 -0.0200 E94\n" "0 10 41 0.2960 E94\n" "0 10 45 -0.0160 E94\n" "0 10 63 0.0640 E94\n" 
"0 10 64 0.0630 E94\n" "0 11 20 0.2980 E94\n" "0 11 22 0.2317 X94\n" "0 11 25 0.3290 E94\n" "0 11 26 0.1750 E94\n" "0 11 37 0.1900 E94\n" "0 11 40 0.0530 E94\n" "0 12 15 0.0680 E94\n" "0 12 18 0.1860 E94\n" "0 12 19 0.3701 X94\n" "0 12 20 0.2900 #C94\n" "0 12 22 0.2273 X94\n" "0 12 25 0.3160 E94\n" "0 12 26 0.2112 X94\n" "0 12 37 0.1770 E94\n" "0 12 40 0.0400 E94\n" "0 12 57 0.1990 E94\n" "0 12 63 0.1240 E94\n" "0 12 64 0.1230 E94\n" "0 13 20 0.2190 E94\n" "0 13 22 0.1430 E94\n" "0 13 37 0.1110 E94\n" "0 13 64 0.0570 E94\n" "0 14 20 0.1890 E94\n" "0 14 37 0.0810 E94\n" "0 15 15 0.0000 #C94\n" "0 15 18 0.1180 E94\n" "0 15 19 0.3300 E94\n" "0 15 20 0.2170 E94\n" "0 15 22 0.1410 E94\n" "0 15 25 0.2480 E94\n" "0 15 26 0.0940 E94\n" "0 15 30 0.0700 E94\n" "0 15 37 0.1015 C94\n" "0 15 40 -0.0280 E94\n" "0 15 43 -0.0900 E94\n" "0 15 57 0.1310 E94\n" "0 15 63 0.0560 E94\n" "0 15 64 0.0550 E94\n" "0 15 71 0.1800 #C94\n" "0 16 16 0.0000 #C94\n" "0 17 17 0.0000 #X94\n" "0 17 20 0.1720 E94\n" "0 17 22 0.0960 E94\n" "0 17 37 0.0640 E94\n" "0 17 43 -0.1350 E94\n" "0 18 18 0.0000 #X94\n" "0 18 20 0.0990 E94\n" "0 18 22 0.0230 E94\n" "0 18 32 -0.6500 #X94\n" "0 18 37 -0.0090 E94\n" "0 18 39 0.0140 E94\n" "0 18 43 -0.1380 X94\n" "0 18 48 -0.5895 X94\n" "0 18 55 -0.3580 E94\n" "0 18 58 -0.3700 E94\n" "0 18 62 0.2099 X94\n" "0 18 63 -0.0620 E94\n" "0 18 64 -0.0630 E94\n" "0 18 80 -0.0260 E94\n" "0 19 19 0.0000 #X94\n" "0 19 20 -0.1130 E94\n" "0 19 37 -0.2210 E94\n" "0 19 40 -0.3580 E94\n" "0 19 63 -0.2740 E94\n" "0 19 75 -0.3490 E94\n" "0 20 20 0.0000 #C94\n" "0 20 22 -0.0760 E94\n" "0 20 25 0.0310 E94\n" "0 20 26 -0.1230 E94\n" "0 20 30 -0.1380 #C94\n" "0 20 34 -0.4720 E94\n" "0 20 37 -0.1080 E94\n" "0 20 40 -0.2450 E94\n" "0 20 41 0.0710 E94\n" "0 20 43 -0.3070 E94\n" "0 20 45 -0.2410 E94\n" "0 22 22 0.0000 #C94\n" "0 22 30 -0.0710 E94\n" "0 22 34 -0.3960 E94\n" "0 22 37 -0.0320 E94\n" "0 22 40 -0.1690 E94\n" "0 22 41 0.1470 E94\n" "0 22 43 -0.2310 E94\n" "0 22 45 -0.1650 E94\n" "0 23 39 -0.2700 #C94\n" "0 23 62 -0.4000 #X94\n" "0 23 67 -0.2920 E94\n" "0 23 68 -0.3600 #C94\n" "0 25 25 0.0000 #X94\n" "0 25 32 -0.7000 #X94\n" "0 25 37 -0.1390 E94\n" "0 25 39 -0.1160 E94\n" "0 25 40 -0.2760 E94\n" "0 25 43 -0.3380 E94\n" "0 25 57 -0.1170 E94\n" "0 25 63 -0.1920 E94\n" "0 25 71 -0.0362 X94\n" "0 25 72 -0.6773 X94\n" "0 26 26 0.0000 #X94\n" "0 26 34 -0.3490 E94\n" "0 26 37 0.0150 E94\n" "0 26 40 -0.1220 E94\n" "0 26 71 0.0960 X94\n" "0 28 40 -0.4000 #C94\n" "0 28 43 -0.4200 #X94\n" "0 28 48 -0.4000 #X94\n" "0 30 30 0.0000 #C94\n" "0 30 40 -0.0980 E94\n" "1 30 67 0.0670 E94\n" "0 31 70 -0.4300 #C94\n" "0 32 41 0.6500 #C94\n" "0 32 45 0.5200 #X94\n" "0 32 67 0.6330 E94\n" "0 32 68 0.7500 #C94\n" "0 32 69 0.7500 #C94\n" "0 32 73 0.3500 #X94\n" "0 32 77 0.4500 #X94\n" "0 32 82 0.6330 E94\n" "0 34 36 0.4500 #C94\n" "0 34 37 0.3640 E94\n" "0 34 43 0.1650 E94\n" "0 35 37 0.3290 E94\n" "0 35 63 0.2760 E94\n" "0 36 54 -0.4000 #C94\n" "0 36 55 -0.4500 #C94\n" "0 36 56 -0.4500 #C94\n" "0 36 58 -0.4570 E94\n" "4 36 58 -0.4500 #C94\n" "0 36 81 -0.4500 #C94\n" "0 37 37 0.0000 #C94\n" "1 37 37 0.0000 #C94\n" "0 37 38 -0.3100 #C94\n" "0 37 39 0.0230 E94\n" "1 37 39 0.0230 E94\n" "0 37 40 -0.1000 #C94\n" "0 37 41 0.1790 E94\n" "0 37 43 -0.1990 E94\n" "0 37 45 -0.1330 E94\n" "0 37 46 -0.3020 E94\n" "0 37 55 -0.3490 E94\n" "0 37 56 -0.3110 E94\n" "1 37 57 0.0220 E94\n" "0 37 58 -0.3610 E94\n" "1 37 58 -0.3610 E94\n" "4 37 58 -0.3500 #C94\n" "0 37 61 -0.1380 E94\n" "0 37 62 0.0020 E94\n" "0 37 63 0.0000 #C94\n" "1 37 63 -0.0530 
E94\n" "0 37 64 0.0000 #C94\n" "1 37 64 -0.0540 E94\n" "1 37 67 0.0280 E94\n" "0 37 69 -0.0895 C94\n" "0 37 78 -0.0410 E94\n" "0 37 81 -0.3870 E94\n" "1 37 81 -0.3870 E94\n" "0 38 38 0.0000 #C94\n" "0 38 63 0.2570 E94\n" "0 38 64 0.2560 E94\n" "0 38 69 0.3380 E94\n" "0 38 78 0.2690 E94\n" "1 39 39 0.0000 #C94\n" "0 39 40 -0.1600 E94\n" "0 39 45 -0.1560 E94\n" "0 39 63 -0.1516 C94\n" "1 39 63 -0.0760 E94\n" "0 39 64 -0.0770 E94\n" "1 39 64 -0.0770 E94\n" "0 39 65 -0.4180 C94\n" "0 39 78 -0.0640 E94\n" "0 40 40 0.0000 #C94\n" "0 40 45 0.0040 E94\n" "0 40 46 -0.1650 E94\n" "0 40 54 -0.1600 E94\n" "0 40 63 0.0840 E94\n" "0 40 64 0.0830 E94\n" "0 40 78 0.0960 E94\n" "0 41 41 0.0000 #C94\n" "0 41 55 -0.5280 E94\n" "0 41 62 -0.1770 E94\n" "0 41 72 -0.5000 #X94\n" "0 41 80 -0.1960 E94\n" "0 42 61 0.4920 E94\n" "0 43 43 0.0000 #X94\n" "0 43 45 0.0660 E94\n" "0 43 64 0.1450 E94\n" "0 44 63 0.0400 #C94\n" "0 44 65 -0.2207 C94\n" "0 44 78 0.0690 E94\n" "0 44 80 0.0930 E94\n" "0 45 63 0.0800 E94\n" "0 45 64 0.0790 E94\n" "0 45 78 0.0920 E94\n" "0 47 53 0.3700 #X94\n" "0 49 50 0.5673 C94\n" "0 51 52 0.5000 #X94\n" "0 55 57 0.3544 C94\n" "0 55 62 0.3510 E94\n" "0 55 64 0.2950 E94\n" "0 55 80 0.3320 E94\n" "0 56 57 0.4000 #C94\n" "0 56 63 0.2580 E94\n" "0 56 80 0.2700 E94\n" "4 57 58 -0.4000 #C94\n" "1 57 63 -0.0750 E94\n" "1 57 64 -0.0760 E94\n" "0 58 63 0.3080 E94\n" "0 58 64 0.3070 E94\n" "0 59 63 0.1400 #C94\n" "0 59 65 -0.1209 C94\n" "0 59 78 0.1690 E94\n" "0 59 80 0.1930 E94\n" "0 59 82 0.2380 E94\n" "0 60 61 0.3700 #X94\n" "0 62 63 -0.0550 E94\n" "0 62 64 -0.0560 E94\n" "0 63 63 0.0000 #C94\n" "1 63 63 0.0000 #C94\n" "0 63 64 0.0000 #C94\n" "0 63 66 -0.3381 C94\n" "0 63 72 -0.4000 E94\n" "0 63 78 0.0120 E94\n" "0 63 81 -0.3340 E94\n" "0 64 64 0.0000 #C94\n" "0 64 65 -0.2888 C94\n" "0 64 66 -0.2272 C94\n" "0 64 78 0.0130 E94\n" "0 64 81 -0.3330 E94\n" "0 64 82 0.0820 E94\n" "0 65 66 0.0000 #C94\n" "0 65 78 0.3070 E94\n" "0 65 81 -0.0390 E94\n" "0 65 82 0.3760 E94\n" "0 66 66 0.0000 #C94\n" "0 66 78 0.2990 E94\n" "0 66 81 -0.0470 E94\n" "0 71 75 -0.0958 X94\n" "0 72 73 0.4500 #X94\n" "0 76 76 0.0000 #X94\n" "0 76 78 0.4000 #X94\n" "0 78 78 0.0000 #C94\n" "1 78 78 0.0000 #C94\n" "0 78 79 -0.3030 E94\n" "0 78 81 -0.3500 #C94\n" "0 79 81 -0.0430 E94\n" "0 80 81 -0.4000 #C94\n"; class std::unique_ptr<MMFFBondCollection> MMFFBondCollection::ds_instance = nullptr; extern const std::string defaultMMFFBond; MMFFBondCollection *MMFFBondCollection::getMMFFBond( const std::string &mmffBond) { if (!ds_instance || !mmffBond.empty()) { ds_instance.reset(new MMFFBondCollection(mmffBond)); } return ds_instance.get(); } MMFFBondCollection::MMFFBondCollection(std::string mmffBond) { if (mmffBond.empty()) { mmffBond = defaultMMFFBond; } std::istringstream inStream(mmffBond); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFBond mmffBondObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int bondType = boost::lexical_cast<unsigned int>(*token); #else d_bondType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int atomType = boost::lexical_cast<unsigned int>(*token); #else d_iAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int jAtomType = boost::lexical_cast<unsigned 
int>(*token); #else d_jAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; mmffBondObj.kb = boost::lexical_cast<double>(*token); ++token; mmffBondObj.r0 = boost::lexical_cast<double>(*token); ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[bondType][atomType][jAtomType] = mmffBondObj; #else d_params.push_back(mmffBondObj); #endif } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFBond = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* MMFF BOND PARAMETERS- Rev: 26-OCT-94 Source: MMFF\n" "* C94 = CORE MMFF parameter - obtained from ab initio data\n" "* X94 = EXTD MMFF parameter - fit to additional ab initio data\n" "* E94 = r0 from fit to X-ray data, kb from empirical rule\n" "* #C94 = r0 lies between C94 and E94 values, kb from empirical rule\n" "* #X94 = r0 lies between X94 and E94 values, kb from empirical rule\n" "* #E94 = r0 and k both from empirical rules\n" "*\n" "* types kb r0 Source\n" "0 1 1 4.258 1.508 C94\n" "0 1 2 4.539 1.482 C94\n" "0 1 3 4.190 1.492 C94\n" "0 1 4 4.707 1.459 X94\n" "0 1 5 4.766 1.093 C94\n" "0 1 6 5.047 1.418 C94\n" "0 1 8 5.084 1.451 C94\n" "0 1 9 4.763 1.458 C94\n" "0 1 10 4.664 1.436 C94\n" "0 1 11 6.011 1.360 #C94\n" "0 1 12 2.974 1.773 C94\n" "0 1 13 2.529 1.949 E94\n" "0 1 14 1.706 2.090 E94\n" "0 1 15 2.893 1.805 C94\n" "0 1 17 2.841 1.813 X94\n" "0 1 18 3.258 1.772 X94\n" "0 1 19 2.866 1.830 #X94\n" "0 1 20 4.650 1.504 C94\n" "0 1 22 4.286 1.482 E94\n" "0 1 25 2.980 1.810 #X94\n" "0 1 26 2.790 1.830 #X94\n" "0 1 34 3.844 1.480 #C94\n" "0 1 35 7.915 1.307 X94\n" "0 1 37 4.957 1.486 C94\n" "0 1 39 6.114 1.445 C94\n" "0 1 40 4.922 1.446 C94\n" "0 1 41 3.830 1.510 #C94\n" "0 1 43 3.971 1.472 X94\n" "0 1 45 3.844 1.480 X94\n" "0 1 46 3.813 1.482 X94\n" "0 1 54 4.267 1.461 C94\n" "0 1 55 4.646 1.454 C94\n" "0 1 56 4.166 1.453 C94\n" "0 1 57 4.669 1.461 E94\n" "0 1 58 4.329 1.451 E94\n" "0 1 61 4.845 1.424 X94\n" "0 1 62 4.456 1.444 X94\n" "0 1 63 4.481 1.471 E94\n" "0 1 64 4.518 1.469 E94\n" "0 1 67 4.188 1.459 E94\n" "0 1 68 4.217 1.479 C94\n" "0 1 72 2.956 1.801 X94\n" "0 1 73 2.608 1.839 X94\n" "0 1 75 2.547 1.858 E94\n" "0 1 78 4.593 1.465 E94\n" "0 1 80 4.373 1.477 E94\n" "0 1 81 4.512 1.441 E94\n" "0 2 2 9.505 1.333 C94\n" "1 2 2 5.310 1.430 #C94\n" "1 2 3 4.565 1.468 C94\n" "0 2 4 9.538 1.297 E94\n" "1 2 4 5.657 1.415 E94\n" "0 2 5 5.170 1.083 C94\n" "0 2 6 5.520 1.373 C94\n" "1 2 9 6.385 1.360 E94\n" "0 2 10 6.329 1.362 E94\n" "0 2 11 6.283 1.350 #X94\n" "0 2 12 3.390 1.720 #X94\n" "0 2 13 3.413 1.854 E94\n" "0 2 14 2.062 2.025 E94\n" "0 2 15 3.896 1.720 E94\n" "0 2 17 3.247 1.773 E94\n" "0 2 18 3.789 1.728 E94\n" "0 2 19 3.052 1.811 E94\n" "0 2 20 4.593 1.465 E94\n" "0 2 22 4.926 1.448 E94\n" "0 2 25 3.750 1.742 E94\n" "0 2 30 8.166 1.331 E94\n" "0 2 34 5.207 1.407 E94\n" "0 2 35 10.343 1.250 #X94\n" "1 2 37 5.007 1.449 C94\n" "1 2 39 6.164 1.368 E94\n" "0 2 40 6.110 1.370 #C94\n" "0 2 41 3.746 1.505 C94\n" "0 2 43 4.928 1.420 E94\n" "0 2 45 4.725 1.430 #X94\n" "0 2 46 7.466 1.325 E94\n" "0 2 55 6.164 1.368 E94\n" "0 2 56 6.246 1.365 E94\n" "0 2 62 7.105 1.336 X94\n" "1 2 63 6.030 1.400 E94\n" "1 2 64 5.754 1.411 E94\n" "1 2 67 4.685 1.432 E94\n" "0 2 72 4.179 1.700 #X94\n" "1 2 81 6.357 1.361 E94\n" "1 3 3 4.418 1.489 C94\n" "1 3 4 5.135 1.438 E94\n" "0 3 5 4.650 1.101 C94\n" "0 3 6 5.801 1.355 C94\n" "0 3 7 12.950 1.222 C94\n" "0 3 9 10.077 1.290 C94\n" "1 3 9 6.273 1.364 E94\n" "0 3 10 5.829 1.369 C94\n" "0 3 11 6.570 1.340 
E94\n" "0 3 12 3.449 1.715 E94\n" "0 3 15 3.536 1.748 E94\n" "0 3 16 4.735 1.665 E94\n" "0 3 17 2.888 1.808 E94\n" "0 3 18 3.394 1.760 E94\n" "0 3 20 3.298 1.530 C94\n" "0 3 22 4.593 1.465 E94\n" "0 3 25 3.164 1.792 E94\n" "1 3 30 4.481 1.471 E94\n" "0 3 35 11.012 1.237 E94\n" "1 3 37 4.488 1.457 C94\n" "1 3 39 5.978 1.375 E94\n" "0 3 40 6.110 1.370 #C94\n" "0 3 41 4.286 1.482 E94\n" "0 3 43 4.928 1.420 X94\n" "0 3 45 4.531 1.440 E94\n" "0 3 48 5.412 1.398 E94\n" "0 3 51 8.562 1.290 #X94\n" "0 3 53 7.637 1.320 #X94\n" "0 3 54 10.333 1.280 C94\n" "1 3 54 2.771 1.563 E94\n" "0 3 55 4.886 1.422 E94\n" "0 3 56 4.907 1.421 E94\n" "1 3 57 5.492 1.422 E94\n" "1 3 58 5.163 1.409 E94\n" "0 3 62 7.568 1.322 E94\n" "1 3 63 5.468 1.423 E94\n" "1 3 64 5.288 1.431 E94\n" "0 3 67 8.217 1.304 E94\n" "0 3 74 5.204 1.639 X94\n" "0 3 75 4.191 1.710 #X94\n" "1 3 78 5.705 1.413 E94\n" "1 3 80 6.719 1.375 E94\n" "0 4 4 15.206 1.200 #X94\n" "0 4 5 5.726 1.065 X94\n" "0 4 6 7.193 1.328 E94\n" "0 4 7 14.916 1.176 E94\n" "0 4 9 15.589 1.172 E94\n" "1 4 9 7.041 1.338 E94\n" "0 4 10 6.824 1.345 E94\n" "0 4 15 4.330 1.690 E94\n" "0 4 20 5.178 1.436 E94\n" "0 4 22 5.400 1.426 E94\n" "0 4 30 10.227 1.282 E94\n" "1 4 37 5.445 1.424 E94\n" "0 4 42 16.582 1.160 #X94\n" "0 4 43 6.947 1.341 E94\n" "1 4 63 5.633 1.416 E94\n" "1 4 64 5.492 1.422 E94\n" "0 5 19 2.254 1.485 X94\n" "0 5 20 4.852 1.093 C94\n" "0 5 22 5.191 1.082 C94\n" "0 5 30 5.176 1.086 C94\n" "0 5 37 5.306 1.084 C94\n" "0 5 41 3.256 1.144 C94\n" "0 5 57 5.633 1.076 C94\n" "0 5 63 5.531 1.080 C94\n" "0 5 64 5.506 1.080 C94\n" "0 5 78 5.506 1.080 C94\n" "0 5 80 5.633 1.076 C94\n" "0 6 6 4.088 1.449 E94\n" "0 6 8 5.059 1.450 C94\n" "0 6 9 4.491 1.395 E94\n" "0 6 10 5.982 1.410 C94\n" "0 6 15 4.757 1.661 E94\n" "0 6 17 5.779 1.608 E94\n" "0 6 18 5.326 1.630 #X94\n" "0 6 19 4.661 1.660 #X94\n" "0 6 20 5.623 1.433 C94\n" "0 6 21 7.794 0.972 C94\n" "0 6 22 4.556 1.433 E94\n" "0 6 24 7.403 0.981 C94\n" "0 6 25 5.243 1.630 #X94\n" "0 6 26 5.481 1.618 E94\n" "0 6 29 7.839 0.973 C94\n" "0 6 30 9.359 1.271 E94\n" "0 6 33 7.143 0.986 X94\n" "0 6 37 5.614 1.376 C94\n" "0 6 39 4.629 1.388 E94\n" "0 6 40 4.609 1.389 E94\n" "0 6 41 6.754 1.342 E94\n" "0 6 43 3.937 1.426 E94\n" "0 6 45 4.321 1.404 X94\n" "0 6 54 5.117 1.365 E94\n" "0 6 55 4.772 1.381 E94\n" "0 6 57 7.128 1.330 E94\n" "0 6 58 4.792 1.380 E94\n" "0 6 63 7.324 1.324 E94\n" "0 6 64 6.664 1.345 E94\n" "0 7 17 8.770 1.500 #X94\n" "0 7 46 9.329 1.235 X94\n" "0 7 74 9.129 1.490 #X94\n" "0 8 8 3.264 1.420 E94\n" "0 8 9 4.581 1.342 E94\n" "0 8 10 3.909 1.378 E94\n" "0 8 12 3.371 1.761 E94\n" "0 8 15 4.060 1.652 E94\n" "0 8 17 3.901 1.663 E94\n" "0 8 19 4.254 1.700 E94\n" "0 8 20 5.107 1.456 C94\n" "0 8 22 4.223 1.457 E94\n" "0 8 23 6.490 1.019 C94\n" "0 8 25 4.629 1.660 E94\n" "0 8 26 4.027 1.699 E94\n" "0 8 34 3.775 1.386 E94\n" "0 8 39 3.435 1.408 E94\n" "0 8 40 3.710 1.390 E94\n" "0 8 43 3.977 1.374 E94\n" "0 8 45 4.267 1.358 E94\n" "0 8 46 5.519 1.301 E94\n" "0 8 55 4.229 1.360 E94\n" "0 8 56 3.995 1.373 E94\n" "0 9 9 7.256 1.243 E94\n" "1 9 9 3.808 1.384 E94\n" "0 9 10 4.480 1.347 E94\n" "0 9 12 3.635 1.739 E94\n" "0 9 15 3.791 1.671 E94\n" "0 9 18 4.465 1.626 E94\n" "0 9 19 3.687 1.741 E94\n" "0 9 20 4.401 1.447 E94\n" "0 9 25 5.379 1.619 E94\n" "0 9 27 6.230 1.026 C94\n" "0 9 34 3.223 1.423 E94\n" "0 9 35 5.095 1.366 E94\n" "1 9 37 5.529 1.393 E94\n" "1 9 39 4.685 1.337 E94\n" "0 9 40 4.382 1.352 E94\n" "0 9 41 5.650 1.388 E94\n" "0 9 45 4.857 1.329 E94\n" "0 9 53 7.291 1.242 X94\n" "0 9 54 4.991 1.323 E94\n" "0 9 
55 3.825 1.383 E94\n" "0 9 56 4.602 1.341 E94\n" "1 9 57 6.824 1.345 E94\n" "0 9 62 4.749 1.334 E94\n" "1 9 63 6.824 1.345 E94\n" "1 9 64 5.458 1.396 E94\n" "0 9 67 6.752 1.258 E94\n" "1 9 78 6.644 1.351 E94\n" "1 9 81 3.909 1.378 E94\n" "0 10 10 3.977 1.374 E94\n" "0 10 13 3.110 1.878 E94\n" "0 10 14 1.967 2.029 E94\n" "0 10 15 3.593 1.686 E94\n" "0 10 17 3.930 1.661 E94\n" "0 10 20 4.240 1.456 E94\n" "0 10 22 4.970 1.418 E94\n" "0 10 25 3.820 1.714 E94\n" "0 10 26 3.651 1.727 E94\n" "0 10 28 6.663 1.015 C94\n" "0 10 34 3.960 1.375 E94\n" "0 10 35 4.898 1.375 E94\n" "0 10 37 5.482 1.395 E94\n" "0 10 39 4.382 1.352 E94\n" "0 10 40 3.841 1.382 E94\n" "0 10 41 7.466 1.325 E94\n" "0 10 45 3.524 1.402 E94\n" "0 10 63 6.137 1.369 E94\n" "0 10 64 5.952 1.376 E94\n" "0 11 20 6.339 1.348 E94\n" "0 11 22 5.296 1.389 X94\n" "0 11 25 6.019 1.583 E94\n" "0 11 26 6.204 1.575 E94\n" "0 11 37 6.511 1.342 E94\n" "0 11 40 4.187 1.440 E94\n" "0 12 15 2.978 2.031 E94\n" "0 12 18 2.808 2.051 E94\n" "0 12 19 2.838 2.050 #X94\n" "0 12 20 2.859 1.751 C94\n" "0 12 22 3.056 1.750 #X94\n" "0 12 25 3.063 2.023 E94\n" "0 12 26 2.448 2.100 #X94\n" "0 12 37 3.378 1.721 E94\n" "0 12 40 3.737 1.731 E94\n" "0 12 57 3.714 1.694 E94\n" "0 12 63 3.413 1.718 E94\n" "0 12 64 3.649 1.699 E94\n" "0 13 20 2.767 1.920 E94\n" "0 13 22 2.928 1.902 E94\n" "0 13 37 3.031 1.891 E94\n" "0 13 64 3.031 1.891 E94\n" "0 14 20 0.884 2.332 E94\n" "0 14 37 1.781 2.075 E94\n" "0 15 15 2.531 2.050 C94\n" "0 15 18 2.214 2.094 E94\n" "0 15 19 2.022 2.146 E94\n" "0 15 20 2.757 1.822 E94\n" "0 15 22 3.802 1.727 E94\n" "0 15 25 2.319 2.112 E94\n" "0 15 26 2.359 2.106 E94\n" "0 15 30 3.750 1.731 E94\n" "0 15 37 3.565 1.765 C94\n" "0 15 40 3.859 1.666 E94\n" "0 15 43 3.221 1.717 E94\n" "0 15 57 3.993 1.713 E94\n" "0 15 63 3.724 1.733 E94\n" "0 15 64 3.548 1.747 E94\n" "0 15 71 4.014 1.341 C94\n" "0 17 20 2.397 1.865 E94\n" "0 17 22 2.566 1.844 E94\n" "0 17 37 3.098 1.787 E94\n" "0 17 43 4.900 1.601 E94\n" "0 18 20 3.172 1.780 E94\n" "0 18 22 2.757 1.822 E94\n" "0 18 32 10.748 1.450 #X94\n" "0 18 37 3.281 1.770 X94\n" "0 18 39 3.504 1.693 X94\n" "0 18 43 3.301 1.710 #X94\n" "0 18 48 6.186 1.540 X94\n" "0 18 55 4.432 1.628 E94\n" "0 18 58 2.568 1.783 E94\n" "0 18 62 5.510 1.570 #X94\n" "0 18 63 3.524 1.749 E94\n" "0 18 64 3.856 1.723 E94\n" "0 18 80 4.150 1.702 E94\n" "0 19 20 2.288 1.900 E94\n" "0 19 37 3.072 1.809 E94\n" "0 19 40 4.470 1.686 E94\n" "0 19 63 3.219 1.795 E94\n" "0 19 75 1.600 2.226 E94\n" "0 20 20 3.663 1.526 C94\n" "0 20 22 4.251 1.484 E94\n" "0 20 25 2.718 1.838 E94\n" "0 20 26 2.588 1.853 E94\n" "0 20 30 3.977 1.507 C94\n" "0 20 34 4.171 1.460 E94\n" "0 20 37 3.740 1.516 E94\n" "0 20 40 4.784 1.427 E94\n" "0 20 41 4.286 1.482 E94\n" "0 20 43 3.737 1.487 E94\n" "0 20 45 3.844 1.480 E94\n" "0 22 22 3.969 1.499 C94\n" "0 22 30 3.785 1.513 E94\n" "0 22 34 4.103 1.464 E94\n" "0 22 37 4.481 1.471 E94\n" "0 22 40 4.188 1.459 E94\n" "0 22 41 5.071 1.441 E94\n" "0 22 43 4.070 1.466 E94\n" "0 22 45 4.311 1.452 E94\n" "0 23 39 7.112 1.012 C94\n" "0 23 62 6.339 1.026 X94\n" "0 23 67 6.610 1.019 #E94\n" "0 23 68 5.899 1.038 C94\n" "0 25 25 1.514 2.253 E94\n" "0 25 32 8.296 1.510 #X94\n" "0 25 37 3.586 1.755 E94\n" "0 25 39 4.370 1.676 E94\n" "0 25 40 4.629 1.660 E94\n" "0 25 43 3.237 1.762 X94\n" "0 25 57 4.356 1.699 E94\n" "0 25 63 3.711 1.745 E94\n" "0 25 71 3.001 1.411 X94\n" "0 25 72 3.744 1.950 #X94\n" "0 26 26 1.414 2.279 E94\n" "0 26 34 3.395 1.748 E94\n" "0 26 37 3.207 1.788 E94\n" "0 26 40 4.870 1.646 E94\n" "0 26 71 2.959 1.415 C94\n" "0 
28 40 6.576 1.018 C94\n" "0 28 43 6.265 1.028 X94\n" "0 28 48 6.413 1.024 X94\n" "0 30 30 9.579 1.343 C94\n" "1 30 30 5.355 1.428 E94\n" "0 30 40 8.447 1.298 E94\n" "1 30 67 5.274 1.404 E94\n" "0 31 70 7.880 0.969 C94\n" "0 32 41 9.756 1.261 C94\n" "0 32 45 9.420 1.233 X94\n" "0 32 67 7.926 1.269 E94\n" "0 32 68 4.398 1.348 C94\n" "0 32 69 6.098 1.261 C94\n" "0 32 73 8.427 1.510 #X94\n" "0 32 77 10.648 1.450 #X94\n" "0 32 82 8.594 1.252 E94\n" "0 34 36 6.163 1.028 C94\n" "0 34 37 4.347 1.450 E94\n" "0 34 43 4.401 1.351 E94\n" "0 35 37 9.767 1.262 E94\n" "0 35 63 12.760 1.207 E94\n" "0 36 54 6.529 1.022 C94\n" "0 36 55 6.744 1.014 C94\n" "0 36 56 6.490 1.017 C94\n" "0 36 58 6.610 1.019 #E94\n" "0 36 81 6.980 1.016 C94\n" "0 37 37 5.573 1.374 C94\n" "1 37 37 5.178 1.436 E94\n" "0 37 38 5.737 1.333 C94\n" "0 37 39 5.978 1.375 E94\n" "1 37 39 5.650 1.388 E94\n" "0 37 40 6.168 1.398 C94\n" "0 37 41 4.537 1.468 E94\n" "0 37 43 4.764 1.428 X94\n" "0 37 45 4.705 1.431 E94\n" "0 37 46 6.191 1.367 E94\n" "0 37 55 6.615 1.352 E94\n" "0 37 56 5.055 1.414 E94\n" "1 37 57 5.092 1.440 E94\n" "0 37 58 7.432 1.326 E94\n" "1 37 58 5.055 1.414 E94\n" "0 37 61 5.724 1.385 E94\n" "0 37 62 7.137 1.335 E94\n" "0 37 63 6.095 1.372 C94\n" "1 37 63 5.178 1.436 E94\n" "0 37 64 6.161 1.379 C94\n" "1 37 64 5.265 1.432 E94\n" "1 37 67 4.725 1.430 E94\n" "0 37 69 5.396 1.352 C94\n" "0 37 78 6.719 1.375 E94\n" "0 37 81 3.987 1.471 E94\n" "1 37 81 4.531 1.440 E94\n" "0 38 38 5.002 1.246 C94\n" "0 38 63 7.299 1.330 E94\n" "0 38 64 6.978 1.340 E94\n" "0 38 69 5.036 1.321 E94\n" "0 38 78 6.218 1.366 E94\n" "0 39 40 4.101 1.367 E94\n" "0 39 45 3.524 1.402 E94\n" "0 39 63 6.301 1.364 C94\n" "1 39 63 6.137 1.369 E94\n" "0 39 64 6.357 1.361 E94\n" "1 39 64 5.482 1.395 E94\n" "0 39 65 5.513 1.339 C94\n" "0 39 78 6.137 1.369 E94\n" "0 40 40 4.248 1.359 E94\n" "0 40 45 4.305 1.356 E94\n" "0 40 46 4.727 1.335 E94\n" "0 40 54 6.817 1.256 E94\n" "0 40 63 6.733 1.348 E94\n" "0 40 64 6.644 1.351 E94\n" "0 40 78 5.900 1.378 E94\n" "0 41 41 5.029 1.443 E94\n" "0 41 55 5.577 1.391 E94\n" "0 41 62 7.137 1.335 E94\n" "0 41 72 4.519 1.678 X94\n" "0 41 80 5.222 1.434 E94\n" "0 42 61 16.223 1.087 E94\n" "0 43 43 4.211 1.361 E94\n" "0 43 45 3.710 1.390 E94\n" "0 43 64 5.389 1.399 E94\n" "0 44 63 3.589 1.717 C94\n" "0 44 65 3.374 1.684 C94\n" "0 44 78 3.711 1.734 E94\n" "0 44 80 3.910 1.719 E94\n" "0 45 63 5.119 1.411 E94\n" "0 45 64 5.076 1.413 E94\n" "0 45 78 5.724 1.385 E94\n" "0 47 53 12.192 1.140 #X94\n" "0 49 50 6.812 0.991 C94\n" "0 51 52 7.100 0.987 X94\n" "0 55 57 7.227 1.319 C94\n" "0 55 62 3.977 1.374 E94\n" "0 55 64 5.529 1.393 E94\n" "0 55 80 7.500 1.324 E94\n" "0 56 57 4.137 1.383 C94\n" "0 56 63 5.900 1.378 E94\n" "0 56 80 6.470 1.357 E94\n" "1 57 63 5.400 1.426 E94\n" "1 57 64 5.135 1.438 E94\n" "0 58 63 6.794 1.346 E94\n" "0 58 64 6.164 1.368 E94\n" "0 59 63 5.787 1.360 C94\n" "0 59 65 4.756 1.388 C94\n" "0 59 78 6.127 1.364 E94\n" "0 59 80 7.064 1.332 E94\n" "0 59 82 3.855 1.431 E94\n" "0 60 61 15.749 1.170 #X94\n" "0 62 63 6.947 1.341 E94\n" "0 62 64 6.273 1.364 E94\n" "1 63 63 5.729 1.412 E94\n" "0 63 64 7.118 1.377 C94\n" "0 63 66 8.326 1.313 C94\n" "0 63 72 4.503 1.679 E94\n" "0 63 78 7.434 1.352 E94\n" "0 63 81 7.778 1.316 E94\n" "0 64 64 4.313 1.418 C94\n" "1 64 64 4.926 1.448 E94\n" "0 64 65 8.258 1.335 C94\n" "0 64 66 4.456 1.369 C94\n" "0 64 78 5.492 1.422 E94\n" "0 64 81 5.824 1.381 E94\n" "0 64 82 6.794 1.346 E94\n" "0 65 66 7.243 1.323 C94\n" "0 65 78 8.447 1.298 E94\n" "0 65 81 5.223 1.313 E94\n" "0 65 82 5.622 
1.297 E94\n" "0 66 66 3.874 1.368 C94\n" "0 66 78 6.385 1.360 E94\n" "0 66 81 3.960 1.375 E94\n" "0 67 67 6.085 1.280 E94\n" "0 71 75 2.852 1.423 X94\n" "0 72 73 2.628 2.035 X94\n" "0 76 76 4.286 1.357 X94\n" "0 76 78 6.824 1.345 X94\n" "0 78 78 5.573 1.374 C94\n" "0 78 79 8.890 1.287 E94\n" "0 78 81 5.046 1.381 C94\n" "0 79 79 6.408 1.269 E94\n" "0 79 81 4.305 1.356 E94\n" "0 80 81 8.237 1.335 C94\n"; class std::unique_ptr<MMFFBndkCollection> MMFFBndkCollection::ds_instance = nullptr; extern const std::string defaultMMFFBndk; MMFFBndkCollection *MMFFBndkCollection::getMMFFBndk( const std::string &mmffBndk) { if (!ds_instance || !mmffBndk.empty()) { ds_instance.reset(new MMFFBndkCollection(mmffBndk)); } return ds_instance.get(); } MMFFBndkCollection::MMFFBndkCollection(std::string mmffBndk) { if (mmffBndk.empty()) { mmffBndk = defaultMMFFBndk; } std::istringstream inStream(mmffBndk); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFBond mmffBondObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int iAtomicNum = boost::lexical_cast<unsigned int>(*token); #else d_iAtomicNum.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int jAtomicNum = boost::lexical_cast<unsigned int>(*token); #else d_jAtomicNum.push_back( (std::uint8_t)boost::lexical_cast<unsigned int>(*token)); #endif ++token; mmffBondObj.r0 = boost::lexical_cast<double>(*token); ++token; mmffBondObj.kb = boost::lexical_cast<double>(*token); ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[iAtomicNum][jAtomicNum] = mmffBondObj; #else d_params.push_back(mmffBondObj); #endif } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFBndk = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* MMFF BOND LENGTH, FORCE CONSTANT DEFAULT-RULE PARAMETERS\n" "* C94 = Fitted to ab-initio derived core MMFF force constants\n" "* E94 = Based on Herschberg/Laurie parameterization of Badger's rule\n" "*\n" "* species r0(ref) kb(ref) Source\n" "1 6 1.084 5.15 C94\n" "1 7 1.001 7.35 C94\n" "1 8 0.947 9.10 C94\n" "1 9 0.92 10.6 E94\n" "1 14 1.48 2.3 E94\n" "1 15 1.415 2.95 C94\n" "1 16 1.326 4.30 C94\n" "1 17 1.28 4.3 E94\n" "1 35 1.41 4.2 E94\n" "1 53 1.60 2.7 E94\n" "6 6 1.512 3.80 C94\n" "6 7 1.439 4.55 C94\n" "6 8 1.393 5.40 C94\n" "6 9 1.353 6.20 C94\n" "6 14 1.86 2.6 E94\n" "6 15 1.84 2.7 E94\n" "6 16 1.812 2.85 C94\n" "6 17 1.781 2.75 C94\n" "6 35 1.94 2.6 E94\n" "6 53 2.16 1.4 E94\n" "7 7 1.283 6.00 C94\n" "7 8 1.333 5.90 C94\n" "7 9 1.36 5.9 E94\n" "7 14 1.74 3.7 E94\n" "7 15 1.65 4.8 E94\n" "7 16 1.674 3.75 C94\n" "7 17 1.75 3.5 E94\n" "7 35 1.90 2.9 E94\n" "7 53 2.10 1.6 E94\n" "8 8 1.48 3.6 E94\n" "8 9 1.42 4.6 E94\n" "8 14 1.63 5.2 E94\n" "8 15 1.66 4.7 E94\n" "8 16 1.470 9.90 C94\n" "8 17 1.70 4.1 E94\n" "8 35 1.85 3.4 E94\n" "8 53 2.05 1.6 E94\n" "9 14 1.57 6.4 E94\n" "9 15 1.54 7.1 E94\n" "9 16 1.55 6.9 E94\n" "14 14 2.32 1.3 E94\n" "14 15 2.25 1.5 E94\n" "14 16 2.15 2.0 E94\n" "14 17 2.02 3.1 E94\n" "14 35 2.19 2.1 E94\n" "14 53 2.44 1.5 E94\n" "15 15 2.21 1.7 E94\n" "15 16 2.10 2.4 E94\n" "15 17 2.03 3.0 E94\n" "15 35 2.21 2.0 E94\n" "15 53 2.47 1.4 E94\n" "16 16 2.052 2.50 C94\n" "16 17 2.04 2.9 E94\n" "16 35 2.24 1.9 E94\n" "16 53 2.40 1.7 E94\n" "17 17 1.99 3.5 E94\n" "35 35 2.28 2.4 E94\n" "53 53 2.67 1.6 E94\n"; 
class std::unique_ptr<MMFFHerschbachLaurieCollection>
    MMFFHerschbachLaurieCollection::ds_instance = nullptr;

extern const std::string defaultMMFFHerschbachLaurie;

MMFFHerschbachLaurieCollection *
MMFFHerschbachLaurieCollection::getMMFFHerschbachLaurie(
    const std::string &mmffHerschbachLaurie) {
  if (!ds_instance || !mmffHerschbachLaurie.empty()) {
    ds_instance.reset(new MMFFHerschbachLaurieCollection(mmffHerschbachLaurie));
  }
  return ds_instance.get();
}

MMFFHerschbachLaurieCollection::MMFFHerschbachLaurieCollection(
    std::string mmffHerschbachLaurie) {
  if (mmffHerschbachLaurie.empty()) {
    mmffHerschbachLaurie = defaultMMFFHerschbachLaurie;
  }
  std::istringstream inStream(mmffHerschbachLaurie);
  std::string inLine = RDKit::getLine(inStream);
  while (!(inStream.eof())) {
    if (inLine[0] != '*') {
      MMFFHerschbachLaurie mmffHerschbachLaurieObj;
      boost::char_separator<char> tabSep("\t");
      tokenizer tokens(inLine, tabSep);
      tokenizer::iterator token = tokens.begin();
#ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP
      unsigned int iRow = boost::lexical_cast<unsigned int>(*token);
#else
      d_iRow.push_back(
          (std::uint8_t)(boost::lexical_cast<unsigned int>(*token)));
#endif
      ++token;
#ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP
      unsigned int jRow = boost::lexical_cast<unsigned int>(*token);
#else
      d_jRow.push_back(
          (std::uint8_t)boost::lexical_cast<unsigned int>(*token));
#endif
      ++token;
      mmffHerschbachLaurieObj.a_ij = boost::lexical_cast<double>(*token);
      ++token;
      mmffHerschbachLaurieObj.d_ij = boost::lexical_cast<double>(*token);
      ++token;
      mmffHerschbachLaurieObj.dp_ij = boost::lexical_cast<double>(*token);
      ++token;
#ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP
      d_params[iRow][jRow] = mmffHerschbachLaurieObj;
#else
      d_params.push_back(mmffHerschbachLaurieObj);
#endif
    }
    inLine = RDKit::getLine(inStream);
  }
}

const std::string defaultMMFFHerschbachLaurie = "*Table I. 
Parameters for Badger's Rule.\n" "*\n" "* i j a_ij d_ij dp_ij\n" "0 0 1.26 0.025 0.025\n" "0 1 1.66 0.30 0.36\n" "0 2 1.84 0.38 0.58\n" "0 3 1.98 0.49 0.65\n" "0 4 2.03 0.51 0.80\n" "0 5 2.03 0.25 0.81\n" "0 30 1.85 0.15 0.53\n" "0 40 1.84 0.61 0.61\n" "0 50 1.78 0.97 0.62\n" "1 1 1.91 0.68 0.68\n" "1 2 2.28 0.74 0.92\n" "1 3 2.35 0.85 1.02\n" "1 4 2.33 0.68 1.12\n" "1 5 2.50 0.97 1.22\n" "1 30 2.08 1.14 0.97\n" "1 40 2.34 1.17 1.08\n" "2 2 2.41 1.18 1.18\n" "2 3 2.52 1.02 1.28\n" "2 4 2.61 1.28 1.40\n" "2 5 2.60 0.84 1.24\n" "3 3 2.58 1.41 1.35\n" "3 4 2.66 0.86 1.48\n" "3 5 2.75 1.14 1.55\n" "4 4 2.85 1.62 1.62\n" "4 5 2.76 1.25 1.51\n"; class std::unique_ptr<MMFFCovRadPauEleCollection> MMFFCovRadPauEleCollection::ds_instance = nullptr; extern const std::string defaultMMFFCovRadPauEle; MMFFCovRadPauEleCollection *MMFFCovRadPauEleCollection::getMMFFCovRadPauEle( const std::string &mmffCovRadPauEle) { if (!ds_instance || !mmffCovRadPauEle.empty()) { ds_instance.reset(new MMFFCovRadPauEleCollection(mmffCovRadPauEle)); } return ds_instance.get(); } MMFFCovRadPauEleCollection::MMFFCovRadPauEleCollection( std::string mmffCovRadPauEle) { if (mmffCovRadPauEle.empty()) { mmffCovRadPauEle = defaultMMFFCovRadPauEle; } std::istringstream inStream(mmffCovRadPauEle); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFCovRadPauEle mmffCovRadPauEleObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int atomicNum = boost::lexical_cast<unsigned int>(*token); #else d_atomicNum.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; mmffCovRadPauEleObj.r0 = boost::lexical_cast<double>(*token); ++token; mmffCovRadPauEleObj.chi = boost::lexical_cast<double>(*token); ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[atomicNum] = mmffCovRadPauEleObj; #else d_params.push_back(mmffCovRadPauEleObj); #endif } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFCovRadPauEle = "* atomicNum covRad pauEle\n" "1 0.33 2.20\n" "3 1.34 0.97\n" "6 0.77 2.50\n" "7 0.73 3.07\n" "8 0.72 3.50\n" "9 0.74 4.12\n" "11 1.54 1.01\n" "12 1.30 1.23\n" "14 1.15 1.74\n" "15 1.09 2.06\n" "16 1.03 2.44\n" "17 1.01 2.83\n" "19 1.96 0.91\n" "20 1.74 1.04\n" "29 1.38 1.75\n" "30 1.31 1.66\n" "35 1.15 2.74\n" "53 1.33 2.21\n"; class std::unique_ptr<MMFFAngleCollection> MMFFAngleCollection::ds_instance = nullptr; extern const std::string defaultMMFFAngleData[]; MMFFAngleCollection *MMFFAngleCollection::getMMFFAngle( const std::string &mmffAngle) { if (!ds_instance || !mmffAngle.empty()) { ds_instance.reset(new MMFFAngleCollection(mmffAngle)); } return ds_instance.get(); } MMFFAngleCollection::MMFFAngleCollection(std::string mmffAngle) { if (mmffAngle.empty()) { unsigned int i = 0; while (defaultMMFFAngleData[i] != "EOS") { mmffAngle += defaultMMFFAngleData[i]; ++i; } } std::istringstream inStream(mmffAngle); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFAngle mmffAngleObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int angleType = boost::lexical_cast<unsigned int>(*token); #else d_angleType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int iAtomType = 
boost::lexical_cast<unsigned int>(*token); #else d_iAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int jAtomType = boost::lexical_cast<unsigned int>(*token); #else d_jAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int kAtomType = boost::lexical_cast<unsigned int>(*token); #else d_kAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; mmffAngleObj.ka = boost::lexical_cast<double>(*token); ++token; mmffAngleObj.theta0 = boost::lexical_cast<double>(*token); ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[angleType][iAtomType][jAtomType][kAtomType] = mmffAngleObj; #else d_params.push_back(mmffAngleObj); #endif } inLine = RDKit::getLine(inStream); } } // another joy of VC++ "compiler limit: string exceeds 65535 bytes in length" // compels us to // break this into pieces const std::string defaultMMFFAngleData[] = { "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* MMFF ANGLE PARAMETERS- Rev: 26-Oct-94 Source: MMFF\n" "* C94 = CORE MMFF parameter - obtained from ab initio data\n" "* X94 = EXTD MMFF parameter - fit to additional ab initio data\n" "* E94 = theta0 from fit to X-ray data, ka from empirical rule\n" "* #E94 = theta0 and ka both from empirical rules\n" "*\n" "* atom types ka theta0 Comment/origin\n" "0 0 1 0 0.000 108.900 0:*-1-* MMFF DEF\n" "0 1 1 1 0.851 109.608 C94\n" "0 1 1 2 0.736 109.445 C94\n" "0 1 1 3 0.777 107.517 C94\n" "0 1 1 4 1.006 110.265 E94\n" "0 1 1 5 0.636 110.549 C94\n" "0 1 1 6 0.992 108.133 C94\n" "0 1 1 8 0.777 108.290 C94\n" "0 1 1 9 1.136 108.194 E94\n" "0 1 1 10 1.050 109.960 C94\n" "0 1 1 11 1.225 108.313 C94\n" "0 1 1 12 1.056 108.679 C94\n" "0 1 1 13 1.078 106.820 E94\n" "0 1 1 14 0.980 109.945 E94\n" "0 1 1 15 0.743 107.397 C94\n" "0 1 1 17 1.089 108.578 E94\n" "0 1 1 18 1.093 109.315 E94\n" "0 1 1 19 0.755 115.436 E94\n" "0 1 1 20 1.021 108.659 E94\n" "0 1 1 22 1.001 110.125 E94\n" "0 1 1 25 0.803 112.356 X94\n" "0 1 1 26 0.833 109.879 E94\n" "0 1 1 34 1.179 106.493 C94\n" "0 1 1 37 0.756 108.617 C94\n" "0 1 1 39 0.927 109.170 C94\n" "0 1 1 40 1.130 108.678 E94\n" "0 1 1 41 0.330 98.422 C94\n" "0 1 1 43 1.135 108.019 E94\n" "0 1 1 45 1.197 105.028 E94\n" "0 1 1 54 1.173 106.424 E94\n" "0 1 1 55 1.150 107.604 E94\n" "0 1 1 56 1.199 110.371 C94\n" "0 1 1 57 1.012 109.900 E94\n" "0 1 1 58 1.179 106.327 E94\n" "0 1 1 61 1.125 109.311 E94\n" "0 1 1 63 1.006 110.058 E94\n" "0 1 1 64 0.988 111.064 E94\n" "0 1 1 67 1.216 104.557 E94\n" "0 1 1 68 1.018 107.195 C94\n" "0 1 1 73 1.160 104.658 E94\n", "0 1 1 78 1.012 109.850 E94\n" "0 1 1 80 0.947 113.327 E94\n" "0 1 1 81 1.108 109.837 E94\n" "0 2 1 2 1.113 111.453 C94\n" "0 2 1 3 0.667 104.829 C94\n" "0 2 1 4 1.022 109.873 E94\n" "0 2 1 5 0.632 110.292 C94\n" "0 2 1 6 1.074 108.699 C94\n" "0 2 1 8 0.884 111.553 C94\n" "0 2 1 9 1.118 109.577 E94\n" "0 2 1 10 1.160 107.963 E94\n" "0 2 1 11 1.192 110.419 E94\n" "0 2 1 12 1.070 109.410 E94\n" "0 2 1 15 1.078 109.560 E94\n" "0 2 1 17 1.077 109.434 E94\n" "0 2 1 18 1.188 105.110 E94\n" "0 2 1 20 1.053 107.448 E94\n" "0 2 1 22 0.942 114.020 E94\n" "0 2 1 25 0.893 106.815 E94\n" "0 2 1 26 1.029 99.065 E94\n" "0 2 1 34 1.066 111.817 E94\n" "0 2 1 37 0.985 111.446 E94\n" "0 2 1 39 1.124 109.513 E94\n" "0 2 1 40 1.149 108.270 E94\n" "0 2 1 45 1.232 103.978 E94\n" "0 2 1 63 0.935 114.692 E94\n" "0 
2 1 67 1.224 104.687 E94\n" "0 3 1 3 0.974 111.746 E94\n" "0 3 1 4 1.019 109.850 E94\n" "0 3 1 5 0.650 108.385 C94\n" "0 3 1 6 0.528 104.112 C94\n" "0 3 1 8 1.197 105.837 E94\n" "0 3 1 9 1.201 105.535 E94\n" "0 3 1 10 0.634 102.655 C94\n" "0 3 1 11 1.189 110.328 E94\n" "0 3 1 12 1.136 106.064 E94\n" "0 3 1 13 1.147 103.645 E94\n" "0 3 1 14 1.048 106.404 E94\n" "0 3 1 15 1.125 107.192 E94\n" "0 3 1 17 1.092 108.602 E94\n" "0 3 1 18 1.120 108.119 E94\n" "0 3 1 20 0.969 111.830 E94\n" "0 3 1 22 0.999 110.522 E94\n" "0 3 1 26 0.742 116.555 E94\n" "0 3 1 34 1.141 107.871 E94\n" "0 3 1 37 1.011 109.833 E94\n" "0 3 1 39 1.136 108.751 E94\n" "0 3 1 40 1.174 106.941 E94\n" "0 3 1 41 1.033 108.216 E94\n" "0 3 1 45 1.221 104.281 E94\n" "0 3 1 63 1.069 107.077 E94\n", "0 3 1 64 1.028 109.186 E94\n" "0 3 1 81 1.167 107.327 E94\n" "0 4 1 4 0.954 114.186 E94\n" "0 4 1 5 0.615 111.417 X94\n" "0 4 1 6 1.273 109.977 E94\n" "0 4 1 8 1.099 111.063 E94\n" "0 4 1 9 1.187 106.750 E94\n" "0 4 1 10 1.117 110.488 E94\n" "0 4 1 13 1.021 110.047 E94\n" "0 4 1 15 1.028 112.432 E94\n" "0 4 1 18 1.187 105.351 E94\n" "0 4 1 22 1.174 102.556 E94\n" "0 4 1 26 0.853 108.999 E94\n" "0 4 1 34 1.148 108.160 E94\n" "0 4 1 37 0.993 111.424 E94\n" "0 5 1 5 0.516 108.836 C94\n" "0 5 1 6 0.781 108.577 C94\n" "0 5 1 8 0.653 110.297 C94\n" "0 5 1 9 0.733 109.894 C94\n" "0 5 1 10 0.740 107.646 C94\n" "0 5 1 11 0.875 107.897 C94\n" "0 5 1 12 0.698 108.162 C94\n" "0 5 1 13 0.613 106.049 E94\n" "0 5 1 14 0.508 113.019 E94\n" "0 5 1 15 0.576 109.609 C94\n" "0 5 1 17 0.634 107.944 X94\n" "0 5 1 18 0.663 106.855 X94\n" "0 5 1 19 0.450 113.195 X94\n" "0 5 1 20 0.706 111.000 C94\n" "0 5 1 22 0.618 110.380 E94\n" "0 5 1 25 0.487 109.486 X94\n" "0 5 1 26 0.466 111.172 X94\n" "0 5 1 34 0.872 106.224 C94\n" "0 5 1 35 0.644 125.663 X94\n" "0 5 1 37 0.627 109.491 C94\n" "0 5 1 39 0.811 106.299 C94\n" "0 5 1 40 0.719 109.870 C94\n" "0 5 1 41 0.525 108.904 C94\n" "0 5 1 43 0.692 109.083 X94\n" "0 5 1 45 0.741 105.197 X94\n" "0 5 1 46 0.719 106.735 X94\n" "0 5 1 54 0.874 106.973 C94\n" "0 5 1 55 0.861 108.507 C94\n" "0 5 1 56 0.814 108.223 C94\n" "0 5 1 57 0.626 110.420 E94\n" "0 5 1 58 0.750 105.481 E94\n" "0 5 1 61 0.710 109.227 X94\n" "0 5 1 62 0.655 113.035 X94\n" "0 5 1 63 0.621 110.467 E94\n" "0 5 1 64 0.622 110.457 E94\n" "0 5 1 67 0.732 106.474 E94\n", "0 5 1 68 0.748 103.817 C94\n" "0 5 1 72 0.547 116.576 X94\n" "0 5 1 73 0.633 107.153 X94\n" "0 5 1 78 0.640 109.078 E94\n" "0 5 1 80 0.684 105.144 E94\n" "0 5 1 81 0.721 107.870 E94\n" "0 6 1 6 1.156 111.368 C94\n" "0 6 1 8 1.333 112.223 E94\n" "0 6 1 9 1.224 116.950 E94\n" "0 6 1 10 1.432 108.568 E94\n" "0 6 1 11 1.593 106.900 E94\n" "0 6 1 15 1.273 112.012 E94\n" "0 6 1 17 1.348 108.655 E94\n" "0 6 1 19 0.906 117.214 E94\n" "0 6 1 20 1.293 108.202 E94\n" "0 6 1 22 1.287 108.913 E94\n" "0 6 1 25 1.171 103.598 E94\n" "0 6 1 26 0.888 118.433 E94\n" "0 6 1 34 1.257 114.975 E94\n" "0 6 1 37 0.878 107.978 C94\n" "0 6 1 39 1.485 106.464 E94\n" "0 6 1 40 1.371 110.779 E94\n" "0 6 1 41 1.333 106.467 E94\n" "0 6 1 45 1.523 104.438 E94\n" "0 6 1 57 1.308 108.467 E94\n" "0 6 1 63 1.351 106.535 E94\n" "0 6 1 64 1.238 111.308 E94\n" "0 8 1 8 1.203 110.856 E94\n" "0 8 1 9 1.133 114.080 E94\n" "0 8 1 10 1.258 108.683 E94\n" "0 8 1 12 1.217 107.251 E94\n" "0 8 1 15 1.120 112.356 E94\n" "0 8 1 20 1.116 109.353 E94\n" "0 8 1 25 1.143 98.698 E94\n" "0 8 1 34 1.138 113.412 E94\n" "0 8 1 37 1.090 110.992 E94\n" "0 8 1 39 1.364 104.193 E94\n" "0 8 1 40 0.964 123.962 E94\n" "0 8 1 41 1.234 103.868 E94\n" "0 8 1 43 
1.137 113.596 E94\n" "0 8 1 45 1.583 96.139 E94\n" "0 8 1 57 1.038 114.266 E94\n" "0 8 1 63 1.104 110.598 E94\n" "0 8 1 64 1.156 108.127 E94\n" "0 9 1 10 1.209 110.720 E94\n" "0 9 1 12 1.173 109.152 E94\n" "0 9 1 15 1.024 117.465 E94\n" "0 9 1 25 1.060 102.432 E94\n" "0 9 1 37 1.077 111.565 E94\n" "0 9 1 40 1.084 116.728 E94\n" "0 9 1 80 1.163 107.509 E94\n", "0 10 1 10 1.191 111.995 E94\n" "0 10 1 15 1.161 110.502 E94\n" "0 10 1 17 1.269 105.509 E94\n" "0 10 1 20 1.220 104.838 E94\n" "0 10 1 22 1.132 109.262 E94\n" "0 10 1 25 1.015 104.822 E94\n" "0 10 1 37 1.107 110.423 E94\n" "0 10 1 40 1.264 108.536 E94\n" "0 10 1 41 1.087 110.961 E94\n" "0 10 1 57 1.268 103.622 E94\n" "0 11 1 11 1.638 106.081 C94\n" "0 11 1 15 1.254 109.517 E94\n" "0 11 1 20 1.243 107.637 E94\n" "0 11 1 25 1.244 97.532 E94\n" "0 11 1 34 1.338 108.669 E94\n" "0 11 1 35 1.556 110.367 E94\n" "0 11 1 37 1.151 112.278 E94\n" "0 11 1 41 1.301 105.053 E94\n" "0 11 1 45 1.550 100.991 E94\n" "0 11 1 73 1.303 106.569 E94\n" "0 11 1 75 0.884 114.378 E94\n" "0 12 1 12 1.096 110.422 C94\n" "0 12 1 15 1.146 111.064 E94\n" "0 12 1 18 1.299 104.827 E94\n" "0 12 1 19 0.932 108.971 E94\n" "0 12 1 20 1.081 108.605 E94\n" "0 12 1 22 1.097 108.028 E94\n" "0 12 1 25 0.989 106.118 E94\n" "0 12 1 37 1.076 109.030 E94\n" "0 12 1 39 1.150 110.359 E94\n" "0 12 1 45 1.353 101.430 E94\n" "0 12 1 63 1.071 109.474 E94\n" "0 12 1 64 1.093 108.338 E94\n" "0 13 1 13 1.093 111.645 E94\n" "0 13 1 20 1.084 106.534 E94\n" "0 13 1 22 1.068 107.469 E94\n" "0 13 1 45 1.305 101.383 E94\n" "0 14 1 20 1.021 107.718 E94\n" "0 15 1 15 1.147 111.896 E94\n" "0 15 1 25 1.059 103.308 E94\n" "0 15 1 34 1.222 107.318 E94\n" "0 15 1 37 1.051 110.959 E94\n" "0 15 1 40 1.149 111.005 E94\n" "0 15 1 41 1.263 100.981 E94\n" "0 15 1 63 1.060 110.596 E94\n" "0 15 1 64 1.059 110.703 E94\n" "0 15 1 73 1.289 105.029 E94\n" "0 17 1 37 1.065 110.049 E94\n" "0 18 1 20 1.121 107.960 E94\n" "0 18 1 22 1.283 101.125 E94\n" "0 18 1 37 1.203 104.390 E94\n", "0 18 1 45 1.287 105.273 E94\n" "0 18 1 64 1.093 109.683 E94\n" "0 19 1 54 0.772 119.506 E94\n" "0 20 1 20 1.229 99.084 E94\n" "0 20 1 37 1.052 107.428 E94\n" "0 20 1 45 1.169 106.335 E94\n" "0 22 1 22 0.990 111.226 E94\n" "0 22 1 25 0.885 107.293 E94\n" "0 22 1 34 1.045 112.940 E94\n" "0 22 1 37 1.037 108.586 E94\n" "0 22 1 45 1.182 106.181 E94\n" "0 25 1 25 0.551 127.138 E94\n" "0 25 1 34 0.779 119.271 E94\n" "0 25 1 37 0.784 113.945 E94\n" "0 25 1 40 1.062 102.417 E94\n" "0 25 1 58 0.916 110.234 E94\n" "0 26 1 26 0.625 118.700 E94\n" "0 34 1 34 1.216 109.167 E94\n" "0 34 1 37 1.075 111.275 E94\n" "0 34 1 41 1.048 112.238 E94\n" "0 34 1 63 1.077 111.412 E94\n" "0 34 1 73 1.142 110.240 E94\n" "0 37 1 37 0.986 111.315 E94\n" "0 37 1 40 1.129 109.188 E94\n" "0 37 1 43 1.074 111.478 E94\n" "0 37 1 45 1.259 102.800 E94\n" "0 37 1 57 0.981 112.047 E94\n" "0 37 1 64 1.175 102.239 E94\n" "0 37 1 68 1.100 109.983 E94\n" "0 37 1 78 1.005 110.638 E94\n" "0 37 1 81 1.176 107.040 E94\n" "0 39 1 39 1.260 108.547 E94\n" "0 40 1 40 1.182 112.005 E94\n" "0 40 1 55 1.322 105.786 E94\n" "0 40 1 63 1.032 114.505 E94\n" "0 40 1 64 1.000 116.376 E94\n" "0 41 1 41 1.082 105.400 E94\n" "0 41 1 63 1.061 107.112 E94\n" "0 41 1 81 1.093 110.553 E94\n" "0 45 1 45 1.391 102.088 E94\n" "0 0 2 0 0.000 119.400 0:*-2-* MMFF DEF\n" "1 0 2 0 0.000 118.200 0:*-2-* MMFF DEF\n" "2 0 2 0 0.000 120.800 2::*-2-* MMFF DEF\n" "3 0 2 0 0.000 62.600 3::*-2-* MMFF DEF\n" "6 0 2 0 0.000 60.500 6:*-2-* MMFF DEF\n" "0 1 2 1 0.752 118.043 C94\n" "0 1 2 2 0.672 122.141 C94\n" "1 1 2 
2 0.684 116.929 C94\n" "1 1 2 3 0.698 116.104 C94\n" "0 1 2 4 0.828 125.045 E94\n" "1 1 2 4 0.846 121.613 E94\n", "0 1 2 5 0.446 120.108 C94\n" "0 1 2 6 1.160 115.518 E94\n" "0 1 2 10 1.015 116.707 E94\n" "0 1 2 12 0.983 115.343 E94\n" "0 1 2 13 0.964 115.395 E94\n" "0 1 2 15 0.939 119.465 E94\n" "0 1 2 17 0.883 121.868 E94\n" "0 1 2 18 0.961 117.918 E94\n" "0 1 2 20 0.880 118.310 E94\n" "0 1 2 22 0.873 119.114 E94\n" "0 1 2 30 0.826 124.605 E94\n" "1 1 2 37 0.721 116.064 C94\n" "0 1 2 40 0.982 118.515 E94\n" "0 1 2 45 1.121 109.921 E94\n" "0 1 2 56 1.006 117.192 E94\n" "1 1 2 63 0.768 127.945 E94\n" "1 1 2 64 0.966 113.884 E94\n" "1 1 2 67 1.115 110.185 E94\n" "1 2 2 2 0.747 121.550 C94\n" "2 2 2 2 0.796 126.284 E94\n" "6 2 2 2 0.173 60.549 E94\n" "1 2 2 3 0.545 111.297 C94\n" "2 2 2 3 0.893 118.456 E94\n" "5 2 2 3 0.184 59.145 E94\n" "1 2 2 4 0.902 121.053 E94\n" "2 2 2 4 0.889 119.794 E94\n" "0 2 2 5 0.535 121.004 C94\n" "1 2 2 5 0.463 118.442 C94\n" "0 2 2 6 1.117 121.267 C94\n" "1 2 2 6 1.204 114.538 E94\n" "1 2 2 9 0.960 123.536 E94\n" "2 2 2 9 1.045 116.273 E94\n" "0 2 2 10 1.003 120.828 E94\n" "1 2 2 10 1.026 117.324 E94\n" "0 2 2 11 1.089 119.100 X94\n" "1 2 2 11 1.090 116.828 E94\n" "0 2 2 12 0.931 120.132 X94\n" "1 2 2 12 0.957 117.526 E94\n" "0 2 2 13 0.867 122.717 E94\n" "0 2 2 14 0.818 122.584 E94\n" "1 2 2 14 0.819 122.344 E94\n" "0 2 2 15 0.931 121.553 E94\n" "1 2 2 15 0.949 119.466 E94\n" "0 2 2 17 0.977 117.167 E94\n" "0 2 2 18 1.044 114.561 E94\n" "0 2 2 19 0.668 124.721 E94\n" "0 2 2 20 0.931 117.784 E94\n" "0 2 2 22 0.809 126.820 E94\n" "3 2 2 22 0.149 66.165 E94\n" "0 2 2 25 0.700 123.830 E94\n" "0 2 2 34 1.066 116.151 E94\n", "0 2 2 35 0.911 137.103 X94\n" "1 2 2 37 0.598 117.508 C94\n" "2 2 2 37 0.817 124.229 E94\n" "1 2 2 39 0.976 122.360 E94\n" "0 2 2 40 0.773 126.830 C94\n" "1 2 2 40 0.976 120.132 E94\n" "0 2 2 41 0.432 110.442 C94\n" "0 2 2 43 1.144 111.808 E94\n" "0 2 2 45 1.194 109.231 X94\n" "1 2 2 45 1.062 113.984 E94\n" "0 2 2 46 1.005 121.534 E94\n" "0 2 2 55 0.995 121.154 E94\n" "0 2 2 56 1.234 108.879 E94\n" "0 2 2 62 0.808 135.269 X94\n" "1 2 2 63 0.948 118.277 E94\n" "1 2 2 64 0.866 123.528 E94\n" "2 2 2 64 0.859 121.998 E94\n" "1 2 2 67 1.132 112.136 E94\n" "0 2 2 72 0.770 134.269 X94\n" "1 2 2 81 1.078 116.541 E94\n" "2 3 2 3 0.853 120.370 E94\n" "2 3 2 4 0.878 119.739 E94\n" "1 3 2 5 0.487 117.291 C94\n" "1 3 2 6 1.142 116.738 E94\n" "2 3 2 9 1.005 117.648 E94\n" "1 3 2 10 1.039 115.698 E94\n" "1 3 2 11 1.150 112.876 E94\n" "1 3 2 12 0.997 114.732 E94\n" "1 3 2 13 0.946 116.643 E94\n" "1 3 2 14 0.891 117.111 E94\n" "1 3 2 15 1.023 114.635 E94\n" "1 3 2 20 0.870 119.265 E94\n" "1 3 2 22 0.816 123.510 E94\n" "1 3 2 30 1.025 112.209 E94\n" "1 3 2 34 1.099 111.723 E94\n" "1 3 2 35 1.141 118.767 E94\n" "2 3 2 37 0.868 119.758 E94\n" "1 3 2 40 1.024 116.408 E94\n" "1 3 2 41 0.855 119.505 E94\n" "1 3 2 43 1.046 114.257 E94\n" "1 3 2 45 1.077 112.401 E94\n" "1 3 2 46 1.066 114.841 E94\n" "2 4 2 4 0.832 124.158 E94\n" "0 4 2 5 0.573 121.000 #E94\n" "1 4 2 5 0.545 120.000 #E94\n" "2 4 2 9 0.973 120.845 E94\n" "1 4 2 15 0.906 122.447 E94\n" "1 4 2 18 0.947 119.537 E94\n" "1 4 2 30 0.819 126.938 E94\n" "2 4 2 37 0.864 121.093 E94\n" "1 4 2 40 1.083 114.355 E94\n", "1 4 2 45 1.158 109.426 E94\n" "2 4 2 63 0.860 122.442 E94\n" "0 5 2 5 0.365 119.523 C94\n" "0 5 2 6 0.589 108.757 C94\n" "1 5 2 9 0.643 117.000 #E94\n" "0 5 2 10 0.667 114.859 E94\n" "0 5 2 11 0.795 108.186 X94\n" "0 5 2 12 0.622 110.650 X94\n" "0 5 2 13 0.566 113.513 E94\n" "0 5 2 15 0.546 119.562 
E94\n" "0 5 2 17 0.492 124.000 #E94\n" "0 5 2 18 0.548 119.053 E94\n" "0 5 2 22 0.534 120.000 #E94\n" "0 5 2 25 0.395 124.000 #E94\n" "0 5 2 30 0.572 120.000 #E94\n" "0 5 2 35 0.682 124.164 X94\n" "1 5 2 37 0.491 117.423 C94\n" "1 5 2 39 0.655 115.724 E94\n" "0 5 2 40 0.568 112.322 C94\n" "0 5 2 41 0.294 123.706 C94\n" "0 5 2 45 0.728 107.774 X94\n" "0 5 2 55 0.651 116.000 #E94\n" "0 5 2 62 0.568 125.344 X94\n" "1 5 2 63 0.550 120.000 #E94\n" "1 5 2 64 0.546 120.000 #E94\n" "0 5 2 72 0.531 122.009 X94\n" "1 5 2 81 0.665 115.000 #E94\n" "1 6 2 9 1.214 120.520 E94\n" "0 6 2 10 1.311 115.921 E94\n" "0 6 2 22 1.080 120.560 E94\n" "0 6 2 35 1.172 132.391 E94\n" "1 6 2 37 1.198 114.441 E94\n" "0 6 2 40 1.239 119.073 E94\n" "0 6 2 45 1.637 102.438 E94\n" "1 9 2 10 1.098 119.802 E94\n" "1 9 2 15 0.915 127.574 E94\n" "2 9 2 37 0.981 119.536 E94\n" "1 9 2 40 0.922 130.521 E94\n" "0 10 2 12 1.144 112.723 E94\n" "0 10 2 15 1.078 117.519 E94\n" "0 10 2 25 1.144 100.818 E94\n" "1 10 2 37 1.021 117.139 E94\n" "0 10 2 40 0.988 126.034 E94\n" "0 10 2 41 0.951 120.000 E94\n" "0 12 2 12 1.012 119.105 E94\n" "0 12 2 17 1.110 114.206 E94\n" "0 12 2 18 1.201 110.553 E94\n" "0 12 2 19 0.704 126.646 E94\n" "0 12 2 20 0.903 120.563 E94\n" "0 12 2 30 0.892 122.753 E94\n" "1 12 2 37 0.976 116.136 E94\n", "0 12 2 45 1.076 115.543 E94\n" "0 13 2 18 1.132 113.616 E94\n" "0 15 2 15 0.996 123.027 E94\n" "0 15 2 35 0.950 133.654 E94\n" "1 15 2 37 1.007 115.757 E94\n" "0 15 2 40 0.895 128.924 E94\n" "0 17 2 17 1.051 117.955 E94\n" "1 18 2 37 1.183 106.608 E94\n" "0 22 2 22 0.841 122.108 E94\n" "3 22 2 22 0.180 58.963 E94\n" "1 22 2 37 0.806 124.693 E94\n" "1 30 2 37 0.849 123.816 E94\n" "1 35 2 37 0.991 128.032 E94\n" "0 40 2 40 0.949 128.436 E94\n" "0 40 2 56 1.072 120.987 E94\n" "1 40 2 63 0.922 124.268 E94\n" "1 40 2 64 0.955 121.881 E94\n" "0 40 2 72 0.820 135.317 E94\n" "0 45 2 45 1.284 108.095 E94\n" "2 64 2 64 0.888 120.342 E94\n" "0 0 3 0 0.000 117.300 0:*-3-* MMFF DEF\n" "1 0 3 0 0.000 115.800 1:*-3-* MMFF DEF\n" "4 0 3 0 0.000 90.800 4:*-3-* MMFF DEF\n" "7 0 3 0 0.000 91.100 7:*-3-* MMFF DEF\n" "8 0 3 0 0.000 88.900 7:*-3-* MMFF DEF\n" "0 1 3 1 1.151 118.016 C94\n" "1 1 3 2 1.106 116.853 C94\n" "1 1 3 3 1.214 114.612 C94\n" "0 1 3 5 0.808 117.280 C94\n" "0 1 3 6 1.043 109.716 C94\n" "0 1 3 7 0.938 124.410 C94\n" "0 1 3 9 0.978 119.788 E94\n" "1 1 3 9 1.038 115.132 E94\n" "0 1 3 10 0.984 112.735 C94\n" "0 1 3 12 1.007 113.972 E94\n" "0 1 3 15 1.024 113.612 E94\n" "0 1 3 16 0.949 119.986 E94\n" "0 1 3 18 0.732 134.097 E94\n" "0 1 3 20 0.830 120.312 E94\n" "0 1 3 22 0.928 115.001 E94\n" "0 1 3 35 1.058 122.808 E94\n" "1 1 3 37 1.051 115.191 C94\n" "1 1 3 39 1.178 107.895 E94\n" "0 1 3 40 0.979 118.457 E94\n" "0 1 3 41 0.897 116.681 E94\n" "0 1 3 43 1.046 113.731 X94\n" "0 1 3 45 1.132 109.019 E94\n" "0 1 3 51 1.160 116.573 X94\n" "0 1 3 53 1.052 115.065 X94\n" "0 1 3 54 1.135 111.322 E94\n" "1 1 3 58 1.162 108.129 E94\n", "0 1 3 62 1.119 111.523 E94\n" "1 1 3 63 0.909 117.001 E94\n" "1 1 3 64 0.887 118.253 E94\n" "0 1 3 67 1.142 110.666 E94\n" "0 1 3 74 1.010 116.851 X94\n" "0 1 3 75 0.646 128.037 X94\n" "2 2 3 2 0.976 112.562 E94\n" "6 2 3 2 0.157 62.792 E94\n" "2 2 3 3 0.957 113.239 E94\n" "1 2 3 5 0.901 115.350 C94\n" "1 2 3 6 0.932 106.510 C94\n" "1 2 3 7 0.936 122.623 C94\n" "1 2 3 9 0.831 122.253 C94\n" "2 2 3 9 1.120 111.408 E94\n" "1 2 3 10 1.042 111.721 C94\n" "1 2 3 12 0.901 120.769 E94\n" "1 2 3 15 1.057 112.105 E94\n" "1 2 3 16 0.881 124.850 E94\n" "1 2 3 22 0.969 113.027 E94\n" "1 2 3 25 0.853 109.794 
E94\n" "2 2 3 37 0.973 112.935 E94\n" "2 2 3 39 1.197 107.592 E94\n" "1 2 3 40 0.910 123.437 E94\n" "1 2 3 43 1.105 111.169 E94\n" "1 2 3 53 1.082 114.032 E94\n" "1 2 3 54 1.012 118.588 E94\n" "1 2 3 55 1.186 107.278 E94\n" "1 2 3 56 1.151 108.909 E94\n" "2 2 3 63 0.918 116.947 E94\n" "2 2 3 64 1.033 110.084 E94\n" "1 2 3 67 1.022 117.597 E94\n" "2 3 3 3 0.822 121.775 E94\n" "8 3 3 3 1.280 89.965 E94\n" "1 3 3 5 0.943 113.762 C94\n" "1 3 3 6 0.935 103.030 C94\n" "1 3 3 7 0.919 117.024 C94\n" "1 3 3 9 1.050 115.704 E94\n" "1 3 3 10 1.129 110.421 E94\n" "1 3 3 12 1.053 111.492 E94\n" "1 3 3 15 1.390 97.562 E94\n" "1 3 3 16 1.092 111.888 E94\n" "1 3 3 20 0.977 110.910 E94\n" "1 3 3 22 1.010 110.295 E94\n" "8 3 3 30 1.353 87.789 E94\n" "2 3 3 37 0.932 114.949 E94\n" "2 3 3 39 1.237 105.384 E94\n" "1 3 3 40 1.003 117.124 E94\n" "1 3 3 41 0.790 124.361 E94\n" "1 3 3 45 0.919 121.023 E94\n" "1 3 3 53 1.170 109.169 E94\n" "2 3 3 63 0.981 112.685 E94\n", "2 3 3 64 0.880 118.840 E94\n" "1 3 3 67 1.119 111.860 E94\n" "1 4 3 6 1.269 111.750 E94\n" "1 4 3 7 1.126 120.852 E94\n" "1 4 3 9 1.192 109.833 E94\n" "2 4 3 37 0.964 114.081 E94\n" "0 5 3 5 0.594 116.699 C94\n" "0 5 3 6 0.819 108.253 C94\n" "0 5 3 7 0.670 123.439 C94\n" "0 5 3 9 0.623 119.491 C94\n" "1 5 3 9 0.638 117.168 E94\n" "0 5 3 10 0.874 111.761 C94\n" "0 5 3 16 0.522 124.405 E94\n" "1 5 3 37 0.564 116.400 E94\n" "0 5 3 40 0.959 111.684 C94\n" "0 5 3 53 0.644 118.000 #E94\n" "0 5 3 54 0.816 115.471 C94\n" "1 5 3 63 0.559 118.000 #E94\n" "1 5 3 64 0.566 117.000 #E94\n" "0 5 3 67 0.700 113.698 E94\n" "0 6 3 6 1.678 109.094 E94\n" "0 6 3 7 1.155 124.425 C94\n" "0 6 3 9 1.275 119.478 E94\n" "1 6 3 9 1.416 111.868 E94\n" "0 6 3 10 1.405 112.187 E94\n" "0 6 3 16 1.269 116.317 E94\n" "0 6 3 20 1.182 113.581 E94\n" "4 6 3 20 1.495 93.130 E94\n" "0 6 3 22 1.276 110.826 E94\n" "7 6 3 30 1.530 93.191 E94\n" "1 6 3 37 0.808 102.881 C94\n" "1 6 3 39 1.611 104.655 E94\n" "0 6 3 40 1.371 113.565 E94\n" "0 6 3 41 1.477 102.658 E94\n" "0 6 3 43 1.330 114.183 E94\n" "0 6 3 48 1.315 115.328 E94\n" "0 6 3 51 1.409 120.427 E94\n" "0 6 3 54 1.495 110.510 E94\n" "0 6 3 62 1.421 112.542 E94\n" "1 6 3 63 1.339 109.082 E94\n" "1 6 3 64 1.267 111.993 E94\n" "1 6 3 80 1.256 113.698 E94\n" "1 7 3 9 1.147 127.084 E94\n" "0 7 3 10 0.907 127.152 C94\n" "0 7 3 12 0.984 130.049 E94\n" "0 7 3 15 1.101 123.313 E94\n" "0 7 3 20 0.713 129.492 C94\n" "0 7 3 22 1.093 121.851 E94\n" "1 7 3 30 0.972 129.010 E94\n" "1 7 3 37 0.734 119.968 C94\n" "1 7 3 39 1.352 116.727 E94\n", "0 7 3 41 1.281 112.087 E94\n" "0 7 3 43 1.163 124.549 X94\n" "0 7 3 48 1.114 127.879 E94\n" "1 7 3 54 1.288 114.184 E94\n" "0 7 3 55 1.258 120.056 E94\n" "0 7 3 56 1.175 123.854 E94\n" "1 7 3 58 1.323 117.081 E94\n" "0 7 3 62 1.129 129.349 E94\n" "1 7 3 63 1.036 126.456 E94\n" "1 7 3 64 1.071 124.133 E94\n" "1 7 3 78 0.955 132.047 E94\n" "1 9 3 9 1.119 120.094 E94\n" "2 9 3 9 1.021 124.131 E94\n" "0 9 3 10 1.105 120.697 E94\n" "1 9 3 10 1.154 116.608 E94\n" "0 9 3 12 1.056 118.046 E94\n" "0 9 3 15 1.036 119.679 E94\n" "1 9 3 15 1.042 118.787 E94\n" "1 9 3 16 0.936 127.665 E94\n" "0 9 3 17 1.035 117.902 E94\n" "0 9 3 18 1.121 114.698 E94\n" "0 9 3 20 0.951 120.437 E94\n" "0 9 3 22 1.040 116.861 E94\n" "0 9 3 25 0.955 109.442 E94\n" "0 9 3 35 1.054 134.470 E94\n" "1 9 3 37 0.997 119.569 E94\n" "2 9 3 37 1.060 114.740 E94\n" "0 9 3 40 0.844 128.078 C94\n" "1 9 3 40 1.018 124.152 E94\n" "0 9 3 41 1.114 112.513 E94\n" "0 9 3 45 1.497 102.140 E94\n" "2 9 3 54 1.244 108.056 E94\n" "1 9 3 57 1.038 118.096 E94\n" "1 
9 3 63 1.004 120.054 E94\n" "1 9 3 64 1.053 117.060 E94\n" "1 9 3 80 0.959 124.150 E94\n" "0 10 3 10 1.612 114.923 C94\n" "0 10 3 15 1.167 112.206 E94\n" "0 10 3 16 1.005 123.150 E94\n" "0 10 3 18 1.299 106.052 E94\n" "0 10 3 20 1.019 115.213 E94\n" "4 10 3 20 1.338 92.724 E94\n" "0 10 3 22 1.076 113.651 E94\n" "7 10 3 30 1.438 90.508 E94\n" "0 10 3 35 1.223 122.649 E94\n" "1 10 3 37 1.101 112.495 E94\n" "1 10 3 39 1.434 104.419 E94\n" "0 10 3 40 1.093 119.697 E94\n" "0 10 3 43 1.144 115.929 E94\n" "0 10 3 51 1.375 114.685 E94\n" "0 10 3 55 1.286 109.590 E94\n", "0 10 3 56 1.200 113.168 E94\n" "1 10 3 63 1.075 114.623 E94\n" "1 10 3 64 1.098 113.233 E94\n" "1 10 3 78 1.182 109.543 E94\n" "0 11 3 40 1.296 113.244 E94\n" "0 11 3 75 0.850 120.964 E94\n" "0 12 3 40 1.095 115.284 E94\n" "1 12 3 63 0.965 117.217 E94\n" "0 12 3 74 1.110 116.502 E94\n" "0 15 3 15 1.109 115.620 E94\n" "0 15 3 16 0.981 124.329 E94\n" "0 15 3 17 1.191 110.607 E94\n" "0 15 3 18 1.061 118.034 E94\n" "4 15 3 20 1.345 91.041 E94\n" "1 15 3 30 1.026 113.753 E94\n" "1 15 3 37 1.037 113.305 E94\n" "0 15 3 40 1.066 117.388 E94\n" "1 15 3 57 0.896 122.260 E94\n" "0 15 3 67 1.407 102.583 E94\n" "0 15 3 74 1.076 119.117 E94\n" "1 16 3 30 0.991 117.695 E94\n" "0 16 3 35 1.030 130.230 E94\n" "1 16 3 37 0.934 121.415 E94\n" "1 16 3 39 1.004 123.196 E94\n" "0 16 3 62 0.963 126.347 E94\n" "1 16 3 63 1.006 117.454 E94\n" "1 16 3 64 1.064 114.110 E94\n" "0 17 3 17 0.939 123.528 E94\n" "1 18 3 37 0.948 118.188 E94\n" "4 20 3 20 1.495 94.800 C94\n" "4 20 3 22 1.286 89.459 E94\n" "7 20 3 37 1.282 89.733 E94\n" "4 20 3 43 1.384 90.526 E94\n" "0 22 3 22 0.932 115.334 E94\n" "4 22 3 22 1.496 83.915 E94\n" "1 22 3 37 0.940 114.995 E94\n" "1 25 3 37 0.677 123.404 E94\n" "0 25 3 67 0.661 131.520 E94\n" "2 37 3 37 0.933 115.566 E94\n" "1 37 3 40 0.987 118.790 E94\n" "1 37 3 41 0.864 119.565 E94\n" "1 37 3 43 1.125 110.383 X94\n" "1 37 3 45 1.120 110.268 E94\n" "1 37 3 54 1.033 117.645 E94\n" "1 37 3 62 1.085 114.132 E94\n" "2 37 3 63 0.934 116.163 E94\n" "2 37 3 64 0.955 114.701 E94\n" "1 37 3 67 1.084 114.460 E94\n" "2 39 3 39 1.231 112.582 E94\n" "0 40 3 40 1.146 117.002 C94\n" "1 40 3 63 0.888 126.089 E94\n", "1 40 3 64 1.145 110.889 E94\n" "0 40 3 75 0.790 122.163 E94\n" "0 45 3 53 1.382 105.849 E94\n" "1 55 3 64 1.267 104.747 E94\n" "2 64 3 64 0.989 113.280 E94\n" "0 0 4 0 0.000 180.000 0:*-4-* MMFF DEF\n" "1 0 4 0 0.000 180.000 1:*-4-* MMFF DEF\n" "0 1 4 4 0.423 180.000 E94\n" "0 1 4 42 0.463 180.000 E94\n" "0 2 4 2 0.442 180.000 E94\n" "1 2 4 4 0.432 180.000 E94\n" "0 2 4 30 0.444 180.000 E94\n" "1 2 4 42 0.474 180.000 E94\n" "1 3 4 4 0.427 180.000 E94\n" "1 3 4 42 0.469 180.000 E94\n" "0 4 4 5 0.281 180.000 E94\n" "0 4 4 6 0.551 180.000 E94\n" "0 4 4 10 0.486 180.000 E94\n" "1 4 4 37 0.430 180.000 E94\n" "0 7 4 9 0.648 180.000 E94\n" "1 9 4 42 0.537 180.000 E94\n" "0 15 4 42 0.487 180.000 E94\n" "0 20 4 42 0.469 180.000 E94\n" "0 22 4 42 0.472 180.000 E94\n" "1 37 4 42 0.472 180.000 E94\n" "0 42 4 43 0.541 180.000 E94\n" "1 42 4 63 0.474 180.000 E94\n" "1 42 4 64 0.473 180.000 E94\n" "0 0 6 0 0.000 110.400 0:*-6-* MMFF DEF\n" "3 0 6 0 0.000 57.900 3::*-6-* MMFF DEF\n" "4 0 6 0 0.000 90.200 4:*-6-* MMFF DEF\n" "0 1 6 1 1.197 106.926 C94\n" "0 1 6 2 0.967 103.614 C94\n" "0 1 6 3 0.923 108.055 C94\n" "0 1 6 6 1.884 103.905 E94\n" "0 1 6 8 1.629 105.422 E94\n" "0 1 6 9 1.628 106.496 E94\n" "0 1 6 10 1.656 105.317 E94\n" "0 1 6 15 1.480 111.230 E94\n" "0 1 6 17 1.493 111.951 E94\n" "0 1 6 18 1.370 116.346 E94\n" "0 1 6 19 1.093 114.943 
E94\n" "0 1 6 20 1.316 112.833 E94\n" "0 1 6 21 0.793 106.503 C94\n" "0 1 6 22 1.391 109.759 E94\n" "0 1 6 25 1.095 115.581 X94\n" "0 1 6 26 1.170 112.081 E94\n" "0 1 6 37 1.075 102.846 C94\n" "0 1 6 40 1.719 103.733 E94\n" "0 1 6 41 1.454 109.046 E94\n" "0 1 6 43 1.642 105.462 E94\n", "0 1 6 45 1.642 105.875 X94\n" "0 1 6 63 1.449 109.545 E94\n" "0 1 6 64 1.512 106.848 E94\n" "0 2 6 2 1.354 113.339 E94\n" "0 2 6 3 0.671 98.438 C94\n" "0 2 6 18 1.365 117.169 E94\n" "0 2 6 25 1.025 120.078 E94\n" "0 2 6 29 0.816 105.727 C94\n" "0 2 6 37 1.418 110.694 E94\n" "0 2 6 57 1.341 114.785 E94\n" "0 3 6 3 1.455 110.067 E94\n" "0 3 6 4 1.409 112.404 E94\n" "0 3 6 8 1.648 105.872 E94\n" "0 3 6 10 1.596 108.437 E94\n" "0 3 6 18 1.274 121.468 E94\n" "0 3 6 19 1.019 119.840 E94\n" "0 3 6 20 1.379 111.381 E94\n" "4 3 6 20 1.748 91.216 E94\n" "0 3 6 22 1.328 113.491 E94\n" "0 3 6 24 0.583 111.948 C94\n" "0 3 6 25 1.006 121.410 E94\n" "0 3 6 29 0.876 111.417 E94\n" "0 3 6 37 0.614 95.300 C94\n" "0 3 6 64 1.424 111.483 E94\n" "0 4 6 18 1.423 115.233 E94\n" "0 6 6 21 1.362 95.697 E94\n" "0 8 6 21 0.832 99.409 C94\n" "0 9 6 21 1.115 101.592 E94\n" "0 10 6 21 0.923 99.688 C94\n" "0 18 6 18 1.334 125.242 E94\n" "0 18 6 33 0.812 115.364 X94\n" "0 18 6 37 1.429 114.473 E94\n" "0 18 6 39 1.558 114.152 E94\n" "0 18 6 43 1.710 108.479 E94\n" "0 19 6 19 0.642 141.096 E94\n" "0 19 6 21 0.597 118.204 X94\n" "0 19 6 37 0.941 124.421 E94\n" "4 20 6 20 1.339 89.100 C94\n" "0 20 6 21 0.944 104.587 E94\n" "0 20 6 37 1.394 110.394 E94\n" "0 21 6 40 1.124 101.417 E94\n" "0 21 6 43 1.058 103.253 E94\n" "0 21 6 54 1.175 100.000 #E94\n" "0 21 6 55 1.139 101.000 #E94\n" "3 22 6 22 0.242 58.680 E94\n" "3 22 6 43 0.279 57.087 E94\n" "0 24 6 25 0.607 118.533 X94\n" "0 25 6 25 0.777 129.375 E94\n" "0 25 6 37 1.099 115.923 E94\n" "0 26 6 37 1.090 116.692 E94\n" "0 29 6 30 0.986 108.000 #E94\n", "0 29 6 37 0.726 105.409 C94\n" "0 29 6 64 0.923 108.922 E94\n" "0 37 6 37 1.462 108.967 E94\n" "0 37 6 58 1.607 108.274 E94\n" "0 0 8 0 0.000 110.400 0:*-8-* MMFF DEF\n" "3 0 8 0 0.000 58.500 3::*-8-* MMFF DEF\n" "4 0 8 0 0.000 95.000 4:*-8-* MMFF DEF\n" "0 1 8 1 1.090 107.018 C94\n" "0 1 8 6 1.297 102.829 C94\n" "0 1 8 8 1.347 105.708 E94\n" "0 1 8 9 1.182 114.240 E94\n" "0 1 8 10 1.307 108.079 E94\n" "0 1 8 15 1.085 118.283 E94\n" "0 1 8 17 1.096 117.478 E94\n" "0 1 8 19 0.779 122.759 E94\n" "0 1 8 20 1.221 105.873 E94\n" "0 1 8 22 1.147 109.200 E94\n" "0 1 8 23 0.763 109.062 C94\n" "0 1 8 25 0.865 117.482 E94\n" "0 1 8 26 0.926 112.630 E94\n" "0 1 8 40 1.363 105.609 E94\n" "0 1 8 45 1.266 110.149 E94\n" "0 1 8 46 1.265 111.092 E94\n" "0 6 8 6 1.776 107.296 E94\n" "0 6 8 17 1.664 105.334 E94\n" "0 6 8 22 1.456 107.100 E94\n" "0 6 8 23 0.861 100.510 C94\n" "3 8 8 8 0.230 60.000 E94\n" "0 8 8 23 0.792 108.917 E94\n" "0 8 8 25 1.068 110.595 E94\n" "0 8 8 26 1.047 110.816 E94\n" "0 9 8 23 0.832 108.864 E94\n" "4 10 8 20 1.805 84.690 E94\n" "0 10 8 23 0.846 106.788 E94\n" "0 12 8 22 1.227 107.439 E94\n" "0 15 8 19 0.845 125.674 E94\n" "4 17 8 17 1.198 110.056 E94\n" "0 17 8 23 0.647 116.842 E94\n" "0 19 8 23 0.542 112.000 #E94\n" "4 20 8 20 1.103 90.370 C94\n" "0 20 8 23 0.684 113.359 C94\n" "3 22 8 22 0.209 57.087 E94\n" "0 22 8 23 0.697 110.033 E94\n" "0 22 8 25 0.896 115.361 E94\n" "0 23 8 23 0.595 105.998 C94\n" "0 23 8 25 0.510 117.000 #E94\n" "0 23 8 26 0.553 110.959 E94\n" "0 23 8 34 0.808 109.000 #E94\n" "0 23 8 39 0.757 111.820 E94\n" "0 23 8 40 0.819 108.120 E94\n" "0 23 8 43 0.857 106.222 E94\n", "0 23 8 55 0.868 106.000 #E94\n" "0 23 8 
56 0.876 105.092 E94\n" "0 0 9 0 0.000 111.500 0:*-9-* MMFF DEF\n" "1 0 9 0 0.000 109.100 1:*-9-* MMFF DEF\n" "0 1 9 3 0.878 106.409 C94\n" "0 1 9 9 1.306 110.005 E94\n" "0 1 9 53 1.216 113.995 X94\n" "0 1 9 67 1.391 106.413 E94\n" "1 2 9 3 1.242 109.856 E94\n" "1 2 9 9 1.306 112.528 E94\n" "1 3 9 3 1.204 111.488 E94\n" "1 3 9 4 1.194 113.272 E94\n" "0 3 9 6 1.579 106.872 E94\n" "0 3 9 8 1.386 108.822 E94\n" "1 3 9 9 1.390 108.355 E94\n" "0 3 9 10 1.365 109.548 E94\n" "0 3 9 12 1.373 103.303 E94\n" "0 3 9 15 1.265 110.780 E94\n" "0 3 9 18 1.205 114.743 E94\n" "0 3 9 20 1.198 109.751 E94\n" "0 3 9 25 0.873 119.927 E94\n" "0 3 9 27 0.818 108.779 C94\n" "0 3 9 34 1.355 108.199 E94\n" "0 3 9 35 1.511 109.907 E94\n" "1 3 9 37 1.185 111.663 E94\n" "1 3 9 39 1.396 108.538 E94\n" "0 3 9 40 1.365 109.440 E94\n" "0 3 9 41 1.169 112.551 E94\n" "0 3 9 45 1.369 109.796 E94\n" "1 3 9 53 1.351 110.578 E94\n" "1 3 9 54 1.643 98.943 E94\n" "0 3 9 55 1.431 106.195 E94\n" "0 3 9 56 1.375 109.289 E94\n" "1 3 9 57 1.125 115.780 E94\n" "1 3 9 63 1.247 109.989 E94\n" "1 3 9 64 1.302 106.461 E94\n" "1 3 9 78 1.323 106.641 E94\n" "1 3 9 81 1.567 101.581 E94\n" "0 4 9 19 0.456 161.741 E94\n" "1 4 9 67 1.402 108.868 E94\n" "0 6 9 67 1.794 105.043 E94\n" "0 9 9 10 1.518 109.154 E94\n" "1 9 9 37 1.397 108.014 E94\n" "0 9 9 40 1.594 106.413 E94\n" "0 9 9 62 1.390 114.417 E94\n" "1 9 9 63 1.320 112.325 E94\n" "1 9 9 64 1.352 109.711 E94\n" "1 37 9 53 1.343 110.162 E94\n" "1 37 9 67 1.296 111.871 E94\n" "0 40 9 67 1.538 108.056 E94\n" "1 53 9 64 1.318 111.149 E94\n", "0 0 10 0 0.000 117.500 0:*-10-* MMFF DEF\n" "3 0 10 0 0.000 58.900 3::*-10-* MMFF DEF\n" "4 0 10 0 0.000 92.900 4:*-10-* MMFF DEF\n" "0 1 10 1 1.117 117.909 C94\n" "0 1 10 2 1.004 118.916 E94\n" "0 1 10 3 0.821 119.600 C94\n" "0 1 10 6 1.179 108.865 C94\n" "0 1 10 8 1.137 116.189 E94\n" "0 1 10 9 1.132 117.005 E94\n" "0 1 10 10 1.247 111.009 E94\n" "0 1 10 17 1.014 122.388 E94\n" "0 1 10 20 0.960 119.679 E94\n" "0 1 10 25 0.745 125.390 E94\n" "0 1 10 28 0.552 120.066 C94\n" "0 1 10 37 1.038 116.332 E94\n" "0 1 10 39 1.060 120.838 E94\n" "0 1 10 40 1.194 113.314 E94\n" "0 1 10 41 1.031 118.033 E94\n" "0 1 10 45 1.268 109.599 E94\n" "0 1 10 63 0.949 122.185 E94\n" "0 1 10 64 0.960 121.315 E94\n" "0 2 10 2 1.146 112.878 E94\n" "0 2 10 3 1.000 120.703 E94\n" "0 2 10 6 1.405 111.609 E94\n" "0 2 10 20 1.132 111.544 E94\n" "0 2 10 28 0.638 118.553 E94\n" "0 2 10 37 0.977 121.506 E94\n" "0 3 10 3 0.709 120.274 C94\n" "0 3 10 4 0.864 130.236 E94\n" "0 3 10 6 0.960 110.133 C94\n" "0 3 10 8 1.168 116.075 E94\n" "4 3 10 8 1.527 93.608 E94\n" "0 3 10 9 1.174 116.443 E94\n" "0 3 10 10 1.184 115.377 E94\n" "0 3 10 13 0.998 118.867 E94\n" "0 3 10 14 0.871 124.162 E94\n" "0 3 10 15 1.076 118.969 E94\n" "0 3 10 17 1.132 116.612 E94\n" "0 3 10 20 0.936 122.540 E94\n" "4 3 10 20 1.371 93.349 E94\n" "0 3 10 22 0.975 120.929 E94\n" "0 3 10 25 0.794 122.157 E94\n" "0 3 10 26 0.848 117.912 E94\n" "0 3 10 28 0.575 120.277 C94\n" "0 3 10 34 1.251 112.201 E94\n" "0 3 10 35 1.395 112.633 E94\n" "0 3 10 37 1.023 118.596 E94\n" "0 3 10 40 1.216 113.680 E94\n" "0 3 10 41 1.098 115.913 E94\n" "0 3 10 45 1.212 113.447 E94\n" "0 3 10 63 1.091 115.381 E94\n", "0 3 10 64 1.048 117.574 E94\n" "0 4 10 20 0.816 131.702 E94\n" "0 6 10 28 0.829 113.214 E94\n" "0 6 10 37 1.393 111.476 E94\n" "0 8 10 28 0.703 117.160 E94\n" "0 8 10 37 1.167 115.599 E94\n" "0 9 10 26 0.847 123.206 E94\n" "0 9 10 28 0.751 114.501 E94\n" "0 9 10 37 1.222 113.553 E94\n" "0 9 10 39 1.310 115.309 E94\n" "0 10 10 28 0.735 
114.715 E94\n" "0 10 10 41 1.237 113.743 E94\n" "0 15 10 28 0.614 119.033 E94\n" "4 20 10 20 1.381 91.694 E94\n" "0 20 10 28 0.555 123.394 E94\n" "0 20 10 37 1.006 117.703 E94\n" "3 22 10 22 0.202 58.894 E94\n" "0 22 10 28 0.605 119.583 E94\n" "0 25 10 28 0.447 122.785 E94\n" "0 28 10 28 0.435 115.630 C94\n" "0 28 10 34 0.757 113.000 #E94\n" "0 28 10 35 0.836 114.000 #E94\n" "0 28 10 37 0.628 118.227 E94\n" "0 28 10 40 0.754 113.000 #E94\n" "0 28 10 41 0.560 128.067 E94\n" "0 28 10 63 0.640 118.099 E94\n" "0 28 10 64 0.643 117.575 E94\n" "0 37 10 40 1.232 112.412 E94\n" "0 0 15 0 0.000 97.900 0:*-15-* MMFF DEF\n" "4 0 15 0 0.000 80.200 4:*-15-* MMFF DEF\n" "0 1 15 1 1.654 97.335 C94\n" "0 1 15 2 1.321 97.853 E94\n" "0 1 15 3 1.325 97.326 E94\n" "0 1 15 4 1.344 97.370 E94\n" "0 1 15 9 1.725 89.814 E94\n" "0 1 15 15 1.377 100.316 C94\n" "0 1 15 18 1.309 101.641 E94\n" "0 1 15 19 1.007 102.069 E94\n" "0 1 15 20 1.366 94.913 E94\n" "0 1 15 22 1.268 99.768 E94\n" "0 1 15 25 0.967 104.732 E94\n" "0 1 15 30 1.379 95.613 E94\n" "0 1 15 37 1.439 97.111 C94\n" "0 1 15 40 1.555 94.643 E94\n" "0 1 15 57 1.301 98.686 E94\n" "0 1 15 63 1.304 98.330 E94\n" "0 1 15 64 1.306 98.066 E94\n" "0 1 15 71 0.931 96.494 C94\n" "0 2 15 2 1.434 95.108 E94\n" "0 2 15 3 1.318 98.813 E94\n" "0 2 15 4 1.426 95.780 E94\n", "0 2 15 15 1.457 97.789 E94\n" "0 2 15 37 1.362 96.942 E94\n" "0 2 15 43 1.709 90.872 E94\n" "0 3 15 3 1.402 95.424 E94\n" "0 3 15 6 1.804 94.075 E94\n" "0 3 15 15 1.403 99.399 E94\n" "4 3 15 20 1.666 79.842 E94\n" "0 3 15 37 1.308 98.541 E94\n" "0 3 15 63 1.390 96.051 E94\n" "0 3 15 71 0.830 97.000 #E94\n" "0 6 15 37 1.679 97.231 E94\n" "0 8 15 8 1.444 105.143 E94\n" "0 8 15 37 1.446 98.976 E94\n" "0 9 15 9 1.626 98.524 E94\n" "0 9 15 64 1.504 97.105 E94\n" "0 10 15 15 1.415 103.715 E94\n" "0 12 15 37 1.428 97.534 E94\n" "0 15 15 15 1.413 104.893 E94\n" "0 15 15 18 1.563 99.173 E94\n" "0 15 15 37 1.361 100.790 E94\n" "0 15 15 64 1.332 102.040 E94\n" "0 15 15 71 0.787 99.239 C94\n" "4 20 15 30 1.978 73.428 E94\n" "0 20 15 37 1.361 95.589 E94\n" "0 25 15 25 0.947 99.505 E94\n" "4 25 15 25 1.030 87.982 E94\n" "0 25 15 26 1.002 96.851 E94\n" "0 25 15 37 1.172 95.428 E94\n" "0 26 15 37 1.144 96.710 E94\n" "4 30 15 30 1.732 79.546 E94\n" "0 37 15 37 1.295 98.802 E94\n" "0 37 15 63 1.379 96.197 E94\n" "0 37 15 64 1.286 99.423 E94\n" "0 37 15 71 0.813 96.222 C94\n" "0 71 15 71 0.734 93.377 C94\n" "0 0 17 0 0.000 99.400 0:*-17-* MMFF DEF\n" "4 0 17 0 0.000 78.400 4:*-17-* MMFF DEF\n" "0 1 17 1 1.415 93.266 X94\n" "0 1 17 2 1.387 94.732 E94\n" "0 1 17 3 1.430 92.852 E94\n" "0 1 17 6 1.863 92.132 E94\n" "0 1 17 7 1.408 107.104 X94\n" "0 1 17 8 1.661 91.498 E94\n" "0 1 17 10 1.547 94.839 E94\n" "0 1 17 20 1.453 91.368 E94\n" "0 1 17 22 1.423 92.591 E94\n" "0 1 17 37 1.376 94.911 E94\n" "0 2 17 2 1.313 97.901 E94\n" "0 2 17 7 1.478 105.412 E94\n" "0 2 17 43 1.207 108.882 E94\n" "0 3 17 7 1.513 103.431 E94\n", "0 6 17 6 2.164 97.766 E94\n" "0 6 17 7 1.850 107.431 E94\n" "0 7 17 8 1.438 113.808 E94\n" "0 7 17 10 1.525 110.549 E94\n" "0 7 17 20 1.442 104.737 E94\n" "0 7 17 22 1.449 104.928 E94\n" "0 7 17 37 1.500 104.313 E94\n" "4 8 17 20 1.891 78.354 E94\n" "0 8 17 37 1.687 91.169 E94\n" "0 37 17 37 1.487 91.633 E94\n" "0 0 18 0 0.000 104.600 0:*-18-* MMFF DEF\n" "4 0 18 0 0.000 80.300 4:*-18-* MMFF DEF\n" "0 1 18 1 1.230 101.166 X94\n" "0 1 18 2 1.264 100.420 E94\n" "0 1 18 3 1.242 100.883 E94\n" "0 1 18 6 1.744 95.671 X94\n" "0 1 18 9 1.438 99.465 E94\n" "0 1 18 20 1.224 101.315 E94\n" "0 1 18 22 1.207 101.417 
E94\n" "0 1 18 32 1.446 107.066 X94\n" "0 1 18 37 1.234 101.070 E94\n" "0 1 18 43 1.449 98.014 X94\n" "0 1 18 48 1.277 106.586 X94\n" "0 1 18 62 1.374 102.402 X94\n" "0 2 18 2 1.254 101.492 E94\n" "0 2 18 6 1.664 98.668 E94\n" "0 2 18 9 1.539 96.849 E94\n" "0 2 18 32 1.422 108.979 E94\n" "0 2 18 37 1.263 100.489 E94\n" "0 2 18 48 1.083 116.668 E94\n" "0 3 18 9 1.418 100.361 E94\n" "0 3 18 32 1.557 103.453 E94\n" "0 3 18 43 1.350 101.747 E94\n" "0 6 18 6 1.922 103.052 X94\n" "0 6 18 9 1.916 97.446 E94\n" "0 6 18 32 1.837 108.063 X94\n" "0 6 18 37 1.528 102.229 E94\n" "0 6 18 43 1.644 103.815 E94\n" "0 9 18 12 1.464 101.180 E94\n" "0 9 18 32 1.583 109.945 E94\n" "0 9 18 37 1.358 102.378 E94\n" "0 9 18 43 1.323 109.227 E94\n" "0 12 18 32 1.584 103.959 E94\n" "0 12 18 37 1.376 98.976 E94\n" "0 15 18 32 1.497 107.170 E94\n" "0 15 18 37 1.324 101.399 E94\n" "0 20 18 32 1.383 109.292 E94\n" "0 20 18 37 1.108 106.508 E94\n" "4 20 18 43 1.831 80.297 E94\n" "0 22 18 32 1.465 105.247 E94\n" "0 32 18 32 1.569 120.924 X94\n", "0 32 18 37 1.497 105.280 X94\n" "0 32 18 39 1.804 101.600 X94\n" "0 32 18 43 1.569 108.548 X94\n" "0 32 18 48 1.229 126.841 X94\n" "0 32 18 55 1.509 112.548 E94\n" "0 32 18 58 1.592 106.139 E94\n" "0 32 18 62 1.326 121.426 X94\n" "0 32 18 63 1.571 103.212 E94\n" "0 32 18 64 1.634 101.771 E94\n" "0 32 18 80 1.400 110.401 E94\n" "0 37 18 37 1.157 104.380 E94\n" "0 37 18 39 1.404 99.854 X94\n" "0 37 18 43 1.416 99.200 X94\n" "0 37 18 48 1.330 104.466 E94\n" "0 37 18 55 1.397 100.926 E94\n" "0 37 18 62 1.178 110.665 E94\n" "0 37 18 63 1.202 102.735 E94\n" "0 43 18 43 1.545 99.905 X94\n" "0 43 18 64 1.285 104.868 E94\n" "0 0 19 0 0.000 108.700 0:*-19-* MMFF DEF\n" "4 0 19 0 0.000 89.900 4:*-19-* MMFF DEF\n" "0 1 19 1 0.616 113.339 E94\n" "0 1 19 5 0.390 110.795 X94\n" "0 1 19 6 0.777 113.958 X94\n" "0 1 19 8 0.716 111.521 E94\n" "0 1 19 9 0.779 106.380 E94\n" "0 1 19 12 0.729 108.947 X94\n" "0 1 19 20 0.656 108.828 E94\n" "0 1 19 40 0.754 108.858 E94\n" "0 1 19 63 0.699 106.924 E94\n" "0 1 19 75 0.530 111.633 E94\n" "0 2 19 12 0.819 102.981 E94\n" "0 5 19 5 0.258 108.699 X94\n" "0 5 19 6 0.520 109.677 X94\n" "0 5 19 8 0.461 109.070 E94\n" "0 5 19 12 0.446 106.756 X94\n" "0 6 19 6 1.051 111.280 E94\n" "0 6 19 12 0.968 106.022 E94\n" "0 6 19 37 0.870 108.096 E94\n" "0 8 19 8 0.862 108.099 E94\n" "0 8 19 12 0.786 110.683 E94\n" "0 12 19 12 0.879 104.597 E94\n" "0 15 19 15 0.816 108.681 E94\n" "4 20 19 20 0.802 89.931 E94\n" "0 37 19 37 0.726 105.045 E94\n" "0 0 20 0 0.000 113.200 0:*-20-* MMFF DEF\n" "4 0 20 0 0.000 88.800 4:*-20-* MMFF DEF\n" "0 1 20 1 0.943 113.131 E94\n" "0 1 20 3 0.906 114.940 E94\n" "0 1 20 5 0.417 114.057 C94\n" "0 1 20 6 1.231 110.677 E94\n", "0 1 20 8 1.080 111.090 E94\n" "0 1 20 10 1.100 110.057 E94\n" "0 1 20 11 1.173 110.993 E94\n" "0 1 20 12 0.976 114.773 E94\n" "0 1 20 15 1.035 111.226 E94\n" "0 1 20 18 0.978 115.383 E94\n" "0 1 20 20 0.502 113.313 C94\n" "0 1 20 22 0.915 115.201 E94\n" "0 1 20 25 0.744 116.096 E94\n" "0 1 20 26 0.721 117.611 E94\n" "0 1 20 30 0.908 115.220 E94\n" "0 1 20 34 1.090 110.505 E94\n" "0 1 20 37 0.947 112.650 E94\n" "0 1 20 41 0.973 111.787 E94\n" "0 1 20 43 1.087 110.187 E94\n" "0 1 20 45 1.132 108.074 E94\n" "0 2 20 3 0.982 111.060 E94\n" "0 2 20 5 0.596 113.035 E94\n" "0 2 20 6 1.139 115.851 E94\n" "0 2 20 12 0.951 116.750 E94\n" "0 2 20 20 0.931 114.138 E94\n" "0 3 20 3 0.982 109.919 E94\n" "0 3 20 5 0.624 112.989 C94\n" "0 3 20 6 1.157 113.611 E94\n" "4 3 20 8 1.473 87.271 E94\n" "0 3 20 10 1.016 113.988 E94\n" "0 3 20 11 
1.184 109.849 E94\n" "0 3 20 12 0.969 114.891 E94\n" "0 3 20 13 1.008 110.951 E94\n" "0 3 20 20 0.849 118.273 E94\n" "4 3 20 20 1.524 88.961 C94\n" "0 3 20 34 1.137 107.667 E94\n" "4 3 20 37 1.382 85.619 E94\n" "0 3 20 43 0.960 116.707 E94\n" "0 4 20 5 0.584 115.078 E94\n" "0 4 20 20 0.920 115.312 E94\n" "0 5 20 5 0.439 109.107 C94\n" "0 5 20 6 0.818 111.352 C94\n" "0 5 20 8 0.728 114.011 C94\n" "0 5 20 9 0.657 112.826 E94\n" "0 5 20 10 0.663 112.010 E94\n" "0 5 20 12 0.339 114.117 C94\n" "0 5 20 15 0.562 114.339 E94\n" "0 5 20 17 0.561 113.000 #E94\n" "0 5 20 18 0.605 111.570 E94\n" "0 5 20 20 0.564 113.940 C94\n" "0 5 20 26 0.472 109.722 E94\n" "0 5 20 30 0.688 116.038 C94\n" "0 5 20 34 0.661 112.000 #E94\n" "0 5 20 37 0.552 115.670 E94\n" "0 5 20 40 0.682 111.331 E94\n", "0 5 20 43 0.655 111.686 E94\n" "0 6 20 6 1.443 114.408 E94\n" "0 6 20 10 1.225 116.666 E94\n" "0 6 20 13 1.162 114.868 E94\n" "0 6 20 20 1.109 116.117 E94\n" "4 6 20 20 1.433 93.413 C94\n" "0 6 20 22 1.106 117.205 E94\n" "0 6 20 30 1.144 114.705 E94\n" "4 6 20 30 1.658 87.873 E94\n" "0 8 20 20 1.185 105.606 E94\n" "4 8 20 20 1.486 91.244 C94\n" "0 8 20 26 0.874 111.782 E94\n" "0 9 20 20 1.103 109.640 E94\n" "0 10 20 15 1.170 109.525 E94\n" "0 10 20 17 1.127 110.564 E94\n" "0 10 20 18 1.404 100.845 E94\n" "0 10 20 20 1.032 113.170 E94\n" "4 10 20 20 1.468 87.497 E94\n" "4 10 20 30 1.507 86.657 E94\n" "0 10 20 37 0.963 117.360 E94\n" "0 11 20 11 1.504 108.020 E94\n" "0 11 20 17 1.221 109.460 E94\n" "0 11 20 20 1.051 116.673 E94\n" "0 11 20 30 0.997 120.309 E94\n" "0 12 20 12 1.020 117.603 E94\n" "0 12 20 19 0.973 105.821 E94\n" "0 12 20 20 0.866 118.108 C94\n" "0 12 20 30 0.887 120.399 E94\n" "0 13 20 13 1.077 113.361 E94\n" "0 13 20 20 0.938 115.037 E94\n" "0 14 20 20 0.837 112.888 E94\n" "0 15 20 15 1.094 114.048 E94\n" "0 15 20 20 1.058 109.793 E94\n" "4 15 20 20 1.324 90.483 E94\n" "0 15 20 30 0.960 115.468 E94\n" "4 15 20 30 1.447 86.726 E94\n" "4 17 20 17 1.309 94.977 E94\n" "0 17 20 20 0.930 116.108 E94\n" "0 18 20 20 1.007 113.480 E94\n" "4 18 20 20 1.355 90.185 E94\n" "0 18 20 41 1.241 102.656 E94\n" "0 19 20 19 0.567 122.298 E94\n" "4 19 20 19 0.921 88.477 E94\n" "0 20 20 20 1.008 108.644 E94\n" "4 20 20 20 1.149 90.294 C94\n" "0 20 20 22 0.840 119.817 E94\n" "4 20 20 22 1.364 86.669 E94\n" "4 20 20 25 1.181 84.818 E94\n" "0 20 20 30 0.994 109.745 E94\n" "4 20 20 30 1.399 85.303 C94\n" "0 20 20 34 1.069 111.143 E94\n", "4 20 20 34 1.382 90.128 E94\n" "0 20 20 37 0.833 119.709 E94\n" "4 20 20 37 1.346 86.810 E94\n" "0 20 20 40 1.097 110.254 E94\n" "0 20 20 41 0.922 114.408 E94\n" "0 20 20 43 0.964 116.540 E94\n" "4 20 20 43 1.290 92.879 E94\n" "0 20 20 45 1.083 110.090 E94\n" "0 22 20 22 0.866 118.829 E94\n" "4 22 20 22 1.649 79.399 E94\n" "4 26 20 26 0.789 96.811 E94\n" "0 26 20 34 0.843 113.805 E94\n" "0 34 20 41 1.070 111.943 E94\n" "0 37 20 43 0.954 117.365 E94\n" "0 0 22 0 0.000 116.100 0:*-22-* MMFF DEF\n" "3 0 22 0 0.000 59.400 3::*-22-* MMFF DEF\n" "4 0 22 0 0.000 91.600 4:*-22-* MMFF DEF\n" "0 1 22 1 0.903 116.483 E94\n" "0 1 22 2 0.884 118.360 E94\n" "0 1 22 3 0.836 121.424 E94\n" "0 1 22 4 0.900 117.720 E94\n" "0 1 22 5 0.604 111.788 E94\n" "0 1 22 6 1.179 113.545 E94\n" "0 1 22 8 0.973 117.469 E94\n" "0 1 22 17 1.070 109.087 E94\n" "0 1 22 18 1.097 108.265 E94\n" "0 1 22 22 0.871 118.246 E94\n" "0 1 22 37 0.882 118.041 E94\n" "0 1 22 43 1.014 114.899 E94\n" "3 2 22 2 0.263 48.820 E94\n" "0 2 22 3 0.956 114.147 E94\n" "0 2 22 4 0.784 126.957 E94\n" "0 2 22 5 0.573 115.869 E94\n" "0 2 22 6 1.012 
123.319 E94\n" "0 2 22 22 0.880 118.260 E94\n" "3 2 22 22 0.166 60.845 E94\n" "0 2 22 45 1.009 116.146 E94\n" "0 3 22 3 0.819 122.977 E94\n" "0 3 22 4 0.876 119.718 E94\n" "0 3 22 5 0.559 116.738 E94\n" "0 3 22 6 1.184 113.646 E94\n" "0 3 22 8 1.072 112.261 E94\n" "0 3 22 10 0.987 117.750 E94\n" "0 3 22 12 0.930 118.047 E94\n" "4 3 22 20 1.267 90.869 E94\n" "0 3 22 22 0.861 119.252 E94\n" "4 3 22 22 1.196 93.287 E94\n" "4 3 22 30 1.301 89.217 E94\n" "0 3 22 37 0.852 120.464 E94\n" "0 3 22 40 1.033 114.288 E94\n" "0 3 22 43 1.124 109.441 E94\n", "0 3 22 45 1.117 110.033 E94\n" "0 4 22 5 0.560 118.000 #E94\n" "0 4 22 6 1.200 113.650 E94\n" "0 4 22 8 0.966 119.034 E94\n" "0 4 22 15 0.931 120.455 E94\n" "0 4 22 22 0.877 118.890 E94\n" "0 4 22 45 1.089 112.227 E94\n" "0 5 22 5 0.242 114.938 C94\n" "0 5 22 6 0.683 117.836 E94\n" "0 5 22 8 0.621 115.758 E94\n" "0 5 22 10 0.658 113.806 E94\n" "0 5 22 11 0.776 108.296 X94\n" "0 5 22 12 0.620 109.865 X94\n" "0 5 22 20 0.623 110.000 #E94\n" "0 5 22 22 0.583 117.875 C94\n" "0 5 22 37 0.532 119.438 E94\n" "0 5 22 40 0.653 112.855 E94\n" "0 5 22 41 0.519 122.000 #E94\n" "0 5 22 43 0.658 112.128 E94\n" "0 5 22 45 0.665 112.000 #E94\n" "0 6 22 12 1.136 118.409 E94\n" "0 6 22 17 1.328 108.583 E94\n" "0 6 22 18 1.381 107.009 E94\n" "0 6 22 22 1.124 115.942 E94\n" "3 6 22 22 0.205 60.711 E94\n" "0 6 22 37 1.093 118.170 E94\n" "3 6 22 43 0.179 68.138 E94\n" "0 6 22 45 1.422 108.368 E94\n" "0 8 22 22 0.925 120.144 E94\n" "3 8 22 22 0.176 61.507 E94\n" "0 10 22 22 0.916 121.411 E94\n" "3 10 22 22 0.184 60.603 E94\n" "0 11 22 11 1.610 102.859 E94\n" "0 11 22 22 1.062 116.086 X94\n" "0 12 22 12 1.067 114.988 E94\n" "0 12 22 22 0.925 117.971 X94\n" "0 13 22 13 1.085 113.473 E94\n" "0 13 22 22 0.908 117.606 E94\n" "0 15 22 22 0.918 120.404 E94\n" "0 17 22 22 1.029 111.106 E94\n" "0 18 22 22 1.078 109.054 E94\n" "0 20 22 22 0.812 122.430 E94\n" "4 20 22 22 1.198 92.930 E94\n" "0 22 22 22 0.787 124.070 E94\n" "3 22 22 22 0.171 60.000 C94\n" "4 22 22 22 1.225 91.653 E94\n" "0 22 22 30 0.777 124.514 E94\n" "0 22 22 34 0.983 116.415 E94\n" "0 22 22 37 0.847 120.135 E94\n" "3 22 22 40 0.178 61.163 E94\n" "0 22 22 41 0.886 118.045 E94\n", "3 22 22 43 0.176 61.536 E94\n" "0 22 22 45 1.022 114.380 E94\n" "0 34 22 41 1.008 116.095 E94\n" "0 37 22 37 0.846 120.774 E94\n" "3 37 22 37 0.237 51.029 E94\n" "0 37 22 43 0.936 119.789 E94\n" "0 0 25 0 0.000 106.500 0:*-25-* MMFF DEF\n" "4 0 25 0 0.000 89.100 4:*-25-* MMFF DEF\n" "0 1 25 1 1.072 99.158 X94\n" "0 1 25 3 1.268 91.423 E94\n" "0 1 25 6 1.394 98.288 X94\n" "0 1 25 8 1.150 101.775 E94\n" "0 1 25 12 1.180 98.890 E94\n" "0 1 25 15 1.074 103.431 E94\n" "0 1 25 25 0.852 100.707 E94\n" "0 1 25 32 1.186 107.891 X94\n" "0 1 25 37 0.972 104.924 E94\n" "0 1 25 40 1.358 93.644 E94\n" "0 1 25 43 1.190 98.760 X94\n" "0 1 25 71 0.537 109.363 E94\n" "0 1 25 72 0.976 111.306 X94\n" "0 2 25 6 1.302 102.892 E94\n" "0 2 25 8 1.022 109.148 E94\n" "0 2 25 10 1.629 85.839 E94\n" "0 2 25 32 0.983 120.127 E94\n" "0 2 25 72 0.863 119.249 E94\n" "0 3 25 6 1.277 103.026 E94\n" "0 3 25 32 1.164 109.307 E94\n" "0 6 25 6 1.769 99.311 X94\n" "0 6 25 8 1.419 104.161 E94\n" "0 6 25 9 1.403 105.407 E94\n" "0 6 25 10 1.448 102.194 E94\n" "0 6 25 11 1.680 99.260 E94\n" "0 6 25 12 1.489 98.818 E94\n" "0 6 25 32 1.501 109.688 X94\n" "0 6 25 37 1.312 102.280 E94\n" "0 6 25 39 1.617 97.314 E94\n" "0 6 25 40 1.380 105.601 E94\n" "0 6 25 71 0.844 100.242 E94\n" "0 6 25 72 1.219 112.058 E94\n" "0 8 25 8 1.224 105.341 E94\n" "0 8 25 10 1.214 104.893 E94\n" "0 8 25 
11 1.411 101.655 E94\n" "0 8 25 20 1.010 108.094 E94\n" "0 8 25 32 1.217 114.325 E94\n" "0 8 25 37 1.106 104.742 E94\n" "0 8 25 40 1.265 103.617 E94\n" "0 8 25 72 0.977 117.767 E94\n" "0 9 25 32 1.232 114.493 E94\n" "0 10 25 10 1.346 98.856 E94\n" "0 10 25 32 1.273 110.640 E94\n", "0 10 25 72 1.021 114.624 E94\n" "0 11 25 32 1.528 106.045 E94\n" "0 12 25 12 1.303 99.224 E94\n" "0 12 25 32 1.305 106.320 E94\n" "0 15 25 15 1.113 107.673 E94\n" "4 15 25 15 1.264 93.138 E94\n" "0 15 25 32 1.248 107.964 E94\n" "0 15 25 72 0.933 119.729 E94\n" "4 20 25 20 1.220 85.039 E94\n" "0 20 25 72 0.965 111.595 E94\n" "0 25 25 72 0.890 106.612 E94\n" "0 32 25 32 1.248 122.857 X94\n" "0 32 25 37 1.097 113.430 E94\n" "0 32 25 39 1.605 99.255 E94\n" "0 32 25 40 1.122 119.057 E94\n" "0 32 25 43 1.257 110.308 X94\n" "0 32 25 57 1.219 108.740 E94\n" "0 32 25 63 1.211 108.168 E94\n" "0 32 25 71 0.642 117.733 X94\n" "0 32 25 72 1.050 121.823 E94\n" "0 37 25 37 0.947 107.124 E94\n" "0 37 25 40 0.965 112.107 E94\n" "0 37 25 72 0.868 118.776 E94\n" "0 40 25 40 1.496 95.270 E94\n" "0 40 25 72 1.035 114.441 E94\n" "0 57 25 57 1.059 102.995 E94\n" "0 63 25 63 1.032 102.950 E94\n" "0 71 25 71 0.419 100.483 X94\n" "0 0 26 0 0.000 98.100 0:*-26-* MMFF DEF\n" "4 0 26 0 0.000 83.600 4:*-26-* MMFF DEF\n" "0 1 26 1 1.085 98.054 E94\n" "0 1 26 8 1.263 96.331 E94\n" "0 1 26 10 1.115 102.175 E94\n" "0 1 26 12 1.147 98.926 X94\n" "0 1 26 15 1.141 100.260 E94\n" "0 1 26 20 1.075 98.171 E94\n" "0 1 26 26 0.997 92.571 E94\n" "0 1 26 37 1.081 98.754 E94\n" "0 1 26 71 0.672 97.353 X94\n" "0 6 26 6 1.833 97.935 E94\n" "0 6 26 11 1.663 100.061 E94\n" "0 6 26 12 1.442 99.021 E94\n" "0 8 26 8 1.189 105.662 E94\n" "0 8 26 12 1.028 110.069 E94\n" "0 8 26 34 1.509 93.096 E94\n" "0 11 26 11 1.757 94.795 E94\n" "0 12 26 15 1.271 99.730 E94\n" "0 12 26 34 1.508 90.565 E94\n" "0 12 26 40 1.165 103.783 E94\n" "0 12 26 71 0.704 96.577 X94\n" "0 15 26 26 1.047 96.592 E94\n", "0 15 26 40 1.543 91.164 E94\n" "4 20 26 20 1.252 83.624 E94\n" "0 71 26 71 0.473 94.470 X94\n" "0 0 30 0 0.000 134.200 0:*-30-* MMFF DEF\n" "1 0 30 0 0.000 131.800 1:*-30-* MMFF DEF\n" "4 0 30 0 0.000 97.700 4:*-30-* MMFF DEF\n" "7 0 30 0 0.000 92.300 7:*-30-* MMFF DEF\n" "1 2 30 3 0.778 128.756 E94\n" "0 2 30 15 0.805 130.439 E94\n" "0 2 30 20 0.727 132.187 E94\n" "0 2 30 22 0.737 131.100 E94\n" "1 2 30 30 0.751 132.225 E94\n" "1 3 30 4 0.721 134.566 E94\n" "1 3 30 5 0.410 135.975 E94\n" "1 3 30 6 0.845 137.596 E94\n" "1 3 30 20 0.714 130.677 E94\n" "7 3 30 20 1.280 89.957 E94\n" "1 3 30 30 0.857 122.418 E94\n" "7 3 30 30 1.260 93.102 E94\n" "0 4 30 20 0.690 136.444 E94\n" "0 5 30 20 0.390 131.835 C94\n" "0 5 30 30 0.364 132.652 C94\n" "0 6 30 30 0.876 139.045 E94\n" "0 15 30 15 0.876 130.718 E94\n" "4 15 30 15 1.239 101.359 E94\n" "0 15 30 30 0.782 132.228 E94\n" "4 15 30 30 1.141 100.902 E94\n" "4 20 30 30 1.117 95.513 C94\n" "7 20 30 30 1.191 93.909 E94\n" "0 20 30 40 0.769 134.526 E94\n" "1 20 30 67 0.704 138.631 E94\n" "4 22 30 22 1.179 93.007 E94\n" "8 30 30 30 1.230 93.732 E94\n" "0 30 30 40 0.706 145.470 E94\n" "1 30 30 67 0.907 125.792 E94\n" "0 0 34 0 0.000 109.400 0:*-34-* MMFF DEF\n" "4 0 34 0 0.000 89.400 4:*-34-* MMFF DEF\n" "0 1 34 1 0.862 112.251 C94\n" "0 1 34 2 1.154 109.212 E94\n" "0 1 34 8 1.330 106.399 E94\n" "0 1 34 9 1.166 112.989 E94\n" "0 1 34 10 1.388 104.291 E94\n" "0 1 34 20 1.201 106.135 E94\n" "0 1 34 26 0.913 112.004 E94\n" "0 1 34 36 0.576 111.206 C94\n" "0 1 34 37 1.141 109.045 E94\n" "0 2 34 36 0.694 112.000 #E94\n" "0 8 34 36 0.796 109.753 
E94\n" "0 9 34 36 0.793 108.649 E94\n" "0 10 34 36 0.828 108.000 #E94\n" "4 20 34 20 1.448 89.411 E94\n", "0 20 34 36 0.665 112.526 E94\n" "0 22 34 36 0.694 110.000 #E94\n" "0 36 34 36 0.578 107.787 C94\n" "0 36 34 37 0.717 108.668 E94\n" "0 36 34 43 0.840 108.000 #E94\n" "0 0 37 0 0.000 118.800 0:*-37-* MMFF DEF\n" "1 0 37 0 0.000 115.900 1:*-37-* MMFF DEF\n" "3 0 37 0 0.000 64.700 3::*-37-* MMFF DEF\n" "4 0 37 0 0.000 91.800 4:*-37-* MMFF DEF\n" "0 1 37 37 0.803 120.419 C94\n" "0 1 37 38 0.992 118.432 E94\n" "0 1 37 58 1.027 116.528 E94\n" "0 1 37 63 0.837 123.024 E94\n" "0 1 37 64 0.821 124.073 E94\n" "0 1 37 69 1.038 115.506 E94\n" "1 2 37 37 0.712 119.695 C94\n" "1 2 37 38 1.029 117.220 E94\n" "1 3 37 37 0.798 114.475 C94\n" "7 3 37 37 1.320 90.784 E94\n" "1 3 37 38 1.109 112.724 E94\n" "1 3 37 58 1.134 111.566 E94\n" "1 3 37 69 1.119 111.916 E94\n" "1 4 37 37 0.906 119.614 E94\n" "1 4 37 38 1.087 114.623 E94\n" "0 5 37 37 0.563 120.571 C94\n" "0 5 37 38 0.693 115.588 C94\n" "0 5 37 58 0.699 113.316 E94\n" "0 5 37 63 0.702 121.238 C94\n" "0 5 37 64 0.523 121.446 C94\n" "0 5 37 69 0.794 111.638 C94\n" "0 5 37 78 0.563 119.432 E94\n" "0 6 37 37 0.968 116.495 C94\n" "0 6 37 38 1.324 115.886 E94\n" "0 6 37 64 1.139 118.868 E94\n" "1 9 37 37 0.974 121.003 E94\n" "1 9 37 38 1.137 117.591 E94\n" "0 10 37 37 1.025 117.918 E94\n" "0 10 37 38 1.088 120.135 E94\n" "0 10 37 58 1.077 120.925 E94\n" "0 11 37 37 1.094 118.065 E94\n" "0 11 37 38 1.223 117.328 E94\n" "0 12 37 37 0.950 118.495 E94\n" "0 12 37 38 1.126 113.859 E94\n" "0 12 37 64 1.076 111.320 E94\n" "0 13 37 37 0.917 118.117 E94\n" "0 14 37 37 0.861 118.045 E94\n" "0 15 37 37 0.755 121.037 C94\n" "0 15 37 38 1.027 119.421 E94\n" "0 15 37 64 0.976 117.125 E94\n" "0 17 37 37 0.930 119.408 E94\n" "0 17 37 38 1.179 110.828 E94\n", "0 17 37 64 0.946 118.357 E94\n" "0 18 37 37 1.029 113.991 X94\n" "0 18 37 38 1.278 106.908 E94\n" "0 18 37 64 0.975 117.029 E94\n" "0 19 37 37 0.660 125.278 E94\n" "0 20 37 37 0.744 129.614 E94\n" "4 20 37 37 1.217 93.425 E94\n" "0 22 37 37 0.805 125.777 E94\n" "3 22 37 37 0.152 64.704 E94\n" "0 22 37 38 0.904 124.494 E94\n" "0 25 37 37 0.718 121.600 E94\n" "0 26 37 37 0.691 122.967 E94\n" "0 34 37 37 1.030 116.423 E94\n" "0 34 37 64 1.074 113.905 E94\n" "0 35 37 37 0.964 131.858 E94\n" "0 35 37 38 1.187 124.980 E94\n" "0 37 37 37 0.669 119.977 C94\n" "1 37 37 37 0.864 122.227 E94\n" "4 37 37 37 1.380 90.193 E94\n" "0 37 37 38 0.596 126.139 C94\n" "1 37 37 38 1.033 117.271 E94\n" "0 37 37 39 1.038 117.619 E94\n" "1 37 37 39 1.078 114.622 E94\n" "0 37 37 40 1.045 121.633 C94\n" "0 37 37 41 0.892 119.572 E94\n" "0 37 37 43 1.013 117.860 X94\n" "0 37 37 45 1.114 112.337 E94\n" "0 37 37 46 0.999 120.038 E94\n" "0 37 37 55 1.002 120.163 E94\n" "0 37 37 56 1.020 117.801 E94\n" "1 37 37 57 0.881 120.932 E94\n" "0 37 37 58 1.014 120.052 E94\n" "1 37 37 58 1.127 112.251 E94\n" "0 37 37 61 1.072 115.515 E94\n" "0 37 37 62 0.941 124.384 E94\n" "0 37 37 63 0.478 111.243 C94\n" "1 37 37 63 0.894 120.190 E94\n" "0 37 37 64 0.423 112.567 C94\n" "1 37 37 64 0.912 118.973 E94\n" "1 37 37 67 1.064 114.980 E94\n" "0 37 37 69 0.872 116.778 C94\n" "1 37 37 69 1.042 116.438 E94\n" "0 37 37 78 0.974 116.439 E94\n" "0 37 37 81 1.034 115.664 E94\n" "1 37 37 81 1.104 111.759 E94\n" "0 38 37 38 0.725 128.938 C94\n" "0 38 37 40 1.024 123.755 E94\n" "0 38 37 43 1.165 115.355 E94\n" "0 38 37 58 0.979 128.362 E94\n" "1 38 37 58 1.257 111.356 E94\n" "0 38 37 62 1.148 118.349 E94\n", "0 38 37 63 1.095 115.386 E94\n" "1 38 37 63 1.076 114.910 
E94\n" "0 38 37 64 1.070 116.605 E94\n" "1 38 37 67 1.289 109.610 E94\n" "0 40 37 58 1.103 119.417 E94\n" "0 40 37 63 0.943 122.904 E94\n" "0 40 37 64 0.931 123.541 E94\n" "0 40 37 78 0.931 123.604 E94\n" "0 41 37 58 0.967 120.535 E94\n" "0 45 37 63 1.031 116.781 E94\n" "0 45 37 64 1.156 110.199 E94\n" "0 45 37 69 1.248 111.041 E94\n" "0 58 37 62 1.016 125.987 E94\n" "0 58 37 63 1.152 112.628 E94\n" "0 58 37 64 1.291 106.250 E94\n" "1 58 37 64 1.108 113.166 E94\n" "0 58 37 78 1.188 110.842 E94\n" "0 0 38 0 0.000 113.800 0:*-38-* MMFF DEF\n" "0 37 38 37 1.085 115.406 C94\n" "0 37 38 38 1.289 112.016 C94\n" "0 37 38 63 1.230 110.181 E94\n" "0 37 38 64 1.207 111.032 E94\n" "0 37 38 69 1.238 114.692 E94\n" "0 37 38 78 1.118 114.813 E94\n" "0 38 38 38 1.343 118.516 E94\n" "0 0 39 0 0.000 120.700 0:*-39-* MMFF DEF\n" "1 0 39 0 0.000 125.400 1:*-39-* MMFF DEF\n" "0 1 39 63 0.854 123.380 C94\n" "0 1 39 65 1.111 118.049 E94\n" "1 2 39 63 0.858 130.275 E94\n" "1 2 39 65 0.900 133.220 E94\n" "1 3 39 63 0.900 127.045 E94\n" "1 3 39 65 1.126 118.909 E94\n" "0 6 39 63 1.166 122.985 E94\n" "0 6 39 65 1.396 117.707 E94\n" "0 8 39 63 1.000 124.868 E94\n" "0 8 39 65 1.057 127.145 E94\n" "1 9 39 63 0.981 127.725 E94\n" "1 9 39 65 1.170 122.487 E94\n" "0 10 39 63 1.109 119.788 E94\n" "0 10 39 65 1.118 124.961 E94\n" "0 18 39 63 1.108 117.061 X94\n" "0 23 39 63 0.551 127.770 C94\n" "0 23 39 65 0.752 118.352 C94\n" "0 23 39 78 0.581 124.000 #E94\n" "0 25 39 63 0.667 134.561 E94\n" "0 25 39 65 0.944 118.135 E94\n" "0 37 39 63 0.900 127.009 E94\n" "1 37 39 63 0.922 125.312 E94\n" "1 37 39 65 1.080 121.090 E94\n" "0 40 39 63 0.984 126.832 E94\n", "0 45 39 63 1.056 121.641 E94\n" "0 45 39 65 1.354 112.464 E94\n" "0 63 39 63 1.152 109.599 C94\n" "1 63 39 63 0.887 128.078 E94\n" "0 63 39 64 1.004 120.577 E94\n" "1 63 39 64 0.899 126.936 E94\n" "0 63 39 65 1.284 112.087 C94\n" "1 63 39 65 1.146 117.990 E94\n" "0 63 39 78 1.300 105.800 E94\n" "0 64 39 65 1.007 126.117 E94\n" "0 65 39 65 1.462 116.898 C94\n" "0 0 40 0 0.000 115.000 0:*-40-* MMFF DEF\n" "3 0 40 0 0.000 57.800 3::*-40-* MMFF DEF\n" "0 1 40 1 1.064 113.703 E94\n" "0 1 40 2 0.998 118.873 E94\n" "0 1 40 3 1.007 118.319 E94\n" "0 1 40 6 1.421 109.742 E94\n" "0 1 40 9 1.203 113.198 E94\n" "0 1 40 10 1.232 111.320 E94\n" "0 1 40 11 1.436 104.665 E94\n" "0 1 40 12 1.202 109.320 E94\n" "0 1 40 20 1.047 114.970 E94\n" "0 1 40 25 0.912 114.483 E94\n" "0 1 40 28 0.689 112.374 C94\n" "0 1 40 30 1.024 118.604 E94\n" "0 1 40 37 0.835 107.349 C94\n" "0 1 40 39 1.254 110.622 E94\n" "0 1 40 40 1.183 114.011 E94\n" "0 1 40 45 1.223 112.226 E94\n" "0 1 40 46 1.025 122.982 E94\n" "0 1 40 63 1.084 114.473 E94\n" "0 1 40 64 1.064 115.483 E94\n" "0 2 40 2 0.997 120.651 E94\n" "0 2 40 3 0.981 121.660 E94\n" "0 2 40 6 1.316 115.626 E94\n" "0 2 40 9 1.118 119.196 E94\n" "0 2 40 10 1.142 117.260 E94\n" "0 2 40 19 0.732 128.087 E94\n" "0 2 40 28 0.767 111.053 C94\n" "0 2 40 37 1.049 117.022 E94\n" "0 2 40 39 1.192 115.106 E94\n" "0 2 40 40 1.060 122.253 E94\n" "0 2 40 63 1.008 120.447 E94\n" "0 3 40 3 0.883 128.240 E94\n" "0 3 40 8 1.259 111.557 E94\n" "0 3 40 9 1.106 119.822 E94\n" "0 3 40 10 1.269 111.261 E94\n" "0 3 40 12 1.146 112.718 E94\n" "0 3 40 15 1.105 117.871 E94\n" "0 3 40 20 1.130 112.139 E94\n" "0 3 40 22 1.072 114.420 E94\n", "0 3 40 25 0.820 121.724 E94\n" "0 3 40 28 0.700 114.808 C94\n" "0 3 40 37 1.056 116.655 E94\n" "0 3 40 40 1.147 117.511 E94\n" "0 3 40 64 1.132 113.602 E94\n" "0 6 40 28 0.889 110.000 #E94\n" "0 8 40 28 0.764 111.915 E94\n" "0 8 40 37 1.216 
112.920 E94\n" "0 8 40 63 1.351 108.085 E94\n" "0 9 40 28 0.774 112.549 E94\n" "0 9 40 37 1.236 112.751 E94\n" "0 10 40 28 0.799 109.725 E94\n" "0 10 40 37 1.316 108.686 E94\n" "0 11 40 37 1.546 101.687 E94\n" "0 15 40 15 1.154 121.497 E94\n" "3 22 40 22 0.204 57.777 E94\n" "0 22 40 37 1.066 114.220 E94\n" "0 22 40 63 1.126 112.006 E94\n" "0 25 40 28 0.485 120.000 #E94\n" "0 25 40 37 0.868 117.977 E94\n" "0 26 40 28 0.506 118.000 #E94\n" "0 26 40 37 0.812 122.336 E94\n" "0 28 40 28 0.560 109.160 C94\n" "0 28 40 30 0.656 119.230 E94\n" "0 28 40 37 0.662 110.288 C94\n" "0 28 40 39 0.789 110.951 E94\n" "0 28 40 40 0.782 111.731 E94\n" "0 28 40 45 0.674 120.000 #E94\n" "0 28 40 54 0.738 118.714 E94\n" "0 28 40 63 0.670 116.188 E94\n" "0 28 40 64 0.659 117.057 E94\n" "0 28 40 78 0.618 119.829 E94\n" "0 37 40 37 1.004 119.018 E94\n" "0 37 40 45 1.376 106.579 E94\n" "0 37 40 54 1.394 107.777 E94\n" "0 37 40 63 1.060 116.867 E94\n" "0 45 40 64 1.283 111.332 E94\n" "0 45 40 78 1.410 105.678 E94\n" "0 46 40 64 1.189 116.345 E94\n" "0 0 41 0 0.000 118.300 0:*-41-* MMFF DEF\n" "0 1 41 32 1.209 114.689 C94\n" "0 1 41 72 1.024 114.936 X94\n" "0 2 41 32 1.309 115.461 C94\n" "0 3 41 32 1.210 114.810 E94\n" "0 5 41 32 0.912 113.960 C94\n" "0 6 41 72 1.319 113.899 E94\n" "0 9 41 72 1.089 117.795 E94\n" "0 10 41 72 1.039 121.240 E94\n" "0 20 41 32 1.090 120.965 E94\n" "0 22 41 32 1.079 122.748 E94\n" "0 32 41 32 1.181 130.600 C94\n", "0 32 41 37 1.136 118.871 E94\n" "0 32 41 41 1.401 107.694 E94\n" "0 37 41 72 1.035 114.919 E94\n" "0 55 41 72 0.982 123.972 E94\n" "0 62 41 72 1.052 120.425 E94\n" "0 72 41 72 0.912 130.128 X94\n" "0 72 41 80 1.094 112.175 E94\n" "0 0 43 0 0.000 113.300 0:*-43-* MMFF DEF\n" "0 1 43 1 1.109 110.353 E94\n" "0 1 43 2 1.052 114.321 E94\n" "0 1 43 3 0.938 121.050 E94\n" "0 1 43 4 0.927 123.204 E94\n" "0 1 43 18 1.116 115.011 X94\n" "0 1 43 25 0.853 115.637 X94\n" "0 1 43 28 0.646 113.739 X94\n" "0 1 43 37 1.083 112.511 E94\n" "0 1 43 45 1.140 115.034 E94\n" "0 1 43 64 1.025 116.188 E94\n" "0 2 43 18 1.227 110.268 E94\n" "0 3 43 18 1.011 121.488 X94\n" "0 3 43 20 1.053 113.913 E94\n" "4 3 43 20 1.327 93.575 E94\n" "0 3 43 28 0.626 117.464 X94\n" "0 4 43 28 0.616 122.000 E94\n" "0 4 43 45 1.253 112.373 E94\n" "0 6 43 18 1.673 104.311 E94\n" "3 6 43 22 0.279 54.827 E94\n" "0 6 43 28 0.868 110.000 #E94\n" "0 6 43 37 1.519 105.833 E94\n" "0 6 43 43 1.603 108.652 E94\n" "0 8 43 18 1.511 104.036 E94\n" "0 8 43 28 0.794 110.320 E94\n" "0 15 43 15 1.558 103.008 E94\n" "0 15 43 18 1.409 108.458 E94\n" "0 17 43 18 1.367 111.904 E94\n" "0 18 43 18 1.144 120.463 E94\n" "0 18 43 20 0.961 123.768 E94\n" "4 18 43 20 1.451 92.867 E94\n" "0 18 43 22 1.171 112.379 E94\n" "0 18 43 28 0.628 116.881 X94\n" "0 18 43 34 1.324 111.347 E94\n" "0 18 43 37 1.185 112.132 X94\n" "0 18 43 43 1.379 109.036 E94\n" "0 18 43 64 1.108 116.279 E94\n" "0 20 43 28 0.626 115.000 #E94\n" "3 22 43 22 0.209 57.032 E94\n" "0 25 43 28 0.468 118.274 X94\n" "0 28 43 28 0.477 112.596 X94\n" "0 28 43 34 0.810 110.000 #E94\n" "0 28 43 37 0.669 113.350 X94\n" "0 28 43 64 0.658 115.293 E94\n", "0 0 44 0 0.000 91.600 0:*-44-* MMFF DEF\n" "0 63 44 63 1.962 88.495 C94\n" "0 63 44 65 2.261 94.137 C94\n" "0 63 44 78 1.738 86.270 E94\n" "0 63 44 80 1.748 86.194 E94\n" "0 65 44 65 1.530 101.147 E94\n" "0 65 44 80 1.629 93.534 E94\n" "0 78 44 78 0.903 119.401 E94\n" "0 0 45 0 0.000 116.700 0:*-45-* MMFF DEF\n" "0 1 45 32 1.260 118.182 X94\n" "0 2 45 32 1.294 118.082 X94\n" "0 3 45 32 1.343 115.589 E94\n" "0 6 45 32 1.787 111.682 X94\n" "0 8 
45 32 1.515 115.695 E94\n" "0 9 45 32 1.339 123.850 E94\n" "0 10 45 32 1.578 112.194 E94\n" "0 20 45 32 1.245 118.893 E94\n" "0 22 45 32 1.293 117.503 E94\n" "0 32 45 32 1.467 128.036 X94\n" "0 32 45 37 1.298 117.857 E94\n" "0 32 45 39 1.715 107.633 E94\n" "0 32 45 40 1.497 116.432 E94\n" "0 32 45 43 1.545 113.711 E94\n" "0 32 45 63 1.335 116.765 E94\n" "0 32 45 64 1.330 116.908 E94\n" "0 32 45 78 1.394 114.962 E94\n" "0 0 46 0 0.000 111.000 0:*-46-* MMFF DEF\n" "0 1 46 7 1.440 110.492 X94\n" "0 2 46 7 1.489 112.709 E94\n" "0 7 46 8 1.724 109.817 E94\n" "0 7 46 37 1.519 110.569 E94\n" "0 7 46 40 1.650 111.405 E94\n" "0 0 48 0 0.000 118.400 0:*-48-* MMFF DEF\n" "0 3 48 18 1.065 122.928 E94\n" "0 18 48 28 0.736 113.969 X94\n" "0 0 49 0 0.000 111.400 0:*-49-* MMFF DEF\n" "0 50 49 50 0.522 111.433 C94\n" "0 0 51 0 0.000 111.400 0:*-51-* MMFF DEF\n" "0 3 51 52 0.913 111.360 X94\n" "0 0 53 0 0.000 180.000 0:*-53-* MMFF DEF\n" "0 3 53 47 0.574 180.000 E94\n" "0 9 53 47 0.649 180.000 E94\n" "0 0 54 0 0.000 119.500 0:*-54-* MMFF DEF\n" "1 0 54 0 0.000 115.700 1:*-54-* MMFF DEF\n" "0 1 54 1 0.923 121.439 E94\n" "0 1 54 3 0.707 124.083 C94\n" "0 1 54 36 0.294 122.881 C94\n" "0 3 54 6 1.376 115.398 E94\n" "1 3 54 9 1.128 114.457 E94\n" "0 3 54 36 0.685 119.698 C94\n" "1 3 54 40 1.105 116.439 E94\n", "0 6 54 36 0.826 115.000 #E94\n" "0 9 54 40 1.195 123.403 E94\n" "0 36 54 36 0.300 113.943 C94\n" "0 0 55 0 0.000 120.800 0:*-55-* MMFF DEF\n" "0 1 55 1 0.951 119.946 E94\n" "0 1 55 36 0.307 126.448 C94\n" "0 1 55 37 1.032 117.035 E94\n" "0 1 55 57 0.751 120.606 C94\n" "0 1 55 80 0.972 121.082 E94\n" "0 2 55 3 1.041 116.994 E94\n" "0 2 55 36 0.621 120.000 #E94\n" "0 2 55 57 1.047 118.847 E94\n" "0 3 55 9 1.053 121.298 E94\n" "0 3 55 36 0.567 124.000 #E94\n" "0 3 55 57 0.953 123.573 E94\n" "0 3 55 62 1.041 122.163 E94\n" "0 6 55 36 0.833 114.000 #E94\n" "0 6 55 57 1.408 112.958 E94\n" "0 8 55 36 0.656 122.000 #E94\n" "0 8 55 57 1.259 113.209 E94\n" "0 9 55 57 1.001 126.373 E94\n" "0 18 55 36 0.578 125.000 #E94\n" "0 18 55 57 1.054 122.320 E94\n" "0 36 55 36 0.355 117.729 C94\n" "0 36 55 37 0.623 120.405 E94\n" "0 36 55 41 0.485 134.689 E94\n" "0 36 55 57 0.663 119.499 C94\n" "0 36 55 64 0.632 118.000 #E94\n" "0 36 55 80 0.684 115.880 E94\n" "0 37 55 57 1.110 115.816 E94\n" "0 41 55 57 0.911 126.801 E94\n" "0 57 55 62 1.054 123.366 E94\n" "0 57 55 64 1.026 119.465 E94\n" "0 0 56 0 0.000 119.100 0:*-56-* MMFF DEF\n" "0 1 56 36 0.472 123.585 C94\n" "0 1 56 57 0.774 119.267 C94\n" "0 2 56 9 1.181 116.311 E94\n" "0 2 56 36 0.582 124.037 E94\n" "0 2 56 57 1.029 118.607 E94\n" "0 3 56 36 0.585 121.521 E94\n" "0 3 56 57 0.885 126.567 E94\n" "0 8 56 36 0.785 111.009 E94\n" "0 8 56 57 1.288 110.357 E94\n" "0 9 56 36 0.683 120.258 E94\n" "0 9 56 57 1.186 115.661 E94\n" "0 36 56 36 0.450 117.534 C94\n" "0 36 56 37 0.602 120.000 #E94\n" "0 36 56 57 0.646 120.649 C94\n" "0 36 56 63 0.579 123.766 E94\n" "0 36 56 80 0.625 120.000 #E94\n" "0 37 56 57 1.058 115.912 E94\n", "0 57 56 63 1.019 118.915 E94\n" "0 0 57 0 0.000 120.900 0:*-57-* MMFF DEF\n" "1 0 57 0 0.000 118.100 1:*-57-* MMFF DEF\n" "0 1 57 55 1.017 117.865 E94\n" "1 3 57 55 1.085 115.034 E94\n" "0 5 57 55 0.674 116.747 C94\n" "0 6 57 55 1.279 119.257 E94\n" "1 9 57 55 0.980 128.143 E94\n" "0 12 57 55 1.058 118.327 E94\n" "0 15 57 55 0.983 123.646 E94\n" "0 25 57 55 0.790 122.889 E94\n" "1 37 57 55 0.967 121.379 E94\n" "0 55 57 55 0.855 126.476 C94\n" "1 55 57 63 1.016 118.800 E94\n" "1 55 57 64 1.039 117.166 E94\n" "0 56 57 56 1.342 120.010 C94\n" "0 0 58 0 0.000 
119.000 0:*-58-* MMFF DEF\n" "1 0 58 0 0.000 119.900 1:*-58-* MMFF DEF\n" "0 1 58 37 1.003 119.236 E94\n" "0 1 58 64 0.961 121.070 E94\n" "1 3 58 37 0.983 121.506 E94\n" "0 6 58 37 1.371 114.370 E94\n" "0 18 58 37 1.005 120.665 E94\n" "0 36 58 37 0.650 118.713 E94\n" "0 36 58 63 0.650 118.000 #E94\n" "0 36 58 64 0.620 120.051 E94\n" "0 37 58 37 0.996 122.710 E94\n" "1 37 58 37 1.036 118.260 E94\n" "0 37 58 63 1.087 116.989 E94\n" "0 37 58 64 1.061 117.942 E94\n" "0 0 59 0 0.000 105.600 0:*-59-* MMFF DEF\n" "0 63 59 63 1.273 106.313 C94\n" "0 63 59 65 1.750 107.755 C94\n" "0 63 59 78 1.713 101.179 E94\n" "0 63 59 80 1.599 105.341 E94\n" "0 65 59 65 1.754 107.683 E94\n" "0 65 59 78 1.644 107.142 E94\n" "0 65 59 82 1.864 103.624 E94\n" "0 0 61 0 0.000 180.000 0:*-61-* MMFF DEF\n" "0 1 61 60 0.475 180.000 E94\n" "0 37 61 42 0.536 180.000 E94\n" "0 37 61 60 0.484 180.000 E94\n" "0 0 62 0 0.000 108.300 0:*-62-* MMFF DEF\n" "0 1 62 18 1.316 109.273 X94\n" "0 2 62 23 0.817 105.542 X94\n" "0 3 62 3 1.318 106.821 E94\n" "0 3 62 18 1.311 111.144 E94\n" "0 3 62 55 1.528 102.414 E94\n" "0 9 62 18 1.515 107.660 E94\n" "0 18 62 37 1.229 114.618 E94\n" "0 18 62 41 1.366 108.722 E94\n", "0 18 62 63 1.427 106.284 E94\n" "0 18 62 64 1.317 110.366 E94\n" "0 0 63 0 0.000 123.300 0:*-63-* MMFF DEF\n" "1 0 63 0 0.000 124.300 1:*-63-* MMFF DEF\n" "0 1 63 39 0.935 121.832 E94\n" "0 1 63 44 0.902 122.101 E94\n" "0 1 63 59 1.175 115.253 E94\n" "0 1 63 64 0.737 131.378 E94\n" "0 1 63 66 0.865 127.610 E94\n" "1 2 63 39 1.027 117.864 E94\n" "1 2 63 59 0.987 127.524 E94\n" "1 2 63 64 0.730 133.818 E94\n" "1 2 63 66 0.828 132.383 E94\n" "1 3 63 39 0.900 125.395 E94\n" "1 3 63 44 0.935 120.481 E94\n" "1 3 63 59 1.158 117.219 E94\n" "1 3 63 64 0.766 130.065 E94\n" "1 3 63 66 0.950 123.049 E94\n" "1 4 63 44 0.848 126.602 E94\n" "1 4 63 59 1.211 114.804 E94\n" "1 4 63 64 0.795 127.817 E94\n" "0 5 63 39 0.617 121.127 C94\n" "0 5 63 44 0.393 126.141 C94\n" "0 5 63 59 0.784 114.076 C94\n" "0 5 63 64 0.577 131.721 C94\n" "0 5 63 66 0.643 125.134 C94\n" "0 5 63 78 0.482 130.000 #E94\n" "0 5 63 81 0.588 124.000 #E94\n" "0 6 63 39 1.234 120.509 E94\n" "0 6 63 59 1.564 113.514 E94\n" "0 6 63 64 0.951 131.301 E94\n" "1 9 63 39 1.068 121.741 E94\n" "1 9 63 44 0.963 124.598 E94\n" "1 9 63 64 0.804 134.237 E94\n" "1 9 63 66 0.912 133.020 E94\n" "0 10 63 39 1.084 120.356 E94\n" "0 10 63 44 1.112 115.732 E94\n" "0 10 63 59 1.307 116.218 E94\n" "0 10 63 64 0.867 128.750 E94\n" "0 10 63 66 0.981 127.617 E94\n" "0 12 63 39 1.111 114.439 E94\n" "0 12 63 44 1.035 119.321 E94\n" "0 12 63 64 0.838 126.226 E94\n" "0 12 63 66 0.980 122.280 E94\n" "0 15 63 39 1.064 117.958 E94\n" "0 15 63 44 0.952 125.654 E94\n" "0 15 63 64 0.813 129.284 E94\n" "0 15 63 66 0.962 124.490 E94\n" "0 18 63 44 1.110 116.077 E94\n" "0 18 63 64 0.740 135.028 E94\n" "0 19 63 39 0.647 132.369 E94\n", "0 19 63 64 0.517 141.986 E94\n" "0 25 63 39 0.597 139.439 E94\n" "0 25 63 66 0.776 122.699 E94\n" "0 35 63 59 1.351 124.475 E94\n" "0 35 63 64 0.808 145.098 E94\n" "0 37 63 39 1.011 132.046 C94\n" "1 37 63 39 0.934 123.481 E94\n" "0 37 63 44 0.764 133.930 E94\n" "1 37 63 44 0.915 121.637 E94\n" "0 37 63 59 1.041 124.836 E94\n" "1 37 63 59 1.214 114.211 E94\n" "0 37 63 64 0.679 122.881 C94\n" "1 37 63 64 0.742 131.784 E94\n" "0 37 63 66 0.742 140.668 E94\n" "1 37 63 66 0.871 128.130 E94\n" "0 38 63 39 1.022 124.814 E94\n" "0 38 63 64 0.910 126.513 E94\n" "0 39 63 39 0.910 131.461 E94\n" "1 39 63 39 1.105 119.174 E94\n" "0 39 63 40 1.112 119.261 E94\n" "1 39 63 44 1.144 114.126 
E94\n" "0 39 63 45 1.166 115.115 E94\n" "1 39 63 57 0.931 123.222 E94\n" "0 39 63 58 1.042 123.231 E94\n" "1 39 63 63 0.949 122.353 E94\n" "0 39 63 64 0.813 107.255 C94\n" "1 39 63 64 0.943 123.441 E94\n" "0 39 63 66 1.012 110.865 C94\n" "1 39 63 66 1.095 120.834 E94\n" "0 40 63 44 0.943 125.881 E94\n" "0 40 63 59 1.298 117.078 E94\n" "0 40 63 64 0.845 130.865 E94\n" "0 40 63 66 0.940 130.926 E94\n" "0 44 63 45 1.125 114.633 E94\n" "0 44 63 56 1.030 120.178 E94\n" "0 44 63 62 0.991 122.899 E94\n" "1 44 63 63 0.894 123.341 E94\n" "0 44 63 64 0.853 108.480 C94\n" "0 44 63 66 0.854 114.516 C94\n" "0 44 63 72 0.915 129.129 E94\n" "0 44 63 78 1.217 106.254 E94\n" "0 44 63 81 1.278 108.400 E94\n" "0 45 63 59 1.467 108.824 E94\n" "0 45 63 64 0.940 122.725 E94\n" "0 45 63 66 1.164 116.157 E94\n" "0 56 63 66 0.875 134.888 E94\n" "1 57 63 66 0.945 123.246 E94\n" "0 58 63 64 0.965 122.522 E94\n" "0 59 63 64 1.035 110.108 C94\n" "0 59 63 66 1.181 115.592 C94\n" "0 62 63 66 0.976 128.662 E94\n", "1 63 63 64 0.776 129.499 E94\n" "1 63 63 66 0.929 124.689 E94\n" "0 66 63 72 0.911 129.610 E94\n" "0 0 64 0 0.000 121.400 0:*-64-* MMFF DEF\n" "1 0 64 0 0.000 121.700 1:*-64-* MMFF DEF\n" "0 1 64 63 0.776 128.041 E94\n" "0 1 64 64 0.766 128.061 E94\n" "0 1 64 65 0.963 120.640 E94\n" "0 1 64 66 0.952 120.685 E94\n" "0 1 64 81 1.050 114.735 E94\n" "0 1 64 82 1.013 117.414 E94\n" "1 2 64 63 0.861 122.947 E94\n" "1 2 64 64 0.816 125.433 E94\n" "1 2 64 65 0.907 125.781 E94\n" "1 2 64 66 1.010 118.540 E94\n" "1 2 64 82 0.923 124.473 E94\n" "1 3 64 63 0.828 124.890 E94\n" "1 3 64 64 0.774 128.286 E94\n" "1 3 64 65 0.973 120.954 E94\n" "1 3 64 66 0.949 121.821 E94\n" "1 3 64 81 0.995 118.754 E94\n" "1 4 64 63 0.845 123.889 E94\n" "1 4 64 64 0.804 126.131 E94\n" "1 4 64 65 1.036 117.401 E94\n" "1 4 64 66 1.010 118.254 E94\n" "0 5 64 63 0.501 126.170 C94\n" "0 5 64 64 0.546 127.405 C94\n" "0 5 64 65 0.664 118.412 C94\n" "0 5 64 66 0.699 120.478 C94\n" "0 5 64 78 0.482 127.331 E94\n" "0 5 64 81 0.605 120.000 #E94\n" "0 5 64 82 0.597 122.000 #E94\n" "0 6 64 63 1.112 120.985 E94\n" "0 6 64 64 1.043 123.922 E94\n" "0 6 64 65 1.348 115.506 E94\n" "0 6 64 66 1.156 123.890 E94\n" "1 9 64 64 0.959 120.924 E94\n" "1 9 64 65 1.098 119.529 E94\n" "1 9 64 66 1.013 123.743 E94\n" "0 10 64 63 0.937 123.695 E94\n" "0 10 64 64 0.893 125.735 E94\n" "0 10 64 65 1.016 124.788 E94\n" "0 10 64 66 1.065 121.125 E94\n" "0 12 64 63 0.845 126.259 E94\n" "0 12 64 64 0.869 124.058 E94\n" "0 12 64 65 1.020 120.198 E94\n" "0 12 64 66 0.971 122.900 E94\n" "0 13 64 63 0.845 123.004 E94\n" "0 13 64 64 0.883 120.111 E94\n" "0 15 64 63 0.870 124.581 E94\n" "0 15 64 64 0.882 123.309 E94\n", "0 15 64 65 1.008 121.049 E94\n" "0 15 64 66 0.990 121.826 E94\n" "0 18 64 65 1.065 118.404 E94\n" "0 18 64 66 1.067 118.002 E94\n" "0 37 64 63 0.906 117.966 C94\n" "0 37 64 64 0.854 136.087 C94\n" "1 37 64 64 0.772 128.673 E94\n" "0 37 64 65 0.799 134.844 E94\n" "1 37 64 65 0.942 122.866 E94\n" "0 37 64 66 0.845 130.337 E94\n" "0 37 64 78 0.706 135.432 E94\n" "0 37 64 81 0.917 124.856 E94\n" "0 37 64 82 0.946 123.684 E94\n" "1 37 64 82 1.000 119.086 E94\n" "0 38 64 63 0.988 121.242 E94\n" "0 38 64 64 0.858 129.014 E94\n" "0 38 64 65 0.989 127.335 E94\n" "0 38 64 66 1.022 124.454 E94\n" "0 39 64 64 1.086 114.312 E94\n" "0 39 64 65 1.060 122.481 E94\n" "1 39 64 65 1.204 114.188 E94\n" "1 39 64 66 1.170 115.157 E94\n" "0 40 64 63 0.948 123.538 E94\n" "0 40 64 64 0.928 123.853 E94\n" "0 40 64 65 0.958 129.125 E94\n" "0 40 64 81 1.035 123.154 E94\n" "0 40 64 82 1.183 
115.934 E94\n" "0 43 64 63 0.885 126.749 E94\n" "0 43 64 64 0.898 124.876 E94\n" "0 43 64 65 1.024 123.706 E94\n" "0 43 64 66 1.017 123.409 E94\n" "0 45 64 63 0.981 120.063 E94\n" "0 45 64 64 0.921 123.014 E94\n" "0 45 64 65 1.276 110.521 E94\n" "0 45 64 66 1.199 113.371 E94\n" "0 55 64 64 0.907 124.405 E94\n" "0 55 64 65 1.002 125.220 E94\n" "1 57 64 65 1.020 117.950 E94\n" "1 57 64 66 0.959 121.017 E94\n" "0 58 64 63 1.075 115.646 E94\n" "0 58 64 64 0.815 131.812 E94\n" "0 58 64 66 0.978 126.562 E94\n" "0 62 64 64 0.885 126.560 E94\n" "0 62 64 65 1.073 121.703 E94\n" "0 63 64 64 0.866 108.239 C94\n" "1 63 64 64 0.827 124.584 E94\n" "0 63 64 66 1.038 111.621 C94\n" "0 63 64 78 1.172 105.176 E94\n" "0 63 64 81 1.164 110.895 E94\n" "0 63 64 82 1.395 101.902 E94\n" "0 64 64 64 0.967 115.037 E94\n" "0 64 64 65 0.916 113.570 C94\n" "1 64 64 66 1.003 118.067 E94\n", "0 64 64 78 1.194 103.479 E94\n" "0 64 64 82 1.210 108.553 E94\n" "0 65 64 66 1.055 115.369 C94\n" "0 65 64 81 1.168 116.240 E94\n" "0 66 64 66 0.932 129.624 E94\n" "0 0 65 0 0.000 104.500 0:*-65-* MMFF DEF\n" "0 39 65 64 1.738 101.550 C94\n" "0 39 65 66 1.589 106.360 C94\n" "0 39 65 82 1.740 101.208 E94\n" "0 44 65 64 1.430 103.829 C94\n" "0 44 65 66 1.366 110.552 E94\n" "0 44 65 78 1.419 104.213 E94\n" "0 59 65 64 1.788 103.452 C94\n" "0 59 65 81 1.774 104.872 E94\n" "0 0 66 0 0.000 106.900 0:*-66-* MMFF DEF\n" "0 63 66 64 1.206 103.779 C94\n" "0 63 66 66 1.406 106.735 C94\n" "0 63 66 78 1.339 105.365 E94\n" "0 63 66 81 1.408 106.806 E94\n" "0 64 66 65 1.709 107.658 C94\n" "0 65 66 66 1.932 111.306 C94\n" "0 0 67 0 0.000 119.900 0:*-67-* MMFF DEF\n" "1 0 67 0 0.000 116.600 1:*-67-* MMFF DEF\n" "0 1 67 3 0.982 120.683 E94\n" "0 1 67 9 1.178 115.581 E94\n" "0 1 67 32 1.233 119.589 E94\n" "0 1 67 67 1.257 111.574 E94\n" "1 2 67 32 1.118 126.320 E94\n" "1 2 67 67 1.231 113.438 E94\n" "0 3 67 23 0.567 128.000 #E94\n" "0 3 67 32 1.290 120.945 E94\n" "1 3 67 37 1.122 113.631 E94\n" "1 9 67 30 1.142 118.899 E94\n" "0 9 67 32 1.325 125.531 E94\n" "1 9 67 37 1.186 115.979 E94\n" "0 23 67 32 0.805 120.000 #E94\n" "1 30 67 32 1.370 114.854 E94\n" "1 32 67 37 1.240 120.019 E94\n" "0 32 67 67 1.504 117.327 E94\n" "1 37 67 67 1.310 110.017 E94\n" "0 0 68 0 0.000 108.800 0:*-68-* MMFF DEF\n" "0 1 68 1 1.159 108.238 C94\n" "0 1 68 23 0.772 107.200 C94\n" "0 1 68 32 0.958 110.757 C94\n" "0 23 68 23 0.650 104.892 C94\n" "0 23 68 32 0.659 112.977 C94\n" "0 0 69 0 0.000 120.300 0:*-69-* MMFF DEF\n" "0 32 69 37 1.123 121.777 C94\n" "0 32 69 38 1.486 117.217 E94\n" "0 37 69 37 1.223 116.447 C94\n" "0 38 69 38 1.122 125.930 E94\n" "0 31 70 31 0.658 103.978 C94\n" "0 0 73 0 0.000 106.600 0:*-73-* MMFF DEF\n" "0 1 73 32 1.590 100.180 X94\n" "0 1 73 72 1.481 96.166 X94\n" "0 32 73 32 1.665 115.012 X94\n" "0 32 73 72 1.326 115.134 X94\n" "0 0 74 0 0.000 113.000 0:*-74-* MMFF DEF\n" "0 3 74 7 1.357 113.010 X94\n" "0 0 75 0 0.000 94.900 0:*-75-* MMFF DEF\n" "0 1 75 3 1.138 96.779 E94\n" "0 3 75 19 1.044 91.970 E94\n" "0 3 75 71 0.729 95.899 X94\n" "0 0 76 0 0.000 107.600 0:*-76-* MMFF DEF\n" "0 76 76 76 1.434 109.889 X94\n" "0 76 76 78 1.493 103.519 X94\n" "0 78 76 78 1.235 109.421 E94\n" "0 0 77 0 0.000 109.500 0:*-77-* MMFF DEF\n" "0 32 77 32 1.652 109.472 X94\n" "0 0 78 0 0.000 121.900 0:*-78-* MMFF DEF\n" "1 0 78 0 0.000 126.100 1:*-78-* MMFF DEF\n" "0 1 78 78 0.744 130.960 E94\n" "0 1 78 81 0.938 121.477 E94\n" "1 3 78 78 0.827 125.468 E94\n" "1 3 78 81 0.922 123.748 E94\n" "0 5 78 76 0.584 123.407 X94\n" "0 5 78 78 0.546 128.000 C94\n" "0 5 78 79 0.617 
122.000 #E94\n" "0 5 78 81 0.542 109.881 C94\n" "1 9 78 78 0.863 129.501 E94\n" "1 9 78 81 0.991 125.857 E94\n" "0 37 78 76 0.770 137.282 E94\n" "0 37 78 78 0.803 128.249 E94\n" "0 37 78 81 0.864 128.714 E94\n" "0 38 78 78 0.844 130.617 E94\n" "0 38 78 81 1.023 123.532 E94\n" "0 39 78 64 0.734 138.714 E94\n" "0 39 78 78 1.202 109.426 E94\n" "0 40 78 76 0.930 130.150 E94\n" "0 40 78 78 0.778 135.746 E94\n" "0 40 78 81 1.058 121.251 E94\n" "0 44 78 63 0.677 141.902 E94\n" "0 44 78 64 0.663 142.589 E94\n" "0 44 78 66 0.816 134.701 E94\n" "0 44 78 78 1.089 111.702 E94\n" "0 45 78 76 1.199 114.467 E94\n" "0 45 78 78 0.915 125.050 E94\n" "0 45 78 81 1.216 112.926 E94\n" "0 59 78 64 0.963 128.471 E94\n" "0 59 78 65 1.097 128.375 E94\n" "0 59 78 78 1.443 105.916 E94\n" "0 63 78 64 0.942 117.779 E94\n" "0 64 78 65 0.835 131.530 E94\n" "0 64 78 78 1.038 111.834 E94\n" "0 66 78 78 1.030 118.376 E94\n" "0 76 78 76 1.245 113.179 X94\n" "0 76 78 78 1.159 111.900 E94\n" "0 78 78 78 1.336 99.459 E94\n" "0 78 78 81 1.302 105.130 C94\n" "0 79 78 81 1.217 114.792 E94\n" "0 0 79 0 0.000 103.400 0:*-79-* MMFF DEF\n" "0 78 79 81 1.569 102.043 E94\n" "0 79 79 81 1.625 104.857 E94\n" "0 0 80 0 0.000 121.900 0:*-80-* MMFF DEF\n" "1 0 80 0 0.000 128.200 1:*-80-* MMFF DEF\n" "0 1 80 81 0.864 127.147 E94\n" "1 3 80 81 0.886 128.181 E94\n" "0 5 80 81 0.651 125.682 C94\n" "0 18 80 81 1.032 120.869 E94\n" "0 41 80 81 0.909 125.057 E94\n" "0 44 80 55 0.918 127.755 E94\n" "0 44 80 81 1.184 112.411 E94\n" "0 55 80 59 1.254 120.263 E94\n" "0 55 80 81 0.991 127.612 E94\n" "0 56 80 81 1.003 126.038 E94\n" "0 59 80 81 1.439 112.030 E94\n" "0 81 80 81 1.205 108.609 C94\n" "0 0 81 0 0.000 119.500 0:*-81-* MMFF DEF\n" "0 1 81 63 0.996 120.129 E94\n" "0 1 81 64 0.978 119.970 E94\n" "0 1 81 78 0.879 126.535 E94\n" "0 1 81 79 1.144 116.113 E94\n" "0 1 81 80 0.895 126.324 E94\n" "1 2 81 78 0.927 125.080 E94\n" "1 2 81 80 0.895 128.399 E94\n" "1 9 81 78 1.015 124.270 E94\n" "1 9 81 80 1.106 120.028 E94\n" "0 36 81 64 0.522 130.295 E94\n" "0 36 81 66 0.583 128.738 E94\n" "0 36 81 78 0.578 124.658 C94\n" "0 36 81 80 0.575 124.787 C94\n" "0 37 81 64 0.929 122.408 E94\n" "1 37 81 64 0.983 119.722 E94\n" "0 37 81 65 1.184 114.158 E94\n" "1 37 81 65 1.281 110.477 E94\n" "1 37 81 78 0.884 126.208 E94\n" "1 37 81 80 0.940 123.333 E94\n" "0 63 81 64 1.115 114.945 E94\n" "0 64 81 65 1.075 122.099 E94\n" "0 64 81 80 1.143 113.176 E94\n" "0 66 81 80 1.067 122.250 E94\n" "0 78 81 80 0.957 110.556 C94\n" "0 79 81 80 1.379 107.936 E94\n" "0 32 82 59 1.666 114.660 E94\n" "0 32 82 64 1.075 131.706 E94\n" "0 32 82 65 1.238 129.293 E94\n" "0 59 82 64 1.563 105.660 E94\n" "0 64 82 65 1.281 112.955 E94\n", "EOS"}; class std::unique_ptr<MMFFStbnCollection> MMFFStbnCollection::ds_instance = nullptr; extern const std::string defaultMMFFStbn; MMFFStbnCollection *MMFFStbnCollection::getMMFFStbn( const std::string &mmffStbn) { if (!ds_instance || !mmffStbn.empty()) { ds_instance.reset(new MMFFStbnCollection(mmffStbn)); } return ds_instance.get(); } MMFFStbnCollection::MMFFStbnCollection(std::string mmffStbn) { if (mmffStbn.empty()) { mmffStbn = defaultMMFFStbn; } std::istringstream inStream(mmffStbn); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFStbn mmffStbnObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int stretchBendType = boost::lexical_cast<unsigned int>(*token); #else 
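        // Non-RDKIT_MMFF_PARAMS_USE_STD_MAP branch: the stretch-bend type and the
        // i/j/k MMFF atom types are appended to parallel flat vectors
        // (d_stretchBendType, d_iAtomType, d_jAtomType, d_kAtomType) that run
        // alongside d_params, instead of indexing the nested d_params map used in
        // the RDKIT_MMFF_PARAMS_USE_STD_MAP branch above.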
d_stretchBendType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int iAtomType = boost::lexical_cast<unsigned int>(*token); #else d_iAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int jAtomType = boost::lexical_cast<unsigned int>(*token); #else d_jAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int kAtomType = boost::lexical_cast<unsigned int>(*token); #else d_kAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; mmffStbnObj.kbaIJK = boost::lexical_cast<double>(*token); ++token; mmffStbnObj.kbaKJI = boost::lexical_cast<double>(*token); ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[stretchBendType][iAtomType][jAtomType][kAtomType] = mmffStbnObj; #else d_params.push_back(mmffStbnObj); #endif } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFStbn = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* MMFF STRETCH-BEND PARAMETERS- Rev: 23-FEB-93 Source: MMFF\n" "* C94 - CORE MMFF parameter, from fits to HF/6-31G* 2nd D's\n" "* X94 - EXTD MMFF parameter, also from fits to HF/6-31G* 2nd D's\n" "*\n" "* types I, J, K kbaIJK kbaKJI Source\n" "0 1 1 1 0.206 0.206 C94\n" "0 1 1 2 0.136 0.197 C94\n" "0 1 1 3 0.211 0.092 C94\n" "0 1 1 5 0.227 0.070 C94\n" "0 1 1 6 0.173 0.417 C94\n" "0 1 1 8 0.136 0.282 C94\n" "0 1 1 10 0.187 0.338 C94\n" "0 1 1 11 0.209 0.633 C94\n" "0 1 1 12 0.176 0.386 C94\n" "0 1 1 15 0.139 0.217 C94\n" "0 1 1 34 0.236 0.436 C94\n" "0 1 1 37 0.152 0.260 C94\n" "0 1 1 39 0.144 0.595 C94\n" "0 1 1 41 0.122 0.051 C94\n" "0 1 1 56 0.262 0.451 C94\n" "0 1 1 68 0.186 0.125 C94\n" "0 2 1 2 0.282 0.282 C94\n" "0 2 1 3 0.206 0.022 C94\n" "0 2 1 5 0.234 0.088 C94\n" "0 2 1 6 0.183 0.387 C94\n" "0 2 1 8 0.214 0.363 C94\n" "0 3 1 5 0.157 0.115 C94\n" "0 3 1 6 -0.036 0.456 C94\n" "0 3 1 10 0.038 0.195 C94\n" "0 5 1 5 0.115 0.115 C94\n" "0 5 1 6 0.013 0.436 C94\n" "0 5 1 8 0.027 0.358 C94\n" "0 5 1 9 0.040 0.418 C94\n" "0 5 1 10 0.043 0.261 C94\n" "0 5 1 11 0.003 0.452 C94\n" "0 5 1 12 -0.018 0.380 C94\n" "0 5 1 15 0.018 0.255 C94\n" "0 5 1 18 0.121 0.218 X94\n" "0 5 1 20 0.069 0.327 C94\n" "0 5 1 22 0.055 0.267 X94\n" "0 5 1 34 -0.003 0.342 C94\n" "0 5 1 37 0.074 0.287 C94\n" "0 5 1 39 0.092 0.607 C94\n" "0 5 1 40 0.023 0.335 C94\n" "0 5 1 41 0.093 0.118 C94\n" "0 5 1 54 0.016 0.343 C94\n" "0 5 1 55 0.030 0.397 C94\n" "0 5 1 56 0.031 0.384 C94\n" "0 5 1 68 0.041 0.216 C94\n" "0 6 1 6 0.320 0.320 C94\n" "0 6 1 37 0.310 0.160 C94\n" "0 11 1 11 0.586 0.586 C94\n" "0 12 1 12 0.508 0.508 C94\n" "0 1 2 1 0.250 0.250 C94\n" "0 1 2 2 0.203 0.207 C94\n" "2 1 2 2 0.222 0.269 C94\n" "2 1 2 3 0.244 0.292 C94\n" "0 1 2 5 0.215 0.128 C94\n" "2 1 2 37 0.246 0.260 C94\n" "1 2 2 2 0.250 0.219 C94\n" "2 2 2 3 0.155 0.112 C94\n" "0 2 2 5 0.207 0.157 C94\n" "1 2 2 5 0.267 0.159 C94\n" "0 2 2 6 0.118 0.576 C94\n" "2 2 2 37 0.143 0.172 C94\n" "0 2 2 40 0.289 0.390 C94\n" "0 2 2 41 0.191 -0.047 C94\n" "1 3 2 5 0.264 0.156 C94\n" "0 5 2 5 0.140 0.140 C94\n" "0 5 2 6 0.213 0.502 C94\n" "2 5 2 37 0.153 0.288 C94\n" "0 5 2 40 0.070 0.463 C94\n" "0 5 2 41 0.191 0.005 C94\n" "0 1 3 1 0.358 0.358 C94\n" "2 1 3 2 0.246 0.409 C94\n" "2 1 3 3 0.303 0.145 C94\n" "0 1 3 5 0.321 0.183 C94\n" "0 1 3 6 0.338 0.732 C94\n" "0 1 3 7 0.154 0.856 
C94\n" "0 1 3 10 0.223 0.732 C94\n" "2 1 3 37 0.217 0.207 C94\n" "1 2 3 5 0.407 0.159 C94\n" "1 2 3 6 0.429 0.473 C94\n" "1 2 3 7 0.214 0.794 C94\n" "1 2 3 9 0.227 0.610 C94\n" "1 2 3 10 0.298 0.600 C94\n" "1 3 3 5 0.251 0.133 C94\n" "1 3 3 6 0.066 0.668 C94\n" "1 3 3 7 -0.093 0.866 C94\n" "0 5 3 5 0.126 0.126 C94\n" "0 5 3 6 0.174 0.734 C94\n" "0 5 3 7 0.032 0.805 C94\n" "0 5 3 9 0.037 0.669 C94\n" "0 5 3 10 0.169 0.619 C94\n" "0 5 3 40 0.087 0.685 C94\n" "0 5 3 54 0.098 0.210 C94\n" "0 6 3 7 0.494 0.578 C94\n" "4 6 3 20 1.179 0.752 X94\n" "2 6 3 37 0.350 0.175 C94\n" "0 7 3 10 0.771 0.353 C94\n" "0 7 3 20 0.865 -0.181 C94\n" "2 7 3 37 0.707 0.007 C94\n" "0 9 3 40 0.680 0.260 C94\n" "0 10 3 10 1.050 1.050 C94\n" "4 20 3 20 0.536 0.536 C94\n" "0 40 3 40 0.482 0.482 C94\n" "0 1 6 1 0.309 0.309 C94\n" "0 1 6 2 0.157 0.375 C94\n" "0 1 6 3 -0.153 0.252 C94\n" "0 1 6 21 0.256 0.143 C94\n" "0 1 6 37 0.163 0.375 C94\n" "0 2 6 3 -0.228 0.052 C94\n" "0 2 6 29 0.259 0.163 C94\n" "4 3 6 20 0.456 0.379 X94\n" "0 3 6 24 0.215 0.064 C94\n" "0 3 6 37 -0.225 -0.320 C94\n" "0 8 6 21 0.304 0.055 C94\n" "0 10 6 21 0.419 0.158 C94\n" "0 18 6 33 0.309 0.120 X94\n" "4 20 6 20 0.739 0.739 C94\n" "0 29 6 37 0.130 0.241 C94\n" "0 31 6 31 0.227 0.227 X94\n" "0 1 8 1 0.312 0.312 C94\n" "0 1 8 6 0.212 0.354 C94\n" "0 1 8 23 0.309 0.135 C94\n" "0 6 8 23 0.418 0.020 C94\n" "4 20 8 20 0.653 0.653 C94\n" "0 20 8 23 0.128 0.122 C94\n" "0 23 8 23 0.190 0.190 C94\n" "0 1 9 3 0.326 0.580 C94\n" "0 3 9 27 0.464 0.222 C94\n" "0 1 10 1 0.063 0.063 C94\n" "0 1 10 3 -0.021 0.340 C94\n" "0 1 10 6 -0.024 0.374 C94\n" "0 1 10 28 0.155 -0.051 C94\n" "0 3 10 3 -0.219 -0.219 C94\n" "0 3 10 6 0.497 0.513 C94\n" "0 3 10 28 0.137 0.066 C94\n" "0 28 10 28 0.081 0.081 C94\n" "0 1 15 1 0.125 0.125 C94\n" "0 1 15 15 0.012 0.238 C94\n" "0 1 15 37 0.048 0.229 C94\n" "0 1 15 71 0.080 -0.012 C94\n" "0 15 15 71 0.172 -0.068 C94\n" "0 37 15 71 0.187 -0.027 C94\n" "0 71 15 71 0.045 0.045 C94\n" "0 1 18 1 0.023 0.023 X94\n" "0 1 18 6 0.003 0.213 X94\n" "0 1 18 32 -0.091 0.390 X94\n" "0 1 18 43 -0.008 0.607 X94\n" "0 6 18 6 0.088 0.088 X94\n" "0 6 18 32 0.123 0.369 X94\n" "0 32 18 32 0.404 0.404 X94\n" "0 32 18 43 0.384 0.281 X94\n" "0 43 18 43 0.428 0.428 X94\n" "0 1 20 5 0.290 0.098 C94\n" "0 1 20 20 0.179 0.004 C94\n" "0 3 20 5 -0.049 0.171 C94\n" "4 3 20 20 0.607 0.437 C94\n" "0 5 20 5 0.182 0.182 C94\n" "0 5 20 6 0.051 0.312 C94\n" "0 5 20 8 0.072 0.226 C94\n" "0 5 20 12 0.014 0.597 C94\n" "0 5 20 20 0.101 0.079 C94\n" "0 5 20 30 0.108 0.123 C94\n" "4 6 20 20 0.823 0.396 C94\n" "4 8 20 20 0.701 0.369 C94\n" "0 12 20 20 0.310 0.000 C94\n" "4 20 20 20 0.283 0.283 C94\n" "4 20 20 30 0.340 0.529 C94\n" "0 1 22 5 0.067 0.174 X94\n" "0 1 22 22 0.199 0.039 X94\n" "0 5 22 5 0.254 0.254 C94\n" "0 5 22 22 0.181 0.108 C94\n" "5 22 22 22 0.000 0.000 C94\n" "0 5 26 5 -0.121 -0.121 X94\n" "0 5 30 20 0.251 0.007 C94\n" "0 5 30 30 0.267 0.054 C94\n" "4 20 30 30 0.413 0.705 C94\n" "0 1 34 1 0.202 0.202 C94\n" "0 1 34 36 0.160 -0.009 C94\n" "0 36 34 36 0.087 0.087 C94\n" "0 1 37 37 0.485 0.311 C94\n" "1 2 37 37 0.321 0.235 C94\n" "1 3 37 37 0.179 0.217 C94\n" "0 5 37 37 0.279 0.250 C94\n" "0 5 37 38 0.267 0.389 C94\n" "0 5 37 63 0.216 0.434 C94\n" "0 5 37 64 0.167 0.364 C94\n" "0 5 37 69 0.273 0.391 C94\n" "0 6 37 37 0.830 0.339 C94\n" "0 15 37 37 0.650 0.259 C94\n" "0 37 37 37 -0.411 -0.411 C94\n" "0 37 37 38 -0.424 -0.466 C94\n" "0 37 37 40 0.429 0.901 C94\n" "0 37 37 63 -0.173 -0.215 C94\n" "0 37 37 64 -0.229 -0.229 C94\n" "0 37 37 69 -0.244 -0.555 C94\n" "0 38 
37 38 -0.516 -0.516 C94\n" "0 37 38 37 -0.342 -0.342 C94\n" "0 37 38 38 -0.164 -1.130 C94\n" "0 1 39 63 0.313 0.500 C94\n" "0 23 39 63 -0.131 0.422 C94\n" "0 23 39 65 -0.122 0.281 C94\n" "0 63 39 63 0.469 0.469 C94\n" "0 63 39 65 0.741 0.506 C94\n" "0 65 39 65 0.706 0.706 C94\n" "0 1 40 28 0.238 0.091 C94\n" "0 1 40 37 0.153 0.590 C94\n" "0 2 40 28 0.342 0.156 C94\n" "0 3 40 28 0.228 0.104 C94\n" "0 28 40 28 0.094 0.094 C94\n" "0 28 40 37 0.186 0.423 C94\n" "0 1 41 32 0.503 0.943 C94\n" "0 2 41 32 0.594 0.969 C94\n" "0 5 41 32 0.276 0.852 C94\n" "0 32 41 32 0.652 0.652 C94\n" "0 18 43 23 0.377 0.057 X94\n" "0 23 43 23 0.082 0.082 X94\n" "0 63 44 63 0.591 0.591 C94\n" "0 63 44 65 0.857 0.978 C94\n" "0 50 49 50 0.072 0.072 C94\n" "0 1 54 3 0.192 -0.051 C94\n" "0 1 54 36 0.240 0.079 C94\n" "0 3 54 36 0.005 0.127 C94\n" "0 36 54 36 0.148 0.148 C94\n" "0 1 55 36 0.189 0.033 C94\n" "0 1 55 57 0.166 0.211 C94\n" "0 36 55 36 0.106 0.106 C94\n" "0 36 55 57 0.093 0.080 C94\n" "0 1 56 36 0.211 -0.040 C94\n" "0 1 56 57 0.026 0.386 C94\n" "0 36 56 36 0.101 0.101 C94\n" "0 36 56 57 0.108 0.068 C94\n" "0 5 57 55 0.043 0.420 C94\n" "0 55 57 55 0.125 0.125 C94\n" "0 56 57 56 0.431 0.431 C94\n" "0 58 57 58 0.732 0.732 C94\n" "0 63 59 63 0.497 0.497 C94\n" "0 63 59 65 0.723 0.874 C94\n" "0 5 63 39 0.009 0.654 C94\n" "0 5 63 44 -0.015 0.446 C94\n" "0 5 63 59 0.067 0.588 C94\n" "0 5 63 64 0.055 0.370 C94\n" "0 5 63 66 0.110 0.464 C94\n" "0 37 63 39 0.178 0.523 C94\n" "0 37 63 64 -0.045 0.497 C94\n" "0 39 63 64 0.422 0.409 C94\n" "0 39 63 66 0.436 0.525 C94\n" "0 44 63 64 0.581 0.426 C94\n" "0 44 63 66 0.542 0.365 C94\n" "0 59 63 64 0.852 0.332 C94\n" "0 59 63 66 0.775 0.300 C94\n" "0 5 64 63 0.086 0.345 C94\n" "0 5 64 64 0.085 0.369 C94\n" "0 5 64 65 0.051 0.436 C94\n" "0 5 64 66 0.113 0.452 C94\n" "0 37 64 63 0.059 0.299 C94\n" "0 37 64 64 0.277 0.377 C94\n" "0 63 64 64 0.206 0.030 C94\n" "0 63 64 66 0.171 0.078 C94\n" "0 64 64 65 0.079 0.403 C94\n" "0 65 64 66 0.406 0.066 C94\n" "0 39 65 64 0.528 0.644 C94\n" "0 39 65 66 0.397 0.258 C94\n" "0 44 65 64 0.816 0.543 C94\n" "0 59 65 64 1.177 0.594 C94\n" "0 63 66 64 0.213 -0.173 C94\n" "0 63 66 66 0.234 0.077 C94\n" "0 64 66 65 -0.149 0.383 C94\n" "0 65 66 66 0.199 0.101 C94\n" "0 1 68 1 0.217 0.217 C94\n" "0 1 68 23 0.285 0.050 C94\n" "0 1 68 32 -0.047 0.503 C94\n" "0 23 68 23 0.145 0.145 C94\n" "0 23 68 32 -0.182 0.504 C94\n" "0 32 69 37 1.018 0.418 C94\n" "0 37 69 37 -0.169 -0.169 C94\n" "0 31 70 31 0.210 0.210 C94\n" "0 5 78 78 0.279 0.250 C94\n" "0 5 78 81 0.083 0.250 C94\n" "0 78 78 81 -0.398 0.314 C94\n" "0 5 80 81 -0.101 0.691 C94\n" "0 81 80 81 0.732 0.732 C94\n" "0 36 81 78 0.021 0.368 C94\n" "0 36 81 80 0.018 0.422 C94\n" "0 78 81 80 0.366 0.419 C94\n"; class std::unique_ptr<MMFFDfsbCollection> MMFFDfsbCollection::ds_instance = nullptr; extern const std::string defaultMMFFDfsb; MMFFDfsbCollection *MMFFDfsbCollection::getMMFFDfsb( const std::string &mmffDfsb) { if (!ds_instance || !mmffDfsb.empty()) { ds_instance.reset(new MMFFDfsbCollection(mmffDfsb)); } return ds_instance.get(); } MMFFDfsbCollection::MMFFDfsbCollection(std::string mmffDfsb) { if (mmffDfsb.empty()) { mmffDfsb = defaultMMFFDfsb; } std::istringstream inStream(mmffDfsb); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFStbn mmffStbnObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); unsigned int iAtomicNum = boost::lexical_cast<unsigned int>(*token); ++token; 
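      // Per the "Row in Periodic Table" header of defaultMMFFDfsb below, the three
      // indices parsed here (iAtomicNum, jAtomicNum, kAtomicNum) are periodic-table
      // rows IR, JR, KR rather than MMFF atom types; kbaIJK and kbaKJI carry the
      // corresponding default F(I_J,K) and F(K_J,I) stretch-bend values.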
unsigned int jAtomicNum = boost::lexical_cast<unsigned int>(*token); ++token; unsigned int kAtomicNum = boost::lexical_cast<unsigned int>(*token); ++token; mmffStbnObj.kbaIJK = boost::lexical_cast<double>(*token); ++token; mmffStbnObj.kbaKJI = boost::lexical_cast<double>(*token); ++token; d_params[iAtomicNum][jAtomicNum][kAtomicNum] = mmffStbnObj; } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFDfsb = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* MMFF Default Stretch-Bend Parameters\n" "* Row in \n" "* Periodic Table \n" "* IR JR KR F(I_J,K) F(K_J,I)\n" "0 1 0 0.15 0.15\n" "0 1 1 0.10 0.30\n" "0 1 2 0.05 0.35\n" "0 1 3 0.05 0.35\n" "0 1 4 0.05 0.35\n" "0 2 0 0.00 0.00\n" "0 2 1 0.00 0.15\n" "0 2 2 0.00 0.15\n" "0 2 3 0.00 0.15\n" "0 2 4 0.00 0.15\n" "1 1 1 0.30 0.30\n" "1 1 2 0.30 0.50\n" "1 1 3 0.30 0.50\n" "1 1 4 0.30 0.50\n" "2 1 2 0.50 0.50\n" "2 1 3 0.50 0.50\n" "2 1 4 0.50 0.50\n" "3 1 3 0.50 0.50\n" "3 1 4 0.50 0.50\n" "4 1 4 0.50 0.50\n" "1 2 1 0.30 0.30\n" "1 2 2 0.25 0.25\n" "1 2 3 0.25 0.25\n" "1 2 4 0.25 0.25\n" "2 2 2 0.25 0.25\n" "2 2 3 0.25 0.25\n" "2 2 4 0.25 0.25\n" "3 2 3 0.25 0.25\n" "3 2 4 0.25 0.25\n" "4 2 4 0.25 0.25\n"; class std::unique_ptr<MMFFOopCollection> MMFFOopCollection::ds_instance[2] = {nullptr, nullptr}; extern const std::string defaultMMFFOop; extern const std::string defaultMMFFsOop; MMFFOopCollection *MMFFOopCollection::getMMFFOop(const bool isMMFFs, const std::string &mmffOop) { unsigned int i = (isMMFFs ? 1 : 0); if (!ds_instance[i] || !mmffOop.empty()) { ds_instance[i] = std::unique_ptr<MMFFOopCollection>(new MMFFOopCollection(isMMFFs, mmffOop)); } return ds_instance[i].get(); } MMFFOopCollection::MMFFOopCollection(const bool isMMFFs, std::string mmffOop) { if (mmffOop.empty()) { mmffOop = (isMMFFs ? 
defaultMMFFsOop : defaultMMFFOop); } std::istringstream inStream(mmffOop); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFOop mmffOopObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int iAtomType = boost::lexical_cast<unsigned int>(*token); #else d_iAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int jAtomType = boost::lexical_cast<unsigned int>(*token); #else d_jAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int kAtomType = boost::lexical_cast<unsigned int>(*token); #else d_kAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int lAtomType = boost::lexical_cast<unsigned int>(*token); #else d_lAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; mmffOopObj.koop = boost::lexical_cast<double>(*token); ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[iAtomType][jAtomType][kAtomType][lAtomType] = mmffOopObj; #else d_params.push_back(mmffOopObj); #endif } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFOop = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* MMFF OUT-OF-PLANE PARAMETERS- Rev: 15-OCT-93 Source: MMFF\n" "* C94 - CORE MMFF parameter, from fits to HF/6-31G* 2nd D's\n" "* #C94 - Value adjusted from CORE MMFF93 value\n" "*\n" "* MMFF atom types koop Source\n" "0 2 0 0 0.020 *-2-*-* C94 DEF\n" "1 2 1 2 0.030 C94\n" "1 2 2 2 0.027 C94\n" "1 2 2 3 0.026 C94\n" "1 2 2 5 0.013 C94\n" "1 2 2 37 0.032 C94\n" "2 2 2 5 0.013 C94\n" "2 2 3 5 0.012 C94\n" "2 2 5 5 0.006 C94\n" "2 2 5 6 0.027 C94\n" "2 2 5 37 0.017 C94\n" "2 2 5 40 0.012 C94\n" "2 2 5 41 0.008 C94\n" "0 3 0 0 0.130 *-3-*-* C94 DEF\n" "1 3 1 7 0.146 C94\n" "1 3 2 7 0.138 C94\n" "1 3 3 7 0.134 C94\n" "1 3 5 7 0.122 C94\n" "1 3 6 7 0.141 C94\n" "1 3 7 10 0.129 C94\n" "1 3 7 37 0.138 C94\n" "2 3 5 7 0.113 C94\n" "2 3 5 9 0.081 C94\n" "2 3 6 7 0.127 C94\n" "2 3 7 10 0.116 C94\n" "3 3 5 7 0.113 C94\n" "3 3 6 7 0.127 C94\n" "5 3 5 7 0.103 C94\n" "5 3 5 9 0.074 C94\n" "5 3 5 54 0.078 C94\n" "5 3 6 7 0.119 C94\n" "5 3 7 10 0.102 C94\n" "5 3 9 40 0.067 C94\n" "6 3 7 37 0.127 C94\n" "7 3 10 10 0.113 C94\n" "7 3 20 20 0.151 C94\n" "9 3 40 40 0.057 C94\n" "0 8 0 0 0.000 *-8-*-* C94 DEF\n" "0 10 0 0 -0.020 *-10-*-* C94 DEF\n" "1 10 1 3 -0.02 #C94 (C93=-0.034)\n" "1 10 3 6 -0.033 C94\n" "1 10 3 28 -0.02 #C94 (C93=-0.030)\n" "3 10 3 28 -0.030 C94\n" "3 10 28 28 -0.019 C94\n" "0 17 0 0 0.000 *-17-*-* E94 DEF\n" "0 26 0 0 0.000 *-26-*-* CE4 DEF\n" "0 30 0 0 0.010 *-30-*-* C94 DEF\n" "5 30 20 30 0.008 C94\n" "0 37 0 0 0.035 *-37-*-* C94 DEF\n" "1 37 37 37 0.040 C94\n" "2 37 37 37 0.031 C94\n" "3 37 37 37 0.027 C94\n" "5 37 37 37 0.015 C94\n" "5 37 37 38 0.046 C94\n" "5 37 37 63 0.008 C94\n" "5 37 37 64 0.012 C94\n" "5 37 37 69 0.016 C94\n" "5 37 38 38 0.084 C94\n" "6 37 37 37 0.048 C94\n" "15 37 37 37 0.025 C94\n" "37 37 37 40 0.046 C94\n" "0 39 0 0 0.020 *-39-*-* C94 DEF\n" "1 39 63 63 0.012 C94\n" "23 39 63 63 -0.014 C94\n" "23 39 63 65 0.021 C94\n" "23 39 65 65 0.062 C94\n" "0 40 0 0 -0.005 *-40-*-* C94 DEF\n" "1 40 28 37 -0.006 C94\n" "2 40 28 28 -0.007 C94\n" "3 40 28 28 -0.007 
C94\n" "28 40 28 37 0.004 C94\n" "0 41 0 0 0.180 *-41-*-* C94 DEF\n" "1 41 32 32 0.178 C94\n" "2 41 32 32 0.161 C94\n" "5 41 32 32 0.158 C94\n" "0 43 0 0 0.000 *-43-*-* E94 DEF\n" "0 45 0 0 0.150 *-45-*-* E94 DEF\n" "0 49 0 0 0.000 *-49-*-* E94 DEF\n" "50 49 50 50 0.000 #C94\n" "0 54 0 0 0.020 *-54-*-* C94 DEF\n" "1 54 3 36 0.016 C94\n" "3 54 36 36 0.018 C94\n" "0 55 0 0 0.020 *-55-*-* C94 DEF\n" "1 55 36 57 0.020 #C94\n" "36 55 36 57 0.020 #C94\n" "0 56 0 0 0.020 *-56-*-* C94 DEF\n" "1 56 36 57 0.020 #C94\n" "36 56 36 57 0.020 #C94\n" "0 57 0 0 0.080 *-57-*-* C94 DEF\n" "5 57 55 55 0.038 C94\n" "56 57 56 56 0.158 C94\n" "0 58 0 0 0.025 *-58-*-* E94 DEF\n" "0 63 0 0 0.050 *-63-*-* C94 DEF\n" "5 63 39 64 0.019 C94\n" "5 63 39 66 0.068 C94\n" "5 63 44 64 0.014 C94\n" "5 63 44 66 0.055 C94\n" "5 63 59 64 0.033 C94\n" "5 63 59 66 0.085 C94\n" "37 63 39 64 0.010 C94\n" "0 64 0 0 0.040 *-64-*-* C94 DEF\n" "5 64 63 64 0.006 C94\n" "5 64 63 66 0.043 C94\n" "5 64 64 65 0.052 C94\n" "5 64 65 66 0.094 C94\n" "37 64 63 64 -0.011 C94\n" "0 67 0 0 0.070 *-67-*-* E94 DEF\n" "0 69 0 0 0.070 *-69-*-* C94 DEF\n" "32 69 37 37 0.067 C94\n" "0 73 0 0 0.000 *-73-*-* E94 DEF\n" "0 78 0 0 0.045 *-78-*-* C94 DEF\n" "5 78 78 81 0.046 C94\n" "0 80 0 0 0.080 *-80-*-* C94 DEF\n" "5 80 81 81 0.057 C94\n" "0 81 0 0 0.025 *-81-*-* C94 DEF\n" "36 81 78 80 0.016 C94\n" "0 82 0 0 0.000 *-82-*-* E94 DEF\n"; const std::string defaultMMFFsOop = "*\n" "* Copyright (c) Merck and Co., Inc., 1994,1995,1996,1997,1998,1999\n" "* All Rights Reserved\n" "*\n" "* MMFF94s OUT-OF-PLANE PARAMETERS\n" "* C94 - CORE MMFF94 parameter, from fits to HF/6-31G* 2nd D's\n" "* C94S - CORE MMFF94s parameter,adjusted to promote planarity\n" "* #C94 - Value adjusted from CORE MMFF94 value\n" "*\n" "* MMFF atom types koop Source\n" "0 2 0 0 0.020 *-2-*-* C94 DEF\n" "1 2 1 2 0.030 C94\n" "1 2 2 2 0.027 C94\n" "1 2 2 3 0.026 C94\n" "1 2 2 5 0.013 C94\n" "1 2 2 37 0.032 C94\n" "2 2 2 5 0.013 C94\n" "2 2 3 5 0.012 C94\n" "2 2 5 5 0.006 C94\n" "2 2 5 6 0.027 C94\n" "2 2 5 37 0.017 C94\n" "2 2 5 40 0.012 C94\n" "2 2 5 41 0.008 C94\n" "0 3 0 0 0.130 *-3-*-* C94 DEF\n" "1 3 1 7 0.146 C94\n" "1 3 2 7 0.138 C94\n" "1 3 3 7 0.134 C94\n" "1 3 5 7 0.122 C94\n" "1 3 6 7 0.141 C94\n" "1 3 7 10 0.129 C94\n" "1 3 7 37 0.138 C94\n" "2 3 5 7 0.113 C94\n" "2 3 5 9 0.081 C94\n" "2 3 6 7 0.127 C94\n" "2 3 7 10 0.116 C94\n" "3 3 5 7 0.113 C94\n" "3 3 6 7 0.127 C94\n" "5 3 5 7 0.103 C94\n" "5 3 5 9 0.074 C94\n" "5 3 5 54 0.078 C94\n" "5 3 6 7 0.119 C94\n" "5 3 7 10 0.102 C94\n" "5 3 9 40 0.067 C94\n" "6 3 7 37 0.127 C94\n" "7 3 10 10 0.113 C94\n" "7 3 20 20 0.151 C94\n" "9 3 40 40 0.057 C94\n" "0 8 0 0 0.000 *-8-*-* C94 DEF\n" "0 10 0 0 0.015 *-10-*-* C94S DEF\n" "1 10 1 3 0.015 C94S\n" "1 10 3 6 0.015 C94\n" "1 10 3 28 0.015 C94S\n" "3 10 3 28 0.015 C94S\n" "3 10 28 28 0.015 C94S\n" "0 17 0 0 0.000 *-17-*-* E94 DEF\n" "0 26 0 0 0.000 *-26-*-* CE4 DEF\n" "0 30 0 0 0.010 *-30-*-* C94 DEF\n" "5 30 20 30 0.008 C94\n" "0 37 0 0 0.035 *-37-*-* C94 DEF\n" "1 37 37 37 0.040 C94\n" "2 37 37 37 0.031 C94\n" "3 37 37 37 0.027 C94\n" "5 37 37 37 0.015 C94\n" "5 37 37 38 0.046 C94\n" "5 37 37 63 0.008 C94\n" "5 37 37 64 0.012 C94\n" "5 37 37 69 0.016 C94\n" "5 37 38 38 0.084 C94\n" "6 37 37 37 0.048 C94\n" "15 37 37 37 0.025 C94\n" "37 37 37 40 0.046 C94\n" "0 39 0 0 0.020 *-39-*-* C94 DEF\n" "1 39 63 63 0.012 C94\n" "23 39 63 63 -0.014 C94\n" "23 39 63 65 0.021 C94\n" "23 39 65 65 0.062 C94\n" "0 40 0 0 0.030 *-40-*-* C94S DEF\n" "1 40 28 37 0.030 C94S\n" "2 40 28 28 0.030 C94S\n" "3 
40 28 28 0.030 C94S\n" "28 40 28 37 0.030 C94S\n" "0 41 0 0 0.180 *-41-*-* C94 DEF\n" "1 41 32 32 0.178 C94\n" "2 41 32 32 0.161 C94\n" "5 41 32 32 0.158 C94\n" "0 43 0 0 0.000 *-43-*-* E94 DEF\n" "0 45 0 0 0.150 *-45-*-* E94 DEF\n" "0 49 0 0 0.000 *-49-*-* E94 DEF\n" "50 49 50 50 0.000 #C94\n" "0 54 0 0 0.020 *-54-*-* C94 DEF\n" "1 54 3 36 0.016 C94\n" "3 54 36 36 0.018 C94\n" "0 55 0 0 0.020 *-55-*-* C94 DEF\n" "1 55 36 57 0.020 #C94\n" "36 55 36 57 0.020 #C94\n" "0 56 0 0 0.020 *-56-*-* C94 DEF\n" "1 56 36 57 0.020 #C94\n" "36 56 36 57 0.020 #C94\n" "0 57 0 0 0.080 *-57-*-* C94 DEF\n" "5 57 55 55 0.038 C94\n" "56 57 56 56 0.158 C94\n" "0 58 0 0 0.025 *-58-*-* E94 DEF\n" "0 63 0 0 0.050 *-63-*-* C94 DEF\n" "5 63 39 64 0.019 C94\n" "5 63 39 66 0.068 C94\n" "5 63 44 64 0.014 C94\n" "5 63 44 66 0.055 C94\n" "5 63 59 64 0.033 C94\n" "5 63 59 66 0.085 C94\n" "37 63 39 64 0.010 C94\n" "0 64 0 0 0.040 *-64-*-* C94 DEF\n" "5 64 63 64 0.006 C94\n" "5 64 63 66 0.043 C94\n" "5 64 64 65 0.052 C94\n" "5 64 65 66 0.094 C94\n" "37 64 63 64 -0.011 C94\n" "0 67 0 0 0.070 *-67-*-* E94 DEF\n" "0 69 0 0 0.070 *-69-*-* C94 DEF\n" "32 69 37 37 0.067 C94\n" "0 73 0 0 0.000 *-73-*-* E94 DEF\n" "0 78 0 0 0.045 *-78-*-* C94 DEF\n" "5 78 78 81 0.046 C94\n" "0 80 0 0 0.080 *-80-*-* C94 DEF\n" "5 80 81 81 0.057 C94\n" "0 81 0 0 0.025 *-81-*-* C94 DEF\n" "36 81 78 80 0.016 C94\n" "0 82 0 0 0.000 *-82-*-* E94 DEF\n"; class std::unique_ptr<MMFFTorCollection> MMFFTorCollection::ds_instance[2] = {nullptr, nullptr}; extern const std::string defaultMMFFTor; extern const std::string defaultMMFFsTor; MMFFTorCollection *MMFFTorCollection::getMMFFTor(const bool isMMFFs, const std::string &mmffTor) { unsigned int i = (isMMFFs ? 1 : 0); if (!ds_instance[i] || !mmffTor.empty()) { ds_instance[i] = std::unique_ptr<MMFFTorCollection>(new MMFFTorCollection(isMMFFs, mmffTor)); } return ds_instance[i].get(); } MMFFTorCollection::MMFFTorCollection(const bool isMMFFs, std::string mmffTor) { if (mmffTor.empty()) { mmffTor = (isMMFFs ? 
defaultMMFFsTor : defaultMMFFTor); } std::istringstream inStream(mmffTor); std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { MMFFTor mmffTorObj; boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int torType = boost::lexical_cast<unsigned int>(*token); #else d_torType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int iAtomType = boost::lexical_cast<unsigned int>(*token); #else d_iAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int jAtomType = boost::lexical_cast<unsigned int>(*token); #else d_jAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int kAtomType = boost::lexical_cast<unsigned int>(*token); #else d_kAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int lAtomType = boost::lexical_cast<unsigned int>(*token); #else d_lAtomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; mmffTorObj.V1 = boost::lexical_cast<double>(*token); ++token; mmffTorObj.V2 = boost::lexical_cast<double>(*token); ++token; mmffTorObj.V3 = boost::lexical_cast<double>(*token); ++token; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[torType][iAtomType][jAtomType][kAtomType][lAtomType] = mmffTorObj; #else d_params.push_back(mmffTorObj); #endif } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFTor = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* MMFF TORSION PARAMETERS- Rev: 26-OCT-94 Source: MMFF\n" "* C94 - CORE MMFF parameter - from fits to conformational energies\n" "* X94 - EXTD MMFF parameter - also from fits to conformational E's\n" "* E94 - EXTD MMFF parameter - from empirical rule\n" "* #E94 - Adjusted from empirical rule value\n" "*\n" "* atom types V1 V2 V3 Source\n" "0 0 1 1 0 0.000 0.000 0.300 C94 " "0:*-1-1-* Def\n" "5 0 1 1 0 0.200 -0.800 1.500 C94 " "5:*-1-1-* Def\n" "0 1 1 1 1 0.103 0.681 0.332 C94\n" "5 1 1 1 1 0.144 -0.547 1.126 C94\n" "0 1 1 1 2 -0.295 0.438 0.584 C94\n" "0 1 1 1 3 0.066 -0.156 0.143 C94\n" "0 1 1 1 5 0.639 -0.630 0.264 C94\n" "0 1 1 1 6 -0.688 1.757 0.477 C94\n" "5 1 1 1 6 0.000 0.000 0.054 C94\n" "0 1 1 1 8 -1.420 -0.092 1.101 C94\n" "5 1 1 1 8 0.000 -0.158 0.323 C94\n" "0 1 1 1 11 0.593 0.662 1.120 C94\n" "0 1 1 1 12 -0.678 0.417 0.624 C94\n" "0 1 1 1 15 -0.714 0.698 0.000 C94\n" "0 1 1 1 34 -0.647 0.550 0.590 C94\n" "0 2 1 1 5 0.321 -0.411 0.144 C94\n" "0 3 1 1 3 0.443 0.000 -1.140 C94\n" "0 3 1 1 5 -0.256 0.058 0.000 C94\n" "0 3 1 1 6 -0.679 -0.029 0.000 C94\n" "0 5 1 1 5 0.284 -1.386 0.314 C94\n" "0 5 1 1 6 -0.654 1.072 0.279 C94\n" "0 5 1 1 8 -0.744 -1.235 0.337 C94\n" "0 5 1 1 10 0.000 0.000 0.427 C94\n" "0 5 1 1 11 0.000 0.516 0.291 C94\n" "0 5 1 1 12 0.678 -0.602 0.398 C94\n" "0 5 1 1 15 1.142 -0.644 0.367 C94\n" "0 5 1 1 25 0.000 0.000 0.295 X94\n" "0 5 1 1 34 0.692 -0.530 0.278 C94\n" "0 5 1 1 37 0.000 0.000 0.389 C94\n" "0 5 1 1 39 0.000 0.000 0.278 C94\n" "0 5 1 1 41 0.000 0.000 -0.141 C94\n" "0 5 1 1 56 0.000 0.000 0.324 C94\n" "0 5 1 1 68 0.000 0.000 0.136 C94\n" "0 6 1 1 6 0.408 1.397 0.961 C94\n" "5 6 1 1 6 0.313 -1.035 1.631 C94\n" 
"0 8 1 1 8 1.055 0.834 0.000 C94\n" "0 11 1 1 11 -0.387 -0.543 1.405 C94\n" "0 12 1 1 12 0.000 0.000 0.893 C94\n" "0 15 1 1 15 -0.177 0.000 0.049 C94\n" "0 0 1 2 0 0.000 0.000 0.000 C94 " "0:*-1-2-* Def\n" "2 0 1 2 0 0.000 0.000 0.000 E94 " "2:*1-2-* Def\n" "5 0 1 2 0 0.000 0.000 0.000 C94 " "5:*-1-2-* Def\n" "0 0 1 2 2 0.000 0.000 -0.650 C94 " "0:*-1-2=2 Def\n" "5 0 1 2 2 0.000 0.000 -0.650 C94 " "5:*-1-2=2 Def\n" "0 1 1 2 1 0.419 0.296 0.282 C94\n" "0 1 1 2 2 -0.494 0.274 -0.630 C94\n" "0 1 1 2 5 0.075 0.000 0.358 C94\n" "0 2 1 2 2 -0.293 0.115 -0.508 C94\n" "0 2 1 2 5 0.301 0.104 0.507 C94\n" "0 3 1 2 1 0.565 -0.554 0.234 C94\n" "0 3 1 2 2 -0.577 -0.482 -0.427 C94\n" "0 3 1 2 5 0.082 0.000 0.123 C94\n" "0 5 1 2 1 0.000 -0.184 0.220 C94\n" "0 5 1 2 2 0.501 -0.410 -0.535 C94\n" "2 5 1 2 2 0.000 0.000 0.055 C94\n" "2 5 1 2 3 0.000 0.000 -0.108 C94\n" "0 5 1 2 5 -0.523 -0.228 0.208 C94\n" "2 5 1 2 37 0.000 0.000 0.000 C94\n" "0 6 1 2 1 -0.467 0.000 0.490 C94\n" "0 6 1 2 2 0.425 0.168 -0.875 C94\n" "0 6 1 2 5 0.000 0.136 0.396 C94\n" "0 8 1 2 1 -0.504 0.371 0.557 C94\n" "0 8 1 2 2 0.541 0.539 -1.009 C94\n" "0 8 1 2 5 0.000 0.204 0.464 C94\n" "0 0 1 3 0 0.000 0.400 0.300 C94 " "0:*-1-3-* Def\n" "2 0 1 3 0 0.000 0.500 0.350 C94 " "2:*-1-3-* Def\n" "5 0 1 3 0 0.000 0.000 0.000 E94 " "5:*1-3-* Def\n" "0 0 1 3 1 0.000 0.000 0.550 C94 " "0:*-1-3-1 Def\n" "0 0 1 3 5 0.000 0.200 0.700 C94 " "0:*-1-3-5 Def\n" "0 0 1 3 7 0.000 0.400 0.400 C94 " "0:*-1-3-7 Def\n" "0 1 1 3 1 0.103 0.177 0.545 C94\n" "0 1 1 3 5 -0.072 0.316 0.674 C94\n" "0 1 1 3 6 -0.117 -0.333 0.202 C94\n" "0 1 1 3 7 0.825 0.139 0.325 C94\n" "0 1 1 3 10 -0.927 1.112 1.388 C94\n" "0 2 1 3 5 0.663 -0.167 0.426 C94\n" "0 2 1 3 7 -0.758 0.112 0.563 C94\n" "0 5 1 3 1 -0.073 0.085 0.531 C94\n" "2 5 1 3 2 0.000 0.000 0.115 C94\n" "2 5 1 3 3 0.000 0.000 0.446 C94\n" "0 5 1 3 5 -0.822 0.501 1.008 C94\n" "0 5 1 3 6 0.000 -0.624 0.330 C94\n" "0 5 1 3 7 0.659 -1.407 0.308 C94\n" "0 5 1 3 10 -0.412 0.693 0.087 C94\n" "2 5 1 3 37 0.000 0.000 0.056 C94\n" "0 5 1 3 43 0.000 1.027 0.360 X94\n" "0 5 1 3 51 0.000 1.543 0.350 X94\n" "0 5 1 3 53 0.000 0.501 0.000 X94\n" "0 5 1 3 74 0.000 0.513 -0.344 X94\n" "0 5 1 3 75 0.000 0.511 -0.186 X94\n" "0 6 1 3 6 0.447 0.652 0.318 C94\n" "0 6 1 3 7 -0.395 0.730 -0.139 C94\n" "0 10 1 3 7 0.338 2.772 2.145 C94\n" "0 10 1 3 10 0.548 0.000 1.795 C94\n" "0 0 1 4 0 0.000 0.000 0.000 C94 " "0:*-1-4-* Def\n" "0 0 1 6 0 0.000 0.000 0.200 C94 " "0:*-1-6-* Def\n" "5 0 1 6 0 0.000 -0.200 0.400 C94 " "5:*-1-6-* Def\n" "0 1 1 6 1 -0.681 0.755 0.755 C94\n" "5 1 1 6 1 0.000 0.243 -0.596 C94\n" "0 1 1 6 3 -0.547 0.000 0.320 C94\n" "0 1 1 6 21 0.000 0.270 0.237 C94\n" "0 2 1 6 21 0.102 0.460 -0.128 C94\n" "0 3 1 6 21 -1.652 -1.660 0.283 C94\n" "0 5 1 6 1 0.571 0.319 0.570 C94\n" "0 5 1 6 2 0.000 0.000 0.306 C94\n" "0 5 1 6 3 0.572 0.000 -0.304 C94\n" "0 5 1 6 21 0.596 -0.276 0.346 C94\n" "0 5 1 6 25 0.000 0.000 0.061 X94\n" "0 5 1 6 37 0.000 0.000 0.106 C94\n" "0 5 1 6 45 0.000 0.000 -0.174 X94\n" "0 6 1 6 1 0.229 -0.710 0.722 C94\n" "5 6 1 6 1 0.000 0.000 0.040 C94\n" "0 6 1 6 21 1.488 -3.401 -0.320 C94\n" "0 37 1 6 21 0.712 1.320 -0.507 C94\n" "0 0 1 8 0 0.000 -0.300 0.500 C94 " "0:*-1-8-* Def\n" "5 0 1 8 0 0.000 0.000 0.297 E94 " "5:*1-8-* Def\n" "0 1 1 8 1 -0.439 0.786 0.272 C94\n" "5 1 1 8 1 0.115 -0.390 0.658 C94\n" "0 1 1 8 6 -0.608 0.339 1.496 C94\n" "0 1 1 8 23 -0.428 0.323 0.280 C94\n" "0 2 1 8 23 0.594 -0.409 0.155 C94\n" "0 5 1 8 1 0.393 -0.385 0.562 C94\n" "0 5 1 8 6 0.598 -0.158 0.399 C94\n" "0 5 1 8 23 -0.152 -0.440 
0.357 C94\n" "0 0 1 9 0 0.000 0.000 0.000 C94 " "0:*-1-9-* Def\n" "5 0 1 9 0 0.000 0.000 0.000 E94 " "5:*1-9-* Def\n" "0 5 1 9 3 0.204 -0.335 -0.352 C94\n" "0 5 1 9 53 0.000 0.000 0.097 X94\n" "0 0 1 10 0 0.000 0.000 0.300 C94 " "0:*-1-10-* Def\n" "5 0 1 10 0 0.000 0.000 0.000 E94 " "5:*1-10-* Def\n" "0 0 1 10 3 0.000 0.000 1.000 C94 " "0:*-1-10-3 Def\n" "0 1 1 10 3 -1.027 0.694 0.948 C94\n" "0 1 1 10 6 0.159 -0.552 0.198 C94\n" "0 1 1 10 28 0.552 -0.380 0.326 C94\n" "0 3 1 10 3 3.100 -2.529 1.494 C94\n" "0 3 1 10 28 0.079 0.280 0.402 C94\n" "0 5 1 10 1 0.000 0.000 0.779 C94\n" "0 5 1 10 3 -2.099 1.363 0.021 C94\n" "0 5 1 10 6 -0.162 0.832 0.552 C94\n" "0 5 1 10 28 -0.616 0.000 0.274 C94\n" "0 0 1 15 0 0.000 0.000 0.400 C94 " "0:*-1-15-* Def\n" "5 0 1 15 0 0.000 0.000 0.336 E94 " "5:*1-15-* Def\n" "0 1 1 15 1 -1.047 0.170 0.398 C94\n" "0 1 1 15 15 -1.438 0.263 0.501 C94\n" "0 1 1 15 71 -0.376 -0.133 0.288 C94\n" "0 5 1 15 1 1.143 -0.231 0.447 C94\n" "0 5 1 15 15 1.555 -0.323 0.456 C94\n" "0 5 1 15 37 0.000 0.000 0.459 C94\n" "0 5 1 15 71 0.229 0.203 0.440 C94\n" "0 0 1 17 0 0.000 0.000 0.350 C94 " "0:*-1-17-* Def\n" "5 0 1 17 0 0.000 0.000 0.000 E94 " "5:*1-17-* Def\n" "0 5 1 17 1 0.000 0.000 0.536 X94\n" "0 5 1 17 7 0.000 0.000 0.212 X94\n" "0 0 1 18 0 0.000 0.000 0.100 C94 " "0:*-1-18-* Def\n" "5 0 1 18 0 0.000 0.000 0.112 E94 " "5:*1-18-* Def\n" "0 5 1 18 1 0.000 0.000 0.000 X94\n" "0 5 1 18 6 0.000 0.000 0.099 X94\n" "0 5 1 18 32 0.000 0.585 0.388 X94\n" "0 5 1 18 43 0.000 -0.412 0.121 X94\n" "0 5 1 18 48 0.000 0.000 0.195 X94\n" "0 5 1 18 62 0.000 0.000 -0.088 X94\n" "0 0 1 19 0 0.000 0.000 0.150 C94 " "0:*-1-19-* Def\n" "5 0 1 19 0 0.000 0.000 0.179 E94 " "5:*1-19-* Def\n" "0 5 1 19 5 0.000 0.000 0.196 X94\n" "0 5 1 19 6 0.000 0.000 0.176 X94\n" "0 5 1 19 12 0.000 0.000 0.152 X94\n" "0 0 1 20 0 0.000 0.000 0.350 C94 " "0:*-1-20-* Def\n" "5 0 1 20 0 0.000 0.000 0.350 C94 " "5:*-1-20-* Def\n" "0 5 1 20 5 0.000 0.000 0.344 C94\n" "0 5 1 20 20 0.000 0.000 0.361 C94\n" "0 0 1 22 0 0.000 0.000 0.236 E94 " "0:*1-22-* Def\n" "5 0 1 22 0 0.000 0.000 0.236 E94 " "5:*1-22-* Def\n" "0 0 1 25 0 0.000 0.000 0.300 C94 " "0:*-1-25-* Def\n" "5 0 1 25 0 0.000 0.000 0.251 E94 " "5:*1-25-* Def\n" "0 1 1 25 1 0.000 -0.207 0.232 X94\n" "0 1 1 25 32 0.000 0.288 0.218 X94\n" "0 5 1 25 1 0.000 0.152 0.235 X94\n" "0 5 1 25 6 0.000 0.000 0.495 X94\n" "0 5 1 25 32 0.000 -0.130 0.214 X94\n" "0 5 1 25 43 0.000 0.000 0.466 X94\n" "0 5 1 25 72 0.000 0.000 0.243 X94\n" "0 0 1 26 0 0.000 0.000 0.450 C94 " "0:*-1-26-* Def\n" "5 0 1 26 0 0.000 0.000 0.376 E94 " "5:*1-26-* Def\n" "0 5 1 26 12 0.000 0.000 0.439 X94\n" "0 5 1 26 71 0.000 0.000 0.472 X94\n" "0 0 1 34 0 0.000 0.000 0.250 C94 " "0:*-1-34-* Def\n" "5 0 1 34 0 0.000 0.000 0.198 E94 " "5:*1-34-* Def\n" "0 1 1 34 36 0.000 0.000 0.187 C94\n" "0 5 1 34 1 0.000 0.000 0.247 C94\n" "0 5 1 34 36 0.000 0.000 0.259 C94\n" "0 0 1 37 0 0.000 0.000 0.200 C94 " "0:*-1-37-* Def\n" "5 0 1 37 0 0.000 0.000 0.000 E94 " "5:*1-37-* Def\n" "0 1 1 37 37 0.000 0.449 0.000 C94\n" "0 5 1 37 37 0.000 -0.420 0.391 C94\n" "0 6 1 37 37 0.000 0.000 0.150 C94\n" "0 0 1 39 0 0.000 0.000 0.000 C94 " "0:*-1-39-* Def\n" "5 0 1 39 0 0.000 0.000 0.000 E94 " "5:*1-39-* Def\n" "0 1 1 39 63 0.000 -0.080 -0.056 C94\n" "0 5 1 39 63 0.000 0.000 -0.113 C94\n" "0 0 1 40 0 0.000 0.000 0.250 C94 " "0:*-1-40-* Def\n" "5 0 1 40 0 0.000 0.000 0.297 E94 " "5:*1-40-* Def\n" "0 5 1 40 28 0.000 -0.097 0.203 C94\n" "0 5 1 40 37 0.000 0.000 0.329 C94\n" "0 0 1 41 0 0.000 0.600 0.000 C94 " "0:*-1-41-* Def\n" "0 1 1 
41 32 0.000 1.263 0.000 C94\n" "0 5 1 41 32 0.000 0.000 -0.106 C94\n" "0 5 1 41 72 0.000 0.632 0.000 X94\n" "0 0 1 43 0 0.000 0.000 0.150 C94 " "0:*-1-43-* Def\n" "5 0 1 43 0 0.000 0.000 0.297 E94 " "5:*1-43-* Def\n" "0 5 1 43 18 0.357 -0.918 0.000 X94\n" "0 5 1 43 25 0.000 0.000 0.061 X94\n" "0 5 1 43 28 -0.249 0.382 0.343 X94\n" "0 0 1 45 0 0.000 0.000 0.100 C94 " "0:*-1-45-* Def\n" "0 5 1 45 32 0.000 0.000 0.125 X94\n" "0 0 1 46 0 0.000 0.000 -0.500 C94 " "0:*-1-46-* Def\n" "0 5 1 46 7 0.000 0.000 -0.540 X94\n" "0 0 1 54 0 0.000 0.000 0.000 C94 " "0:*-1-54-* Def\n" "2 0 1 54 0 0.000 0.000 0.000 E94 " "2:*1-54-* Def\n" "5 0 1 54 0 0.000 0.000 0.000 E94 " "5:*1-54-* Def\n" "0 5 1 54 3 0.000 0.000 -0.315 C94\n" "0 5 1 54 36 0.000 0.000 0.315 C94\n" "0 0 1 55 0 0.000 0.000 0.000 C94 " "0:*-1-55-* Def\n" "5 0 1 55 0 0.000 0.000 0.000 E94 " "5:*1-55-* Def\n" "0 5 1 55 36 0.000 -0.058 0.084 C94\n" "0 5 1 55 57 0.000 -0.058 -0.092 C94\n" "0 0 1 56 0 0.000 0.000 -0.300 C94 " "0:*-1-56-* Def\n" "0 1 1 56 36 0.875 0.668 -0.015 C94\n" "0 1 1 56 57 -0.870 0.775 -0.406 C94\n" "0 5 1 56 36 -0.958 -0.629 -0.372 C94\n" "0 5 1 56 57 0.952 -0.715 -0.483 C94\n" "0 0 1 57 0 0.000 0.000 0.000 E94 " "0:*1-57-* Def\n" "5 0 1 57 0 0.000 0.000 0.000 E94 " "5:*1-57-* Def\n" "0 0 1 58 0 0.000 0.000 0.000 E94 " "0:*1-58-* Def\n" "0 0 1 62 0 0.000 0.000 0.250 C94 " "0:*-1-62-* Def\n" "0 5 1 62 18 0.000 0.000 0.270 X94\n" "0 0 1 63 0 0.000 0.000 0.000 E94 " "0:*1-63-* Def\n" "5 0 1 63 0 0.000 0.000 0.000 E94 " "5:*1-63-* Def\n" "0 0 1 64 0 0.000 0.000 0.000 E94 " "0:*1-64-* Def\n" "5 0 1 64 0 0.000 0.000 0.000 E94 " "5:*1-64-* Def\n" "0 0 1 67 0 0.000 0.000 0.000 E94 " "0:*1-67-* Def\n" "5 0 1 67 0 0.000 0.000 0.000 E94 " "5:*1-67-* Def\n" "0 0 1 68 0 0.000 0.000 0.400 C94 " "0:*-1-68-* Def\n" "0 1 1 68 1 -0.117 0.090 0.751 C94\n" "0 1 1 68 23 0.373 0.153 0.635 C94\n" "0 1 1 68 32 -0.090 -0.169 0.075 C94\n" "0 5 1 68 1 0.134 -0.112 0.329 C94\n" "0 5 1 68 23 -0.361 -0.202 0.560 C94\n" "0 5 1 68 32 0.072 0.218 0.093 C94\n" "0 0 1 73 0 0.000 0.000 0.500 C94 " "0:*-1-73-* Def\n" "0 5 1 73 32 0.000 0.000 0.509 X94\n" "0 5 1 73 72 0.000 0.000 0.443 X94\n" "0 0 1 75 0 0.000 0.000 0.000 E94 " "0:*1-75-* Def\n" "0 0 1 78 0 0.000 0.000 0.000 E94 " "0:*1-78-* Def\n" "0 0 1 80 0 0.000 0.000 0.000 E94 " "0:*1-80-* Def\n" "0 0 1 81 0 0.000 0.000 0.000 E94 " "0:*1-81-* Def\n" "0 0 2 2 0 0.000 12.000 0.000 C94 " "0:*-2=2-* Def\n" "1 0 2 2 0 0.000 1.800 0.000 C94 " "1:*=2-2=* Def\n" "5 0 2 2 0 0.000 12.000 0.000 C94 " "5:*-2=2-* Def\n" "0 1 2 2 1 -0.403 12.000 0.000 C94\n" "0 1 2 2 2 0.000 12.000 0.000 C94\n" "1 1 2 2 2 -0.418 2.089 -0.310 C94\n" "0 1 2 2 5 0.000 12.000 0.000 C94\n" "1 1 2 2 5 0.412 2.120 0.269 C94\n" "1 2 2 2 2 0.094 1.621 0.877 C94\n" "0 2 2 2 5 0.000 12.000 0.000 C94\n" "1 2 2 2 5 0.317 1.421 -0.870 C94\n" "0 3 2 2 5 0.000 12.000 0.000 C94\n" "0 5 2 2 5 0.000 12.000 0.000 C94\n" "1 5 2 2 5 -0.406 1.767 0.000 C94\n" "0 5 2 2 6 0.000 12.000 0.000 C94\n" "0 5 2 2 37 0.000 12.000 0.000 C94\n" "0 5 2 2 40 0.000 12.000 0.000 C94\n" "0 5 2 2 41 0.000 12.000 0.000 C94\n" "0 5 2 2 45 0.000 12.000 0.000 X94\n" "0 5 2 2 62 0.000 12.000 0.000 X94\n" "1 0 2 3 0 0.000 2.500 0.000 #E94 " "0:*-2-3-* Def\n" "1 1 2 3 1 0.136 1.798 0.630 C94\n" "1 1 2 3 5 0.497 2.405 0.357 C94\n" "1 1 2 3 6 -0.211 1.925 -0.131 C94\n" "1 1 2 3 7 -0.401 2.028 -0.318 C94\n" "1 1 2 3 10 -0.084 2.214 -0.610 C94\n" "1 2 2 3 1 -0.325 1.553 -0.487 C94\n" "1 2 2 3 5 -0.295 2.024 -0.590 C94\n" "1 2 2 3 6 -0.143 1.466 0.000 C94\n" "1 2 2 3 7 0.362 1.978 0.000 
C94\n" "1 2 2 3 9 0.296 1.514 0.481 C94\n" "1 2 2 3 10 0.095 1.583 0.380 C94\n" "1 5 2 3 1 0.213 1.728 -0.042 C94\n" "1 5 2 3 5 -0.208 1.622 0.223 C94\n" "1 5 2 3 6 0.359 1.539 0.194 C94\n" "1 5 2 3 7 0.000 2.046 0.000 C94\n" "1 5 2 3 9 -0.290 1.519 -0.470 C94\n" "1 5 2 3 10 0.000 1.395 0.227 C94\n" "1 0 2 4 0 0.000 0.000 0.000 C94 " "0:*-2-4-* Def\n" "0 0 2 6 0 0.000 3.100 0.000 C94 " "0:*-2-6-* Def\n" "2 0 2 6 0 0.000 3.600 0.000 E94 " "2:*-2-6-* Def\n" "5 0 2 6 0 0.000 3.600 0.000 E94 " "5:*-2-6-* Def\n" "0 2 2 6 1 -1.953 3.953 -1.055 C94\n" "0 2 2 6 3 -1.712 2.596 -0.330 C94\n" "0 2 2 6 29 -0.215 2.810 -0.456 C94\n" "0 5 2 6 1 1.951 3.936 1.130 C94\n" "0 5 2 6 3 1.719 2.628 0.360 C94\n" "0 5 2 6 29 0.216 2.808 0.456 C94\n" "1 0 2 9 0 0.000 1.800 0.000 E94 " "1:*-2-9-* Def\n" "0 0 2 10 0 0.000 6.000 0.000 E94 " "0:*-2-10-* Def\n" "2 0 2 10 0 0.000 6.000 0.000 E94 " "2:*-2-10-* Def\n" "5 0 2 10 0 0.000 6.000 0.000 E94 " "5:*-2-10-* Def\n" "0 0 2 15 0 0.000 1.423 0.000 E94 " "0:*-2-15-* Def\n" "2 0 2 15 0 0.000 1.423 0.000 E94 " "2:*-2-15-* Def\n" "5 0 2 15 0 0.000 1.423 0.000 E94 " "5:*-2-15-* Def\n" "0 0 2 17 0 0.000 1.423 0.000 E94 " "0:*-2-17-* Def\n" "0 0 2 18 0 0.000 0.000 0.000 E94 " "0:*-2-18-* Def\n" "2 0 2 18 0 0.000 0.000 0.000 E94 " "2:*-2-18-* Def\n" "5 0 2 18 0 0.000 0.000 0.000 E94 " "5:*-2-18-* Def\n" "0 0 2 19 0 0.000 0.000 0.000 E94 " "0:*-2-19-* Def\n" "0 0 2 20 0 0.000 0.000 0.000 E94 " "0:*-2-20-* Def\n" "2 0 2 20 0 0.000 0.000 0.000 E94 " "2:*-2-20-* Def\n" "0 0 2 22 0 0.000 0.000 0.000 E94 " "0:*-2-22-* Def\n" "2 0 2 22 0 0.000 0.000 0.000 E94 " "2:*-2-22-* Def\n" "5 0 2 22 0 0.000 0.000 0.000 E94 " "5:*-2-22-* Def\n" "0 0 2 25 0 0.000 0.000 0.000 E94 " "0:*-2-25-* Def\n" "0 0 2 30 0 0.000 12.000 0.000 E94 " "0:*-2-30-* Def\n" "0 0 2 34 0 0.000 0.000 0.000 E94 " "0:*-2-34-* Def\n" "2 0 2 34 0 0.000 0.000 0.000 E94 " "2:*-2-34-* Def\n" "1 0 2 37 0 0.000 2.000 0.000 C94 " "1:*-2-37-* Def\n" "1 1 2 37 37 0.000 2.952 -0.079 C94\n" "1 2 2 37 37 0.000 1.542 0.434 C94\n" "1 5 2 37 37 0.000 1.308 -0.357 C94\n" "1 0 2 39 0 0.000 6.000 0.000 E94 " "1:*-2-39-* Def\n" "0 0 2 40 0 0.000 3.700 0.000 C94 " "0:*-2-40-* Def\n" "2 0 2 40 0 0.000 3.600 0.000 E94 " "2:*-2-40-* Def\n" "5 0 2 40 0 0.000 3.600 0.000 E94 " "5:*-2-40-* Def\n" "0 2 2 40 28 0.000 3.756 -0.530 C94\n" "0 5 2 40 28 0.073 3.698 0.291 C94\n" "0 0 2 41 0 0.000 1.200 0.000 C94 " "0:*-2-41-* Def\n" "2 0 2 41 0 0.000 1.800 0.000 E94 " "2:*-2-41-* Def\n" "0 2 2 41 32 0.000 1.235 0.000 C94\n" "0 5 2 41 32 0.000 1.231 0.000 C94\n" "0 0 2 43 0 0.000 3.600 0.000 E94 " "0:*-2-43-* Def\n" "2 0 2 43 0 0.000 3.600 0.000 E94 " "2:*-2-43-* Def\n" "0 0 2 45 0 0.000 2.200 0.000 C94 " "0:*-2-45-* Def\n" "2 0 2 45 0 0.000 1.800 0.000 E94 " "2:*-2-45-* Def\n" "0 2 2 45 32 0.000 2.212 0.000 X94\n" "0 5 2 45 32 0.000 2.225 0.000 X94\n" "0 0 2 46 0 0.000 1.800 0.000 E94 " "0:*-2-46-* Def\n" "2 0 2 46 0 0.000 1.800 0.000 E94 " "2:*-2-46-* Def\n" "0 0 2 55 0 0.000 4.800 0.000 E94 " "0:*-2-55-* Def\n" "0 0 2 56 0 0.000 4.800 0.000 E94 " "0:*-2-56-* Def\n" "0 0 2 62 0 0.000 8.000 0.000 C94 " "0:*-2-62-* Def\n" "0 2 2 62 23 1.693 7.903 0.532 X94\n" "0 5 2 62 23 -1.696 7.897 -0.482 X94\n" "1 0 2 63 0 0.000 1.800 0.000 E94 " "1:*-2-63-* Def\n" "1 0 2 64 0 0.000 1.800 0.000 E94 " "1:*-2-64-* Def\n" "1 0 2 67 0 0.000 1.800 0.000 E94 " "1:*-2-67-* Def\n" "1 0 2 81 0 0.000 4.800 0.000 E94 " "1:*-2-81-* Def\n" "1 0 3 3 0 0.000 0.600 0.000 C94 " "0:*-3-3-* Def\n" "4 0 3 3 0 0.000 1.800 0.000 E94 " "4:*-3-3-* Def\n" "1 1 3 3 1 -0.486 0.714 0.000 
C94\n" "1 1 3 3 6 -0.081 -0.125 0.132 C94\n" "1 1 3 3 7 1.053 1.327 0.000 C94\n" "1 5 3 3 6 0.000 0.188 0.436 C94\n" "1 5 3 3 7 0.000 0.177 -0.412 C94\n" "1 6 3 3 6 0.269 0.437 0.000 C94\n" "1 6 3 3 7 -0.495 0.793 -0.318 C94\n" "1 7 3 3 7 -0.260 1.084 0.193 C94\n" "0 0 3 6 0 0.000 5.500 0.000 C94 " "0:*-3-6-* Def\n" "2 0 3 6 0 0.000 5.500 0.000 C94 " "2:*-3-6-* Def\n" "4 0 3 6 0 0.000 3.600 0.000 E94 " "4:*-3-6-* Def\n" "5 0 3 6 0 0.000 3.600 0.000 E94 " "5:*-3-6-* Def\n" "0 1 3 6 1 -1.244 5.482 0.365 C94\n" "0 1 3 6 24 -1.166 5.078 -0.545 C94\n" "0 1 3 6 37 -0.677 5.854 0.521 C94\n" "2 2 3 6 24 0.256 4.519 0.258 C94\n" "2 3 3 6 24 1.663 4.073 0.094 C94\n" "0 5 3 6 1 0.526 5.631 0.691 C94\n" "0 5 3 6 2 0.159 6.586 0.216 C94\n" "0 5 3 6 24 -2.285 4.737 0.468 C94\n" "0 7 3 6 0 0.700 6.500 -0.400 C94 " "0:7-3-6-* Def\n" "0 7 3 6 1 0.682 7.184 -0.935 C94\n" "0 7 3 6 2 -0.168 6.572 -0.151 C94\n" "0 7 3 6 24 1.662 6.152 -0.058 C94\n" "0 7 3 6 37 0.635 5.890 -0.446 C94\n" "2 37 3 6 24 0.000 3.892 -0.094 C94\n" "0 0 3 9 0 0.000 16.000 0.000 C94 " "0:*-3=9-* Def\n" "1 0 3 9 0 0.000 1.800 0.000 E94 " "1:*-3-9-* Def\n" "5 0 3 9 0 0.000 12.000 0.000 E94 " "5:*-3-9-* Def\n" "0 2 3 9 27 0.000 16.000 0.000 C94\n" "0 5 3 9 1 0.687 16.152 0.894 C94\n" "0 5 3 9 27 0.000 16.000 0.000 C94\n" "0 40 3 9 1 -0.758 18.216 -0.188 C94\n" "0 40 3 9 27 0.000 16.000 0.000 C94\n" "0 0 3 10 0 0.000 6.000 0.000 C94 " "0:*-3-10-* Def\n" "2 0 3 10 0 0.000 6.000 0.000 C94 " "2:*-3-10-* Def\n" "4 0 3 10 0 0.000 6.000 0.000 C94 " "4:*-3-10-* Def\n" "5 0 3 10 0 0.000 6.000 0.000 E94 " "5:*-3-10-* Def\n" "0 1 3 10 1 0.647 6.159 0.507 C94\n" "0 1 3 10 6 -1.035 8.791 1.464 C94\n" "0 1 3 10 28 -0.294 5.805 1.342 C94\n" "2 2 3 10 28 -0.287 7.142 0.120 C94\n" "0 5 3 10 1 -0.183 6.314 1.753 C94\n" "0 5 3 10 3 -0.751 5.348 0.209 C94\n" "0 5 3 10 28 -0.388 5.972 0.459 C94\n" "0 7 3 10 1 -0.319 6.294 -0.147 C94\n" "0 7 3 10 3 0.776 -0.585 -0.145 C94\n" "0 7 3 10 6 1.107 8.631 -0.452 C94\n" "0 7 3 10 28 1.435 4.975 -0.454 C94\n" "0 10 3 10 28 0.000 3.495 1.291 C94\n" "0 0 3 15 0 0.000 1.423 0.000 E94 " "0:*-3-15-* Def\n" "2 0 3 15 0 0.000 1.423 0.000 E94 " "2:*-3-15-* Def\n" "4 0 3 15 0 0.000 1.423 0.000 E94 " "4:*-3-15-* Def\n" "5 0 3 15 0 0.000 1.423 0.000 E94 " "5:*-3-15-* Def\n" "0 0 3 17 0 0.000 1.423 0.000 E94 " "0:*-3-17-* Def\n" "5 0 3 17 0 0.000 1.423 0.000 E94 " "5:*-3-17-* Def\n" "0 0 3 18 0 0.000 0.000 0.000 E94 " "0:*-3-18-* Def\n" "2 0 3 18 0 0.000 0.000 0.000 E94 " "2:*-3-18-* Def\n" "0 0 3 20 0 0.000 0.000 -0.300 C94 " "0:*-3-20-* Def\n" "2 0 3 20 0 0.000 0.000 0.000 E94 " "2:*-3-20-* Def\n" "4 0 3 20 0 0.000 0.000 -0.300 C94 " "4:*-3-20-* Def\n" "5 0 3 20 0 0.000 0.000 0.000 E94 " "5:*-3-20-* Def\n" "0 7 3 20 0 0.000 0.400 0.400 C94 " "0:7-3-20-* Def\n" "0 7 3 20 5 0.000 0.000 -0.131 C94\n" "0 7 3 20 20 0.000 0.000 0.000 C94\n" "0 20 3 20 5 0.000 0.000 0.085 C94\n" "0 20 3 20 20 0.000 0.000 0.000 C94\n" "0 0 3 22 0 0.000 0.000 0.000 E94 " "0:*-3-22-* Def\n" "2 0 3 22 0 0.000 0.000 0.000 E94 " "2:*-3-22-* Def\n" "4 0 3 22 0 0.000 0.000 0.000 E94 " "4:*-3-22-* Def\n" "5 0 3 22 0 0.000 0.000 0.000 E94 " "5:*-3-22-* Def\n" "0 7 3 22 0 0.000 0.400 0.400 C94 " "0:7-3-22-* Def\n" "0 0 3 25 0 0.000 0.000 0.000 E94 " "0:*-3-25-* Def\n" "2 0 3 25 0 0.000 0.000 0.000 E94 " "2:*-3-25-* Def\n" "1 0 3 30 0 0.000 1.800 0.000 E94 " "1:*-3-30-* Def\n" "4 0 3 30 0 0.000 1.800 0.000 E94 " "4:*-3-30-* Def\n" "1 0 3 37 0 0.000 2.500 0.000 #E94 " "1:*-3-37-* Def\n" "4 0 3 37 0 0.000 1.800 0.000 E94 " "4:*-3-37-* Def\n" "1 1 3 37 37 0.000 2.428 
0.000 C94\n" "1 6 3 37 37 0.000 1.743 0.000 C94\n" "1 7 3 37 37 0.000 2.256 0.000 C94\n" "1 43 3 37 37 -0.241 3.385 -0.838 X94\n" "1 0 3 39 0 0.000 5.500 0.000 #E94 " "1:*-3-39-* Def\n" "0 0 3 40 0 0.000 3.900 0.000 C94 " "0:*-3-40-* Def\n" "2 0 3 40 0 0.000 3.600 0.000 E94 " "2:*-3-40-* Def\n" "5 0 3 40 0 0.000 3.600 0.000 E94 " "5:*-3-40-* Def\n" "0 5 3 40 28 -1.477 4.362 0.902 C94\n" "0 9 3 40 28 1.496 4.369 -0.417 C94\n" "0 40 3 40 28 0.178 3.149 0.778 C94\n" "0 0 3 41 0 0.000 1.800 0.000 E94 " "0:*-3-41-* Def\n" "2 0 3 41 0 0.000 1.800 0.000 E94 " "2:*-3-41-* Def\n" "0 0 3 43 0 0.000 4.500 0.000 C94 " "0:*-3-43-* Def\n" "2 0 3 43 0 0.000 3.600 0.000 E94 " "2:*-3-43-* Def\n" "4 0 3 43 0 0.000 3.600 0.000 E94 " "4:*-3-43-* Def\n" "5 0 3 43 0 0.000 3.600 0.000 E94 " "5:*-3-43-* Def\n" "0 1 3 43 18 1.712 3.309 0.233 X94\n" "0 1 3 43 28 -0.414 4.168 -0.875 X94\n" "0 7 3 43 18 -0.880 5.091 -0.129 X94\n" "0 7 3 43 28 0.536 5.276 -0.556 X94\n" "2 37 3 43 18 -0.701 4.871 1.225 X94\n" "2 37 3 43 28 -0.086 5.073 0.878 X94\n" "0 0 3 45 0 0.000 1.800 0.000 E94 " "0:*-3-45-* Def\n" "2 0 3 45 0 0.000 1.800 0.000 E94 " "2:*-3-45-* Def\n" "0 0 3 48 0 0.000 0.000 0.892 E94 " "0:*-3-48-* Def\n" "0 0 3 51 0 0.000 13.500 0.000 C94 " "0:*-3-51-* Def\n" "0 1 3 51 52 0.000 13.549 0.000 X94\n" "0 0 3 54 0 0.000 8.000 0.000 C94 " "0:*-3-54-* Def\n" "1 0 3 54 0 0.000 2.500 0.000 #E94 " "1:*-3-54-* Def\n" "5 0 3 54 0 0.000 12.000 0.000 E94 " "5:*-3-54-* Def\n" "0 5 3 54 1 0.000 8.000 0.000 C94\n" "0 5 3 54 36 0.000 8.000 0.000 C94\n" "0 0 3 55 0 0.000 4.800 0.000 E94 " "0:*-3-55-* Def\n" "2 0 3 55 0 0.000 4.800 0.000 E94 " "2:*-3-55-* Def\n" "0 0 3 56 0 0.000 4.800 0.000 E94 " "0:*-3-56-* Def\n" "2 0 3 56 0 0.000 4.800 0.000 E94 " "2:*-3-56-* Def\n" "1 0 3 57 0 0.000 2.500 0.000 #E94 " "1:*-3-57-* Def\n" "1 0 3 58 0 0.000 4.800 0.000 E94 " "1:*-3-58-* Def\n" "0 0 3 62 0 0.000 3.600 0.000 E94 " "0:*-3-62-* Def\n" "2 0 3 62 0 0.000 3.600 0.000 E94 " "2:*-3-62-* Def\n" "5 0 3 62 0 0.000 3.600 0.000 E94 " "5:*-3-62-* Def\n" "1 0 3 63 0 0.000 2.500 0.000 #E94 " "1:*-3-63-* Def\n" "1 0 3 64 0 0.000 2.500 0.000 #E94 " "1:*-3-64-* Def\n" "0 0 3 67 0 0.000 12.000 0.000 E94 " "0:*-3-67-* Def\n" "0 0 3 74 0 0.000 19.000 0.000 C94 " "0:*-3-74-* Def\n" "0 1 3 74 7 0.000 19.349 0.000 X94\n" "0 0 3 75 0 0.000 19.000 0.000 C94 " "0:*-3-75-* Def\n" "0 1 3 75 71 0.000 18.751 0.000 X94\n" "1 0 3 78 0 0.000 2.500 0.000 #E94 " "1:*-3-78-* Def\n" "1 0 3 80 0 0.000 2.500 0.000 #E94 " "1:*-3-80-* Def\n" "0 0 6 6 0 0.000 -2.000 0.000 E94 " "0:*-6-6-* Def\n" "5 0 6 6 0 0.000 -2.000 0.000 E94 " "5:*-6-6-* Def\n" "0 0 6 8 0 0.900 -1.100 -0.500 C94 " "0:*-6-8-* Def\n" "5 0 6 8 0 0.000 0.000 0.274 E94 " "5:*-6-8-* Def\n" "0 21 6 8 1 0.261 -0.330 -0.542 C94\n" "0 21 6 8 23 1.503 -1.853 -0.476 C94\n" "0 0 6 9 0 0.000 3.600 0.000 E94 " "0:*-6-9-* Def\n" "5 0 6 9 0 0.000 3.600 0.000 E94 " "5:*-6-9-* Def\n" "0 0 6 10 0 1.200 0.500 -1.000 C94 " "0:*-6-10-* Def\n" "0 21 6 10 1 0.875 0.180 -0.733 C94\n" "0 21 6 10 3 0.529 0.000 -1.163 C94\n" "0 0 6 15 0 0.000 -4.000 0.000 E94 " "0:*-6-15-* Def\n" "0 0 6 17 0 0.000 1.423 0.000 E94 " "0:*-6-17-* Def\n" "5 0 6 17 0 0.000 1.423 0.000 E94 " "5:*-6-17-* Def\n" "0 0 6 18 0 0.000 0.000 0.100 C94 " "0:*-6-18-* Def\n" "5 0 6 18 0 0.000 0.000 0.103 E94 " "5:*-6-18-* Def\n" "0 33 6 18 1 -0.520 -0.471 -0.267 X94\n" "0 33 6 18 6 -1.623 0.204 0.438 X94\n" "0 33 6 18 32 1.616 0.425 0.191 X94\n" "0 0 6 19 0 0.000 0.000 0.150 C94 " "0:*-6-19-* Def\n" "5 0 6 19 0 0.000 0.000 0.165 E94 " "5:*-6-19-* Def\n" "0 21 6 19 1 
-0.620 -0.329 0.303 X94\n" "0 21 6 19 5 0.683 0.220 0.000 X94\n" "0 0 6 20 0 0.000 0.000 0.400 C94 " "0:*-6-20-* Def\n" "4 0 6 20 0 0.000 0.000 0.217 E94 " "4:*-6-20-* Def\n" "5 0 6 20 0 0.000 0.000 0.217 E94 " "5:*-6-20-* Def\n" "0 20 6 20 5 0.000 0.000 -0.079 C94\n" "4 20 6 20 20 0.000 0.000 0.000 C94\n" "0 0 6 22 0 0.000 0.000 0.217 E94 " "0:*-6-22-* Def\n" "0 0 6 25 0 0.000 0.000 0.650 C94 " "0:*-6-25-* Def\n" "5 0 6 25 0 0.000 0.000 0.231 E94 " "5:*-6-25-* Def\n" "0 1 6 25 1 -1.704 -0.452 0.556 X94\n" "0 1 6 25 6 0.000 0.000 0.777 X94\n" "0 1 6 25 32 1.205 0.914 0.612 X94\n" "0 24 6 25 6 -3.209 -7.622 1.065 X94\n" "0 24 6 25 32 -5.891 -3.332 0.290 X94\n" "0 0 6 26 0 0.000 0.000 0.346 E94 " "0:*-6-26-* Def\n" "0 0 6 30 0 0.000 3.600 0.000 E94 " "0:*-6-30-* Def\n" "2 0 6 30 0 0.000 3.600 0.000 E94 " "2:*-6-30-* Def\n" "0 0 6 37 0 0.000 3.200 0.000 C94 " "0:*-6-37-* Def\n" "5 0 6 37 0 0.000 3.600 0.000 E94 " "5:*-6-37-* Def\n" "0 1 6 37 37 0.000 4.382 0.000 C94\n" "0 3 6 37 37 0.000 2.576 0.000 C94\n" "0 29 6 37 37 0.000 2.801 0.000 C94\n" "0 0 6 39 0 0.000 0.000 0.000 E94 " "0:*-6-39-* Def\n" "0 0 6 40 0 0.000 0.000 0.274 E94 " "0:*-6-40-* Def\n" "0 0 6 41 0 0.000 3.600 0.000 E94 " "0:*-6-41-* Def\n" "0 0 6 43 0 0.000 0.000 0.274 E94 " "0:*-6-43-* Def\n" "0 0 6 45 0 0.000 6.000 0.000 C94 " "0:*-6-45-* Def\n" "0 1 6 45 32 0.000 6.208 0.000 X94\n" "0 0 6 54 0 0.000 3.600 0.000 E94 " "0:*-6-54-* Def\n" "0 0 6 55 0 0.000 3.600 0.000 E94 " "0:*-6-55-* Def\n" "0 0 6 57 0 0.000 3.600 0.000 E94 " "0:*-6-57-* Def\n" "0 0 6 58 0 0.000 3.600 0.000 E94 " "0:*-6-58-* Def\n" "0 0 6 63 0 0.000 3.600 0.000 E94 " "0:*-6-63-* Def\n" "0 0 6 64 0 0.000 3.600 0.000 E94 " "0:*-6-64-* Def\n" "0 0 8 8 0 0.000 0.000 0.375 E94 " "0:*-8-8-* Def\n" "5 0 8 8 0 0.000 0.000 0.375 E94 " "5:*-8-8-* Def\n" "0 0 8 9 0 0.000 3.600 0.000 E94 " "0:*-8-9-* Def\n" "5 0 8 9 0 0.000 3.600 0.000 E94 " "5:*-8-9-* Def\n" "0 0 8 10 0 0.000 0.000 0.000 E94 " "0:*-8-10-* Def\n" "4 0 8 10 0 0.000 0.000 0.000 E94 " "4:*-8-10-* Def\n" "0 0 8 15 0 0.000 0.000 0.424 E94 " "0:*-8-15-* Def\n" "0 0 8 17 0 0.000 1.423 0.000 E94 " "0:*-8-17-* Def\n" "4 0 8 17 0 0.000 1.423 0.000 E94 " "4:*-8-17-* Def\n" "5 0 8 17 0 0.000 1.423 0.000 E94 " "5:*-8-17-* Def\n" "0 0 8 19 0 0.000 0.000 0.225 E94 " "0:*-8-19-* Def\n" "0 0 8 20 0 0.000 0.000 0.350 C94 " "0:*-8-20-* Def\n" "4 0 8 20 0 0.000 0.000 0.300 C94 " "4:*-8-20-* Def\n" "5 0 8 20 0 0.000 0.000 0.297 E94 " "5:*-8-20-* Def\n" "0 20 8 20 5 0.000 0.120 0.472 C94\n" "4 20 8 20 20 0.000 -0.097 0.200 C94\n" "0 23 8 20 5 -0.101 -0.324 0.371 C94\n" "0 23 8 20 20 0.107 0.253 0.151 C94\n" "0 0 8 22 0 0.000 0.000 0.297 E94 " "0:*-8-22-* Def\n" "0 0 8 25 0 0.000 0.000 0.316 E94 " "0:*-8-25-* Def\n" "5 0 8 25 0 0.000 0.000 0.316 E94 " "5:*-8-25-* Def\n" "0 0 8 26 0 0.000 0.000 0.474 E94 " "0:*-8-26-* Def\n" "5 0 8 26 0 0.000 0.000 0.474 E94 " "5:*-8-26-* Def\n" "0 0 8 34 0 0.000 0.000 0.250 E94 " "0:*-8-34-* Def\n" "0 0 8 39 0 0.000 0.000 0.000 E94 " "0:*-8-39-* Def\n" "0 0 8 40 0 0.000 0.000 0.375 E94 " "0:*-8-40-* Def\n" "0 0 8 43 0 0.000 0.000 0.375 E94 " "0:*-8-43-* Def\n" "0 0 8 45 0 0.000 3.600 0.000 E94 " "0:*-8-45-* Def\n" "0 0 8 46 0 0.000 3.600 0.000 E94 " "0:*-8-46-* Def\n" "0 0 8 55 0 0.000 3.600 0.000 E94 " "0:*-8-55-* Def\n" "0 0 8 56 0 0.000 3.600 0.000 E94 " "0:*-8-56-* Def\n" "0 0 9 9 0 0.000 12.000 0.000 E94 " "0:*-9-9-* Def\n" "1 0 9 9 0 0.000 1.800 0.000 E94 " "1:*-9-9-* Def\n" "5 0 9 9 0 0.000 12.000 0.000 E94 " "5:*-9-9-* Def\n" "0 0 9 10 0 0.000 6.000 0.000 E94 " "0:*-9-10-* Def\n" "5 
0 9 10 0 0.000 6.000 0.000 E94 " "5:*-9-10-* Def\n" "0 0 9 15 0 0.000 1.423 0.000 E94 " "0:*-9-15-* Def\n" "0 0 9 18 0 0.000 0.000 0.000 E94 " "0:*-9-18-* Def\n" "0 0 9 19 0 0.000 0.000 0.000 E94 " "0:*-9-19-* Def\n" "0 0 9 20 0 0.000 0.000 0.000 E94 " "0:*-9-20-* Def\n" "0 0 9 25 0 0.000 0.000 0.000 E94 " "0:*-9-25-* Def\n" "0 0 9 34 0 0.000 0.000 0.000 E94 " "0:*-9-34-* Def\n" "5 0 9 34 0 0.000 0.000 0.000 E94 " "5:*-9-34-* Def\n" "1 0 9 37 0 0.000 1.800 0.000 E94 " "1:*-9-37-* Def\n" "1 0 9 39 0 0.000 6.000 0.000 E94 " "1:*-9-39-* Def\n" "0 0 9 40 0 0.000 3.600 0.000 E94 " "0:*-9-40-* Def\n" "0 0 9 41 0 0.000 4.800 0.000 E94 " "0:*-9-41-* Def\n" "0 0 9 45 0 0.000 1.800 0.000 E94 " "0:*-9-45-* Def\n" "0 0 9 54 0 0.000 12.000 0.000 E94 " "0:*-9-54-* Def\n" "0 0 9 55 0 0.000 4.800 0.000 E94 " "0:*-9-55-* Def\n" "0 0 9 56 0 0.000 4.800 0.000 E94 " "0:*-9-56-* Def\n" "1 0 9 57 0 0.000 1.800 0.000 E94 " "1:*-9-57-* Def\n" "0 0 9 62 0 0.000 3.600 0.000 E94 " "0:*-9-62-* Def\n" "1 0 9 63 0 0.000 1.800 0.000 E94 " "1:*-9-63-* Def\n" "1 0 9 64 0 0.000 1.800 0.000 E94 " "1:*-9-64-* Def\n" "0 0 9 67 0 0.000 12.000 0.000 E94 " "0:*-9-67-* Def\n" "1 0 9 78 0 0.000 1.800 0.000 E94 " "1:*-9-78-* Def\n" "1 0 9 81 0 0.000 4.800 0.000 E94 " "1:*-9-81-* Def\n" "0 0 10 10 0 0.000 0.000 0.000 E94 " "0:*-10-10-* Def\n" "5 0 10 10 0 0.000 0.000 0.000 E94 " "5:*-10-10-* Def\n" "0 0 10 15 0 0.000 0.000 0.000 E94 " "0:*-10-15-* Def\n" "0 0 10 17 0 0.000 4.743 0.000 E94 " "0:*-10-17-* Def\n" "0 0 10 20 0 0.000 0.000 0.000 E94 " "0:*-10-20-* Def\n" "4 0 10 20 0 0.000 0.000 0.000 E94 " "4:*-10-20-* Def\n" "5 0 10 20 0 0.000 0.000 0.000 E94 " "5:*-10-20-* Def\n" "0 0 10 22 0 0.000 0.000 0.000 E94 " "0:*-10-22-* Def\n" "0 0 10 25 0 0.000 0.000 0.000 E94 " "0:*-10-25-* Def\n" "0 0 10 26 0 0.000 0.000 0.000 E94 " "0:*-10-26-* Def\n" "5 0 10 26 0 0.000 0.000 0.000 E94 " "5:*-10-26-* Def\n" "0 0 10 34 0 0.000 0.000 0.000 E94 " "0:*-10-34-* Def\n" "0 0 10 37 0 0.000 6.000 0.000 E94 " "0:*-10-37-* Def\n" "0 0 10 39 0 0.000 0.000 0.000 E94 " "0:*-10-39-* Def\n" "0 0 10 40 0 0.000 0.000 0.000 E94 " "0:*-10-40-* Def\n" "5 0 10 40 0 0.000 0.000 0.000 E94 " "5:*-10-40-* Def\n" "0 0 10 41 0 0.000 6.000 0.000 E94 " "0:*-10-41-* Def\n" "0 0 10 45 0 0.000 6.000 0.000 E94 " "0:*-10-45-* Def\n" "0 0 10 63 0 0.000 6.000 0.000 E94 " "0:*-10-63-* Def\n" "0 0 10 64 0 0.000 6.000 0.000 E94 " "0:*-10-64-* Def\n" "0 0 15 15 0 -1.400 -8.300 1.000 C94 " "0:*-15-15-* Def\n" "5 0 15 15 0 0.000 -8.000 0.000 E94 " "5:*-15-15-* Def\n" "0 1 15 15 1 -1.663 -8.408 1.433 C94\n" "0 1 15 15 71 -1.088 -8.245 0.411 C94\n" "0 0 15 18 0 0.000 0.000 0.160 E94 " "0:*-15-18-* Def\n" "0 0 15 19 0 0.000 0.000 0.255 E94 " "0:*-15-19-* Def\n" "5 0 15 19 0 0.000 0.000 0.255 E94 " "5:*-15-19-* Def\n" "0 0 15 20 0 0.000 0.000 0.336 E94 " "0:*-15-20-* Def\n" "4 0 15 20 0 0.000 0.000 0.336 E94 " "4:*-15-20-* Def\n" "0 0 15 22 0 0.000 0.000 0.336 E94 " "0:*-15-22-* Def\n" "0 0 15 25 0 0.000 0.000 0.358 E94 " "0:*-15-25-* Def\n" "4 0 15 25 0 0.000 0.000 0.358 E94 " "4:*-15-25-* Def\n" "0 0 15 26 0 0.000 0.000 0.537 E94 " "0:*-15-26-* Def\n" "0 0 15 30 0 0.000 1.423 0.000 E94 " "0:*-15-30-* Def\n" "4 0 15 30 0 0.000 1.423 0.000 E94 " "4:*-15-30-* Def\n" "0 0 15 37 0 0.000 1.300 0.000 C94 " "0:*-15-37-* Def\n" "5 0 15 37 0 0.000 1.423 0.000 E94 " "5:*-15-37-* Def\n" "0 1 15 37 37 0.000 2.177 0.000 C94\n" "0 71 15 37 37 0.000 0.505 0.333 C94\n" "0 0 15 40 0 0.000 0.000 0.424 E94 " "0:*-15-40-* Def\n" "0 0 15 43 0 0.000 0.000 0.424 E94 " "0:*-15-43-* Def\n" "0 0 15 57 0 0.000 
1.423 0.000 E94 " "0:*-15-57-* Def\n" "0 0 15 63 0 0.000 1.423 0.000 E94 " "0:*-15-63-* Def\n" "0 0 15 64 0 0.000 1.423 0.000 E94 " "0:*-15-64-* Def\n" "0 0 17 20 0 0.000 0.000 0.000 E94 " "0:*-17-20-* Def\n" "4 0 17 20 0 0.000 0.000 0.000 E94 " "4:*-17-20-* Def\n" "5 0 17 20 0 0.000 0.000 0.000 E94 " "5:*-17-20-* Def\n" "0 0 17 22 0 0.000 0.000 0.000 E94 " "0:*-17-22-* Def\n" "0 0 17 37 0 0.000 1.423 0.000 E94 " "0:*-17-37-* Def\n" "0 0 17 43 0 0.000 3.795 0.000 E94 " "0:*-17-43-* Def\n" "0 0 18 20 0 0.000 0.000 0.112 E94 " "0:*-18-20-* Def\n" "4 0 18 20 0 0.000 0.000 0.112 E94 " "4:*-18-20-* Def\n" "5 0 18 20 0 0.000 0.000 0.112 E94 " "5:*-18-20-* Def\n" "0 0 18 22 0 0.000 0.000 0.112 E94 " "0:*-18-22-* Def\n" "0 0 18 37 0 0.000 -1.200 -0.300 C94 " "0:*-18-37-* Def\n" "0 32 18 37 37 -0.173 -0.965 -0.610 X94\n" "0 39 18 37 37 0.000 -0.760 0.227 X94\n" "0 43 18 37 37 0.228 -1.741 -0.371 X94\n" "0 0 18 39 0 0.000 0.000 0.500 C94 " "0:*-18-39-* Def\n" "0 32 18 39 63 0.000 0.687 0.680 X94\n" "0 37 18 39 63 0.000 -0.513 0.357 X94\n" "0 0 18 43 0 0.000 0.000 0.350 C94 " "0:*-18-43-* Def\n" "4 0 18 43 0 0.000 0.000 0.141 E94 " "4:*-18-43-* Def\n" "5 0 18 43 0 0.000 0.000 0.141 E94 " "5:*-18-43-* Def\n" "0 1 18 43 1 -0.914 -0.482 0.179 X94\n" "0 1 18 43 3 -0.392 -2.724 0.312 X94\n" "0 1 18 43 28 -1.508 -1.816 -0.175 X94\n" "0 1 18 43 37 0.823 -1.220 -0.770 X94\n" "0 32 18 43 1 1.588 1.499 1.410 X94\n" "0 32 18 43 3 0.653 0.254 0.000 X94\n" "0 32 18 43 28 0.528 0.342 0.000 X94\n" "0 32 18 43 37 0.812 1.513 1.266 X94\n" "0 37 18 43 1 -1.139 -0.703 1.088 X94\n" "0 37 18 43 28 -2.014 -1.646 -2.068 X94\n" "0 37 18 43 37 -1.519 -0.328 1.437 X94\n" "0 43 18 43 28 3.011 -1.405 2.038 X94\n" "0 0 18 48 0 0.000 0.000 0.400 C94 " "0:*-18-48-* Def\n" "0 1 18 48 28 1.767 1.606 0.408 X94\n" "0 32 18 48 28 -1.463 -2.548 0.310 X94\n" "0 0 18 55 0 0.000 0.000 0.000 E94 " "0:*-18-55-* Def\n" "0 0 18 58 0 0.000 0.000 0.000 E94 " "0:*-18-58-* Def\n" "0 0 18 62 0 0.000 0.000 0.500 C94 " "0:*-18-62-* Def\n" "0 1 18 62 1 -0.403 -0.273 0.440 X94\n" "0 32 18 62 1 0.291 0.385 0.582 X94\n" "0 0 18 63 0 0.000 0.000 0.000 E94 " "0:*-18-63-* Def\n" "0 0 18 64 0 0.000 0.000 0.000 E94 " "0:*-18-64-* Def\n" "0 0 18 80 0 0.000 0.000 0.000 E94 " "0:*-18-80-* Def\n" "0 0 19 20 0 0.000 0.000 0.179 E94 " "0:*-19-20-* Def\n" "4 0 19 20 0 0.000 0.000 0.179 E94 " "4:*-19-20-* Def\n" "0 0 19 37 0 0.000 0.000 0.000 E94 " "0:*-19-37-* Def\n" "0 0 19 40 0 0.000 0.000 0.225 E94 " "0:*-19-40-* Def\n" "0 0 19 63 0 0.000 0.000 0.000 E94 " "0:*-19-63-* Def\n" "0 0 19 75 0 0.000 0.000 0.000 E94 " "0:*-19-75-* Def\n" "0 0 20 20 0 0.000 0.000 0.200 C94 " "0:*-20-20-* Def\n" "4 0 20 20 0 0.000 0.000 0.000 C94 " "4:*-20-20-* Def\n" "5 0 20 20 0 0.000 0.000 0.236 E94 " "5:*-20-20-* Def\n" "0 1 20 20 5 0.067 0.081 0.347 C94\n" "0 1 20 20 20 -0.063 -0.064 0.140 C94\n" "0 3 20 20 5 0.000 0.000 0.083 C94\n" "0 3 20 20 20 0.000 0.000 0.000 C94\n" "0 5 20 20 5 0.000 0.000 0.424 C94\n" "0 5 20 20 6 0.000 0.000 -0.080 C94\n" "0 5 20 20 8 0.000 0.127 0.450 C94\n" "0 5 20 20 12 -0.072 -0.269 0.439 C94\n" "0 5 20 20 20 -0.057 0.000 0.307 C94\n" "4 6 20 20 20 0.000 0.000 0.000 C94\n" "4 8 20 20 20 0.000 -0.091 0.192 C94\n" "0 12 20 20 20 0.077 0.202 0.183 C94\n" "4 20 20 20 20 0.000 0.000 0.000 C94\n" "0 0 20 22 0 0.000 0.000 0.236 E94 " "0:*-20-22-* Def\n" "4 0 20 22 0 0.000 0.000 0.236 E94 " "4:*-20-22-* Def\n" "0 0 20 25 0 0.000 0.000 0.251 E94 " "0:*-20-25-* Def\n" "4 0 20 25 0 0.000 0.000 0.251 E94 " "4:*-20-25-* Def\n" "0 0 20 26 0 0.000 0.000 0.376 E94 " 
"0:*-20-26-* Def\n" "4 0 20 26 0 0.000 0.000 0.376 E94 " "4:*-20-26-* Def\n" "5 0 20 26 0 0.000 0.000 0.376 E94 " "5:*-20-26-* Def\n" "0 0 20 30 0 0.000 0.000 0.000 E94 " "0:*-20-30-* Def\n" "2 0 20 30 0 0.000 0.000 0.000 E94 " "2:*-20-30-* Def\n" "4 0 20 30 0 0.000 0.000 0.000 E94 " "4:*-20-30-* Def\n" "0 0 20 30 30 0.000 0.000 -0.500 C94 " "0:*-20-30=30 Def\n" "0 0 20 34 0 0.000 0.000 0.198 E94 " "0:*-20-34-* Def\n" "4 0 20 34 0 0.000 0.000 0.198 E94 " "4:*-20-34-* Def\n" "0 0 20 37 0 0.000 0.000 0.000 E94 " "0:*-20-37-* Def\n" "4 0 20 37 0 0.000 0.000 0.000 E94 " "4:*-20-37-* Def\n" "0 0 20 40 0 0.000 0.000 0.297 E94 " "0:*-20-40-* Def\n" "0 0 20 41 0 0.000 0.000 0.000 E94 " "0:*-20-41-* Def\n" "0 0 20 43 0 0.000 0.000 0.297 E94 " "0:*-20-43-* Def\n" "4 0 20 43 0 0.000 0.000 0.297 E94 " "4:*-20-43-* Def\n" "0 0 20 45 0 0.000 0.000 0.000 E94 " "0:*-20-45-* Def\n" "0 0 22 22 0 0.000 0.000 0.236 E94 " "0:*-22-22-* Def\n" "4 0 22 22 0 0.000 0.000 0.236 E94 " "4:*-22-22-* Def\n" "5 0 22 22 0 0.000 0.000 0.236 E94 " "5:*-22-22-* Def\n" "0 0 22 30 0 0.000 0.000 0.000 E94 " "0:*-22-30-* Def\n" "4 0 22 30 0 0.000 0.000 0.000 E94 " "4:*-22-30-* Def\n" "0 0 22 34 0 0.000 0.000 0.198 E94 " "0:*-22-34-* Def\n" "0 0 22 37 0 0.000 0.000 0.000 E94 " "0:*-22-37-* Def\n" "0 0 22 40 0 0.000 0.000 0.297 E94 " "0:*-22-40-* Def\n" "0 0 22 41 0 0.000 0.000 0.000 E94 " "0:*-22-41-* Def\n" "0 0 22 43 0 0.000 0.000 0.297 E94 " "0:*-22-43-* Def\n" "5 0 22 43 0 0.000 0.000 0.297 E94 " "5:*-22-43-* Def\n" "0 0 22 45 0 0.000 0.000 0.000 E94 " "0:*-22-45-* Def\n" "0 0 25 25 0 0.000 0.000 0.267 E94 " "0:*-25-25-* Def\n" "0 0 25 37 0 0.000 0.000 0.000 E94 " "0:*-25-37-* Def\n" "5 0 25 37 0 0.000 0.000 0.000 E94 " "5:*-25-37-* Def\n" "0 0 25 39 0 0.000 0.000 0.000 E94 " "0:*-25-39-* Def\n" "0 0 25 40 0 0.000 0.000 0.316 E94 " "0:*-25-40-* Def\n" "5 0 25 40 0 0.000 0.000 0.316 E94 " "5:*-25-40-* Def\n" "0 0 25 43 0 0.000 0.000 0.250 C94 " "0:*-25-43-* Def\n" "0 1 25 43 1 -2.686 -1.512 0.591 X94\n" "0 1 25 43 28 -3.730 -0.531 0.000 X94\n" "0 32 25 43 1 2.108 1.896 0.965 X94\n" "0 32 25 43 28 2.977 0.732 -0.502 X94\n" "0 0 25 57 0 0.000 0.000 0.000 E94 " "0:*-25-57-* Def\n" "0 0 25 63 0 0.000 0.000 0.000 E94 " "0:*-25-63-* Def\n" "0 0 26 26 0 0.000 0.000 0.600 E94 " "0:*-26-26-* Def\n" "5 0 26 26 0 0.000 0.000 0.600 E94 " "5:*-26-26-* Def\n" "0 0 26 34 0 0.000 0.000 0.316 E94 " "0:*-26-34-* Def\n" "5 0 26 34 0 0.000 0.000 0.316 E94 " "5:*-26-34-* Def\n" "0 0 26 37 0 0.000 1.423 0.000 E94 " "0:*-26-37-* Def\n" "0 0 26 40 0 0.000 0.000 0.474 E94 " "0:*-26-40-* Def\n" "0 0 30 30 0 0.000 12.000 0.000 E94 " "0:*-30-30-* Def\n" "1 0 30 30 0 0.000 1.800 0.000 E94 " "1:*-30-30-* Def\n" "4 0 30 30 0 0.000 1.800 0.000 E94 " "4:*-30-30-* Def\n" "0 0 30 40 0 0.000 3.600 0.000 E94 " "0:*-30-40-* Def\n" "1 0 30 67 0 0.000 1.800 0.000 E94 " "1:*-30-67-* Def\n" "0 0 34 37 0 0.000 0.000 0.000 E94 " "0:*-34-37-* Def\n" "0 0 34 43 0 0.000 0.000 0.250 E94 " "0:*-34-43-* Def\n" "0 0 37 37 0 0.000 7.000 0.000 C94 " "0:*-37-37-* Def\n" "1 0 37 37 0 0.000 2.000 0.000 #E94 " "1:*-37-37-* Def\n" "4 0 37 37 0 0.000 6.000 0.000 E94 " "4:*-37-37-* Def\n" "5 0 37 37 0 0.000 6.000 0.000 E94 " "5:*-37-37-* Def\n" "0 1 37 37 5 0.000 7.000 0.000 C94\n" "0 1 37 37 37 0.000 7.000 0.000 C94\n" "0 2 37 37 5 0.000 7.000 0.000 C94\n" "0 2 37 37 37 0.000 7.000 0.000 C94\n" "0 3 37 37 5 0.000 7.000 0.000 C94\n" "0 3 37 37 37 0.000 7.000 0.000 C94\n" "0 5 37 37 5 0.000 7.000 0.000 C94\n" "0 5 37 37 6 0.000 7.000 0.000 C94\n" "0 5 37 37 15 0.000 7.000 0.000 C94\n" 
"0 5 37 37 18 0.000 7.000 0.000 X94\n" "0 5 37 37 37 0.000 7.000 0.000 C94\n" "0 5 37 37 40 0.000 7.000 0.000 C94\n" "0 5 37 37 43 0.000 7.000 0.000 X94\n" "0 6 37 37 37 0.000 7.000 0.000 C94\n" "0 15 37 37 37 0.000 7.000 0.000 C94\n" "0 18 37 37 37 0.000 7.000 0.000 X94\n" "0 37 37 37 37 0.000 7.000 0.000 C94\n" "0 37 37 37 40 0.000 7.000 0.000 C94\n" "0 37 37 37 43 0.000 7.000 0.000 X94\n" "0 0 37 38 0 0.000 7.000 0.000 C94 " "0:*-37-38-* Def\n" "0 0 37 39 0 0.000 3.600 0.000 E94 " "0:*-37-39-* Def\n" "1 0 37 39 0 0.000 6.000 0.000 E94 " "1:*-37-39-* Def\n" "0 0 37 40 0 0.000 4.000 0.000 C94 " "0:*-37-40-* Def\n" "5 0 37 40 0 0.000 3.600 0.000 E94 " "5:*-37-40-* Def\n" "0 37 37 40 1 0.000 4.336 0.370 C94\n" "0 37 37 40 28 0.715 2.628 3.355 C94\n" "0 0 37 41 0 0.000 1.800 0.000 E94 " "0:*-37-41-* Def\n" "0 0 37 43 0 0.000 2.000 1.800 C94 " "0:*-37-43-* Def\n" "5 0 37 43 0 0.000 3.600 0.000 E94 " "5:*-37-43-* Def\n" "0 37 37 43 18 0.372 2.284 2.034 X94\n" "0 37 37 43 28 0.000 1.694 1.508 X94\n" "0 0 37 45 0 0.000 1.800 0.000 E94 " "0:*-37-45-* Def\n" "0 0 37 46 0 0.000 1.800 0.000 E94 " "0:*-37-46-* Def\n" "0 0 37 55 0 0.000 4.800 0.000 E94 " "0:*-37-55-* Def\n" "0 0 37 56 0 0.000 4.800 0.000 E94 " "0:*-37-56-* Def\n" "1 0 37 57 0 0.000 1.800 0.000 E94 " "1:*-37-57-* Def\n" "0 0 37 58 0 0.000 6.000 0.000 E94 " "0:*-37-58-* Def\n" "1 0 37 58 0 0.000 4.800 0.000 E94 " "1:*-37-58-* Def\n" "0 0 37 62 0 0.000 3.600 0.000 E94 " "0:*-37-62-* Def\n" "0 0 37 63 0 0.000 7.000 0.000 C94 " "0:*-37-63-* Def\n" "1 0 37 63 0 0.000 1.800 0.000 E94 " "1:*-37-63-* Def\n" "0 0 37 64 0 0.000 7.000 0.000 C94 " "0:*-37-64-* Def\n" "1 0 37 64 0 0.000 1.800 0.000 E94 " "1:*-37-64-* Def\n" "1 0 37 67 0 0.000 1.800 0.000 E94 " "1:*-37-67-* Def\n" "0 0 37 69 0 0.000 7.000 0.000 C94 " "0:*-37-69-* Def\n" "0 0 37 78 0 0.000 6.000 0.000 E94 " "0:*-37-78-* Def\n" "0 0 37 81 0 0.000 6.000 0.000 E94 " "0:*-37-81-* Def\n" "1 0 37 81 0 0.000 4.800 0.000 E94 " "1:*-37-81-* Def\n" "0 0 38 38 0 0.000 7.000 0.000 C94 " "0:*-38-38-* Def\n" "0 0 38 58 0 0.000 7.000 0.000 C94 " "0:*-38-58-* Def\n" "0 0 38 63 0 0.000 7.000 0.000 C94 " "0:*-38-63-* Def\n" "0 0 38 64 0 0.000 7.000 0.000 C94 " "0:*-38-64-* Def\n" "0 0 38 69 0 0.000 6.000 0.000 E94 " "0:*-38-69-* Def\n" "0 0 38 78 0 0.000 6.000 0.000 E94 " "0:*-38-78-* Def\n" "0 0 39 40 0 0.000 0.000 0.000 E94 " "0:*-39-40-* Def\n" "0 0 39 45 0 0.000 6.000 0.000 E94 " "0:*-39-45-* Def\n" "0 0 39 63 0 0.000 4.000 0.000 C94 " "0:*-39-63-* Def\n" "1 0 39 63 0 0.000 6.000 0.000 E94 " "1:*-39-63-* Def\n" "5 0 39 63 0 0.000 3.600 0.000 E94 " "5:*-39-63-* Def\n" "0 1 39 63 5 0.000 4.000 0.000 C94\n" "0 1 39 63 64 0.000 4.000 0.000 C94\n" "0 18 39 63 5 0.000 4.000 0.000 X94\n" "0 18 39 63 64 0.000 4.000 0.000 X94\n" "0 63 39 63 5 0.000 4.000 0.000 C94\n" "0 63 39 63 64 0.000 4.000 0.000 C94\n" "0 0 39 64 0 0.000 3.600 0.000 E94 " "0:*-39-64-* Def\n" "1 0 39 64 0 0.000 6.000 0.000 E94 " "1:*-39-64-* Def\n" "0 0 39 65 0 0.000 4.000 0.000 C94 " "0:*-39-65-* Def\n" "0 0 39 78 0 0.000 3.600 0.000 E94 " "0:*-39-78-* Def\n" "0 0 40 40 0 0.000 0.000 0.375 E94 " "0:*-40-40-* Def\n" "0 0 40 45 0 0.000 3.600 0.000 E94 " "0:*-40-45-* Def\n" "0 0 40 46 0 0.000 3.600 0.000 E94 " "0:*-40-46-* Def\n" "0 0 40 54 0 0.000 3.600 0.000 E94 " "0:*-40-54-* Def\n" "2 0 40 54 0 0.000 3.600 0.000 E94 " "2:*-40-54-* Def\n" "0 0 40 63 0 0.000 3.600 0.000 E94 " "0:*-40-63-* Def\n" "0 0 40 64 0 0.000 3.600 0.000 E94 " "0:*-40-64-* Def\n" "0 0 40 78 0 0.000 3.600 0.000 E94 " "0:*-40-78-* Def\n" "0 0 41 41 0 0.000 1.800 
0.000 E94 " "0:*-41-41-* Def\n" "0 0 41 55 0 0.000 4.800 0.000 E94 " "0:*-41-55-* Def\n" "0 0 41 62 0 0.000 3.600 0.000 E94 " "0:*-41-62-* Def\n" "0 0 41 80 0 0.000 1.800 0.000 E94 " "0:*-41-80-* Def\n" "0 0 43 43 0 0.000 0.000 0.375 E94 " "0:*-43-43-* Def\n" "0 0 43 45 0 0.000 3.600 0.000 E94 " "0:*-43-45-* Def\n" "0 0 43 64 0 0.000 3.600 0.000 E94 " "0:*-43-64-* Def\n" "0 0 44 57 0 0.000 7.000 0.000 C94 " "0:*-44-57-* Def\n" "0 0 44 63 0 0.000 7.000 0.000 C94 " "0:*-44-63-* Def\n" "0 0 44 65 0 0.000 7.000 0.000 C94 " "0:*-44-65-* Def\n" "0 0 44 78 0 0.000 2.846 0.000 E94 " "0:*-44-78-* Def\n" "0 0 44 80 0 0.000 2.846 0.000 E94 " "0:*-44-80-* Def\n" "0 0 45 63 0 0.000 1.800 0.000 E94 " "0:*-45-63-* Def\n" "0 0 45 64 0 0.000 1.800 0.000 E94 " "0:*-45-64-* Def\n" "0 0 45 78 0 0.000 1.800 0.000 E94 " "0:*-45-78-* Def\n" "0 0 55 57 0 0.000 10.000 0.000 C94 " "0:*-55-57-* Def\n" "2 0 55 57 0 0.000 4.800 0.000 E94 " "2:*-55-57-* Def\n" "5 0 55 57 0 0.000 4.800 0.000 E94 " "5:*-55-57-* Def\n" "0 1 55 57 5 0.423 12.064 0.090 C94\n" "0 1 55 57 55 -0.428 12.044 0.000 C94\n" "0 36 55 57 5 -0.268 8.077 -0.806 C94\n" "0 36 55 57 55 0.273 8.025 0.692 C94\n" "0 0 55 62 0 0.000 3.600 0.000 E94 " "0:*-55-62-* Def\n" "0 0 55 64 0 0.000 4.800 0.000 E94 " "0:*-55-64-* Def\n" "0 0 55 80 0 0.000 4.800 0.000 E94 " "0:*-55-80-* Def\n" "0 0 56 57 0 0.000 6.000 0.000 C94 " "0:*-56-57-* Def\n" "0 1 56 57 56 0.000 6.886 -0.161 C94\n" "0 36 56 57 56 0.000 4.688 0.107 C94\n" "0 0 56 63 0 0.000 4.800 0.000 E94 " "0:*-56-63-* Def\n" "0 0 56 80 0 0.000 4.800 0.000 E94 " "0:*-56-80-* Def\n" "1 0 57 63 0 0.000 1.800 0.000 E94 " "1:*-57-63-* Def\n" "1 0 57 64 0 0.000 1.800 0.000 E94 " "1:*-57-64-* Def\n" "0 0 58 63 0 0.000 6.000 0.000 E94 " "0:*-58-63-* Def\n" "0 0 58 64 0 0.000 6.000 0.000 E94 " "0:*-58-64-* Def\n" "0 0 59 63 0 0.000 7.000 0.000 C94 " "0:*-59-63-* Def\n" "0 0 59 65 0 0.000 7.000 0.000 C94 " "0:*-59-65-* Def\n" "0 0 59 78 0 0.000 3.600 0.000 E94 " "0:*-59-78-* Def\n" "0 0 59 80 0 0.000 3.600 0.000 E94 " "0:*-59-80-* Def\n" "0 0 59 82 0 0.000 3.600 0.000 E94 " "0:*-59-82-* Def\n" "0 0 62 63 0 0.000 3.600 0.000 E94 " "0:*-62-63-* Def\n" "0 0 62 64 0 0.000 3.600 0.000 E94 " "0:*-62-64-* Def\n" "1 0 63 63 0 0.000 1.800 0.000 E94 " "1:*-63-63-* Def\n" "0 0 63 64 0 0.000 7.000 0.000 C94 " "0:*-63-64-* Def\n" "0 5 63 64 5 0.000 7.000 0.000 C94\n" "0 5 63 64 64 0.000 7.000 0.000 C94\n" "0 39 63 64 5 0.000 7.000 0.000 C94\n" "0 39 63 64 64 0.000 7.000 0.000 C94\n" "0 0 63 66 0 0.000 7.000 0.000 C94 " "0:*-63-66-* Def\n" "0 0 63 78 0 0.000 6.000 0.000 E94 " "0:*-63-78-* Def\n" "0 0 63 81 0 0.000 6.000 0.000 E94 " "0:*-63-81-* Def\n" "0 0 64 64 0 0.000 7.000 0.000 C94 " "0:*-64-64-* Def\n" "1 0 64 64 0 0.000 1.800 0.000 E94 " "1:*-64-64-* Def\n" "0 5 64 64 5 0.000 7.000 0.000 C94\n" "0 5 64 64 63 0.000 7.000 0.000 C94\n" "0 63 64 64 63 0.000 7.000 0.000 C94\n" "0 0 64 65 0 0.000 7.000 0.000 C94 " "0:*-64-65-* Def\n" "0 0 64 66 0 0.000 7.000 0.000 C94 " "0:*-64-66-* Def\n" "0 0 64 78 0 0.000 6.000 0.000 E94 " "0:*-64-78-* Def\n" "0 0 64 81 0 0.000 6.000 0.000 E94 " "0:*-64-81-* Def\n" "5 0 64 81 0 0.000 6.000 0.000 E94 " "5:*-64-81-* Def\n" "0 0 64 82 0 0.000 6.000 0.000 E94 " "0:*-64-82-* Def\n" "0 0 65 66 0 0.000 7.000 0.000 C94 " "0:*-65-66-* Def\n" "0 0 65 78 0 0.000 6.000 0.000 E94 " "0:*-65-78-* Def\n" "0 0 65 81 0 0.000 6.000 0.000 E94 " "0:*-65-81-* Def\n" "0 0 65 82 0 0.000 6.000 0.000 E94 " "0:*-65-82-* Def\n" "0 0 66 66 0 0.000 7.000 0.000 C94 " "0:*-66-66-* Def\n" "0 0 66 78 0 0.000 6.000 0.000 E94 " 
"0:*-66-78-* Def\n" "0 0 66 81 0 0.000 6.000 0.000 E94 " "0:*-66-81-* Def\n" "0 0 67 67 0 0.000 12.000 0.000 E94 " "0:*-67-67-* Def\n" "5 0 67 67 0 0.000 12.000 0.000 E94 " "5:*-67-67-* Def\n" "0 0 76 76 0 0.000 3.600 0.000 E94 " "0:*-76-76-* Def\n" "0 0 76 78 0 0.000 3.600 0.000 E94 " "0:*-76-78-* Def\n" "0 0 78 78 0 0.000 7.000 0.000 C94 " "0:*-78-78-* Def\n" "0 0 78 79 0 0.000 6.000 0.000 E94 " "0:*-78-79-* Def\n" "0 0 78 81 0 0.000 4.000 0.000 C94 " "0:*-78-81-* Def\n" "0 0 79 79 0 0.000 6.000 0.000 E94 " "0:*-79-79-* Def\n" "0 0 79 81 0 0.000 6.000 0.000 E94 " "0:*-79-81-* Def\n" "0 0 80 81 0 0.000 4.000 0.000 C94 " "0:*-80-81-* Def\n"; const std::string defaultMMFFsTor = "*\n" "* Copyright (c) Merck and Co., Inc., 1994,1995,1996,1997,1998,1999\n" "* All Rights Reserved\n" "*\n" "* MMFF94s TORSION PARAMETERS\n" "* C94 - CORE MMFF94 parameter - from fits to conformational energies\n" "* C94S - CORE MMFF94s parameter - from fits to conformational energies\n" "* X94 - EXTD MMFF94 parameter - also from fits to conformational E's\n" "* E94 - EXTD MMFF94 parameter - from empirical rule\n" "* #E94 - Adjusted from empirical rule value\n" "*\n" "* atom types V1 V2 V3 Source\n" "0 0 1 1 0 0.000 0.000 0.300 C94 " "0:*-1-1-* Def\n" "5 0 1 1 0 0.200 -0.800 1.500 C94 " "5:*-1-1-* Def\n" "0 1 1 1 1 0.103 0.681 0.332 C94\n" "5 1 1 1 1 0.144 -0.547 1.126 C94\n" "0 1 1 1 2 -0.295 0.438 0.584 C94\n" "0 1 1 1 3 0.066 -0.156 0.143 C94\n" "0 1 1 1 5 0.639 -0.630 0.264 C94\n" "0 1 1 1 6 -0.688 1.757 0.477 C94\n" "5 1 1 1 6 0.000 0.000 0.054 C94\n" "0 1 1 1 8 -1.420 -0.092 1.101 C94\n" "5 1 1 1 8 0.000 -0.158 0.323 C94\n" "0 1 1 1 11 0.593 0.662 1.120 C94\n" "0 1 1 1 12 -0.678 0.417 0.624 C94\n" "0 1 1 1 15 -0.714 0.698 0.000 C94\n" "0 1 1 1 34 -0.647 0.550 0.590 C94\n" "0 2 1 1 5 0.321 -0.411 0.144 C94\n" "0 3 1 1 3 0.443 0.000 -1.140 C94\n" "0 3 1 1 5 -0.256 0.058 0.000 C94\n" "0 3 1 1 6 -0.679 -0.029 0.000 C94\n" "0 5 1 1 5 0.284 -1.386 0.314 C94\n" "0 5 1 1 6 -0.654 1.072 0.279 C94\n" "0 5 1 1 8 -0.744 -1.235 0.337 C94\n" "0 5 1 1 10 0.000 0.000 0.418 C94S\n" "0 5 1 1 11 0.000 0.516 0.291 C94\n" "0 5 1 1 12 0.678 -0.602 0.398 C94\n" "0 5 1 1 15 1.142 -0.644 0.367 C94\n" "0 5 1 1 25 0.000 0.000 0.295 X94\n" "0 5 1 1 34 0.692 -0.530 0.278 C94\n" "0 5 1 1 37 0.000 0.000 0.389 C94\n" "0 5 1 1 39 0.000 0.000 0.278 C94\n" "0 5 1 1 41 0.000 0.000 -0.141 C94\n" "0 5 1 1 56 0.000 0.000 0.324 C94\n" "0 5 1 1 68 0.000 0.000 0.136 C94\n" "0 6 1 1 6 0.408 1.397 0.961 C94\n" "5 6 1 1 6 0.313 -1.035 1.631 C94\n" "0 8 1 1 8 1.055 0.834 0.000 C94\n" "0 11 1 1 11 -0.387 -0.543 1.405 C94\n" "0 12 1 1 12 0.000 0.000 0.893 C94\n" "0 15 1 1 15 -0.177 0.000 0.049 C94\n" "0 0 1 2 0 0.000 0.000 0.000 C94 " "0:*-1-2-* Def\n" "2 0 1 2 0 0.000 0.000 0.000 E94 " "2:*1-2-* Def\n" "5 0 1 2 0 0.000 0.000 0.000 C94 " "5:*-1-2-* Def\n" "0 0 1 2 2 0.000 0.000 -0.650 C94 " "0:*-1-2=2 Def\n" "5 0 1 2 2 0.000 0.000 -0.650 C94 " "5:*-1-2=2 Def\n" "0 1 1 2 1 0.419 0.296 0.282 C94\n" "0 1 1 2 2 -0.494 0.274 -0.630 C94\n" "0 1 1 2 5 0.075 0.000 0.358 C94\n" "0 2 1 2 2 -0.293 0.115 -0.508 C94\n" "0 2 1 2 5 0.301 0.104 0.507 C94\n" "0 3 1 2 1 0.565 -0.554 0.234 C94\n" "0 3 1 2 2 -0.577 -0.482 -0.427 C94\n" "0 3 1 2 5 0.082 0.000 0.123 C94\n" "0 5 1 2 1 0.000 -0.184 0.220 C94\n" "0 5 1 2 2 0.501 -0.410 -0.535 C94\n" "2 5 1 2 2 0.000 0.000 0.055 C94\n" "2 5 1 2 3 0.000 0.000 -0.108 C94\n" "0 5 1 2 5 -0.523 -0.228 0.208 C94\n" "2 5 1 2 37 0.000 0.000 0.000 C94\n" "0 6 1 2 1 -0.467 0.000 0.490 C94\n" "0 6 1 2 2 0.425 0.168 -0.875 C94\n" "0 6 1 2 5 
0.000 0.136 0.396 C94\n" "0 8 1 2 1 -0.504 0.371 0.557 C94\n" "0 8 1 2 2 0.541 0.539 -1.009 C94\n" "0 8 1 2 5 0.000 0.204 0.464 C94\n" "0 0 1 3 0 0.000 0.400 0.300 C94 " "0:*-1-3-* Def\n" "2 0 1 3 0 0.000 0.500 0.350 C94 " "2:*-1-3-* Def\n" "5 0 1 3 0 0.000 0.000 0.000 E94 " "5:*1-3-* Def\n" "0 0 1 3 1 0.000 0.000 0.550 C94 " "0:*-1-3-1 Def\n" "0 0 1 3 5 0.000 0.200 0.700 C94 " "0:*-1-3-5 Def\n" "0 0 1 3 7 0.000 0.400 0.400 C94 " "0:*-1-3-7 Def\n" "0 1 1 3 1 0.103 0.177 0.545 C94\n" "0 1 1 3 5 -0.072 0.316 0.674 C94\n" "0 1 1 3 6 -0.117 -0.333 0.202 C94\n" "0 1 1 3 7 0.825 0.139 0.325 C94\n" "0 1 1 3 10 -0.763 1.244 0.986 C94S\n" "0 2 1 3 5 0.663 -0.167 0.426 C94\n" "0 2 1 3 7 -0.758 0.112 0.563 C94\n" "0 5 1 3 1 -0.073 0.085 0.531 C94\n" "2 5 1 3 2 0.000 0.000 0.115 C94\n" "2 5 1 3 3 0.000 0.000 0.446 C94\n" "0 5 1 3 5 -0.822 0.501 1.008 C94\n" "0 5 1 3 6 0.000 -0.624 0.330 C94\n" "0 5 1 3 7 0.659 -1.407 0.308 C94\n" "0 5 1 3 10 -0.687 1.244 0.136 C94S\n" "2 5 1 3 37 0.000 0.000 0.056 C94\n" "0 5 1 3 43 0.000 1.027 0.360 X94\n" "0 5 1 3 51 0.000 1.543 0.350 X94\n" "0 5 1 3 53 0.000 0.501 0.000 X94\n" "0 5 1 3 74 0.000 0.513 -0.344 X94\n" "0 5 1 3 75 0.000 0.511 -0.186 X94\n" "0 6 1 3 6 0.447 0.652 0.318 C94\n" "0 6 1 3 7 -0.395 0.730 -0.139 C94\n" "0 10 1 3 7 0.530 2.905 2.756 C94S\n" "0 10 1 3 10 0.465 -0.241 1.850 C94S\n" "0 0 1 4 0 0.000 0.000 0.000 C94 " "0:*-1-4-* Def\n" "0 0 1 6 0 0.000 0.000 0.200 C94 " "0:*-1-6-* Def\n" "5 0 1 6 0 0.000 -0.200 0.400 C94 " "5:*-1-6-* Def\n" "0 1 1 6 1 -0.681 0.755 0.755 C94\n" "5 1 1 6 1 0.000 0.243 -0.596 C94\n" "0 1 1 6 3 -0.547 0.000 0.320 C94\n" "0 1 1 6 21 0.000 0.270 0.237 C94\n" "0 2 1 6 21 0.102 0.460 -0.128 C94\n" "0 3 1 6 21 -1.652 -1.660 0.283 C94\n" "0 5 1 6 1 0.571 0.319 0.570 C94\n" "0 5 1 6 2 0.000 0.000 0.306 C94\n" "0 5 1 6 3 0.572 0.000 -0.304 C94\n" "0 5 1 6 21 0.596 -0.276 0.346 C94\n" "0 5 1 6 25 0.000 0.000 0.061 X94\n" "0 5 1 6 37 0.000 0.000 0.106 C94\n" "0 5 1 6 45 0.000 0.000 -0.174 X94\n" "0 6 1 6 1 0.229 -0.710 0.722 C94\n" "5 6 1 6 1 0.000 0.000 0.040 C94\n" "0 6 1 6 21 1.488 -3.401 -0.320 C94\n" "0 37 1 6 21 0.712 1.320 -0.507 C94\n" "0 0 1 8 0 0.000 -0.300 0.500 C94 " "0:*-1-8-* Def\n" "5 0 1 8 0 0.000 0.000 0.297 E94 " "5:*1-8-* Def\n" "0 1 1 8 1 -0.439 0.786 0.272 C94\n" "5 1 1 8 1 0.115 -0.390 0.658 C94\n" "0 1 1 8 6 -0.608 0.339 1.496 C94\n" "0 1 1 8 23 -0.428 0.323 0.280 C94\n" "0 2 1 8 23 0.594 -0.409 0.155 C94\n" "0 5 1 8 1 0.393 -0.385 0.562 C94\n" "0 5 1 8 6 0.598 -0.158 0.399 C94\n" "0 5 1 8 23 -0.152 -0.440 0.357 C94\n" "0 0 1 9 0 0.000 0.000 0.000 C94 " "0:*-1-9-* Def\n" "5 0 1 9 0 0.000 0.000 0.000 E94 " "5:*1-9-* Def\n" "0 5 1 9 3 0.204 -0.335 -0.352 C94\n" "0 5 1 9 53 0.000 0.000 0.097 X94\n" "0 0 1 10 0 0.000 0.000 0.300 C94 " "0:*-1-10-* Def\n" "5 0 1 10 0 0.000 0.000 0.000 E94 " "5:*1-10-* Def\n" "0 0 1 10 3 0.000 0.000 1.000 C94 " "0:*-1-10-3 Def\n" "0 1 1 10 3 -0.884 0.578 0.818 C94S\n" "0 1 1 10 6 0.000 -0.379 0.565 C94S\n" "0 1 1 10 28 0.750 -0.404 0.369 C94S\n" "0 3 1 10 3 3.219 -2.699 1.875 C94S\n" "0 3 1 10 28 0.207 0.461 0.324 C94S\n" "0 5 1 10 1 0.000 0.000 0.706 C94S\n" "0 5 1 10 3 -2.334 1.517 -0.065 C94S\n" "0 5 1 10 6 0.000 0.688 0.665 C94S\n" "0 5 1 10 28 -0.982 -0.207 0.166 C94S\n" "0 0 1 15 0 0.000 0.000 0.400 C94 " "0:*-1-15-* Def\n" "5 0 1 15 0 0.000 0.000 0.336 E94 " "5:*1-15-* Def\n" "0 1 1 15 1 -1.047 0.170 0.398 C94\n" "0 1 1 15 15 -1.438 0.263 0.501 C94\n" "0 1 1 15 71 -0.376 -0.133 0.288 C94\n" "0 5 1 15 1 1.143 -0.231 0.447 C94\n" "0 5 1 15 15 1.555 -0.323 0.456 C94\n" "0 5 1 
15 37 0.000 0.000 0.459 C94\n" "0 5 1 15 71 0.229 0.203 0.440 C94\n" "0 0 1 17 0 0.000 0.000 0.350 C94 " "0:*-1-17-* Def\n" "5 0 1 17 0 0.000 0.000 0.000 E94 " "5:*1-17-* Def\n" "0 5 1 17 1 0.000 0.000 0.536 X94\n" "0 5 1 17 7 0.000 0.000 0.212 X94\n" "0 0 1 18 0 0.000 0.000 0.100 C94 " "0:*-1-18-* Def\n" "5 0 1 18 0 0.000 0.000 0.112 E94 " "5:*1-18-* Def\n" "0 5 1 18 1 0.000 0.000 0.000 X94\n" "0 5 1 18 6 0.000 0.000 0.099 X94\n" "0 5 1 18 32 0.000 0.585 0.388 X94\n" "0 5 1 18 43 0.000 -0.412 0.121 X94\n" "0 5 1 18 48 0.000 0.000 0.195 X94\n" "0 5 1 18 62 0.000 0.000 -0.088 X94\n" "0 0 1 19 0 0.000 0.000 0.150 C94 " "0:*-1-19-* Def\n" "5 0 1 19 0 0.000 0.000 0.179 E94 " "5:*1-19-* Def\n" "0 5 1 19 5 0.000 0.000 0.196 X94\n" "0 5 1 19 6 0.000 0.000 0.176 X94\n" "0 5 1 19 12 0.000 0.000 0.152 X94\n" "0 0 1 20 0 0.000 0.000 0.350 C94 " "0:*-1-20-* Def\n" "5 0 1 20 0 0.000 0.000 0.350 C94 " "5:*-1-20-* Def\n" "0 5 1 20 5 0.000 0.000 0.344 C94\n" "0 5 1 20 20 0.000 0.000 0.361 C94\n" "0 0 1 22 0 0.000 0.000 0.236 E94 " "0:*1-22-* Def\n" "5 0 1 22 0 0.000 0.000 0.236 E94 " "5:*1-22-* Def\n" "0 0 1 25 0 0.000 0.000 0.300 C94 " "0:*-1-25-* Def\n" "5 0 1 25 0 0.000 0.000 0.251 E94 " "5:*1-25-* Def\n" "0 1 1 25 1 0.000 -0.207 0.232 X94\n" "0 1 1 25 32 0.000 0.288 0.218 X94\n" "0 5 1 25 1 0.000 0.152 0.235 X94\n" "0 5 1 25 6 0.000 0.000 0.495 X94\n" "0 5 1 25 32 0.000 -0.130 0.214 X94\n" "0 5 1 25 43 0.000 0.000 0.466 X94\n" "0 5 1 25 72 0.000 0.000 0.243 X94\n" "0 0 1 26 0 0.000 0.000 0.450 C94 " "0:*-1-26-* Def\n" "5 0 1 26 0 0.000 0.000 0.376 E94 " "5:*1-26-* Def\n" "0 5 1 26 12 0.000 0.000 0.439 X94\n" "0 5 1 26 71 0.000 0.000 0.472 X94\n" "0 0 1 34 0 0.000 0.000 0.250 C94 " "0:*-1-34-* Def\n" "5 0 1 34 0 0.000 0.000 0.198 E94 " "5:*1-34-* Def\n" "0 1 1 34 36 0.000 0.000 0.187 C94\n" "0 5 1 34 1 0.000 0.000 0.247 C94\n" "0 5 1 34 36 0.000 0.000 0.259 C94\n" "0 0 1 37 0 0.000 0.000 0.200 C94 " "0:*-1-37-* Def\n" "5 0 1 37 0 0.000 0.000 0.000 E94 " "5:*1-37-* Def\n" "0 1 1 37 37 0.000 0.449 0.000 C94\n" "0 5 1 37 37 0.000 -0.420 0.391 C94\n" "0 6 1 37 37 0.000 0.000 0.150 C94\n" "0 0 1 39 0 0.000 0.000 0.000 C94 " "0:*-1-39-* Def\n" "5 0 1 39 0 0.000 0.000 0.000 E94 " "5:*1-39-* Def\n" "0 1 1 39 63 0.000 -0.080 -0.056 C94\n" "0 5 1 39 63 0.000 0.000 -0.113 C94\n" "0 0 1 40 0 0.000 0.000 0.250 C94 " "0:*-1-40-* Def\n" "5 0 1 40 0 0.000 0.000 0.297 E94 " "5:*1-40-* Def\n" "0 5 1 40 28 0.000 -0.105 0.000 C94S\n" "0 5 1 40 37 0.000 0.000 0.468 C94S\n" "0 0 1 41 0 0.000 0.600 0.000 C94 " "0:*-1-41-* Def\n" "0 1 1 41 32 0.000 1.263 0.000 C94\n" "0 5 1 41 32 0.000 0.000 -0.106 C94\n" "0 5 1 41 72 0.000 0.632 0.000 X94\n" "0 0 1 43 0 0.000 0.000 0.150 C94 " "0:*-1-43-* Def\n" "5 0 1 43 0 0.000 0.000 0.297 E94 " "5:*1-43-* Def\n" "0 5 1 43 18 0.357 -0.918 0.000 X94\n" "0 5 1 43 25 0.000 0.000 0.061 X94\n" "0 5 1 43 28 -0.249 0.382 0.343 X94\n" "0 0 1 45 0 0.000 0.000 0.100 C94 " "0:*-1-45-* Def\n" "0 5 1 45 32 0.000 0.000 0.125 X94\n" "0 0 1 46 0 0.000 0.000 -0.500 C94 " "0:*-1-46-* Def\n" "0 5 1 46 7 0.000 0.000 -0.540 X94\n" "0 0 1 54 0 0.000 0.000 0.000 C94 " "0:*-1-54-* Def\n" "2 0 1 54 0 0.000 0.000 0.000 E94 " "2:*1-54-* Def\n" "5 0 1 54 0 0.000 0.000 0.000 E94 " "5:*1-54-* Def\n" "0 5 1 54 3 0.000 0.000 -0.315 C94\n" "0 5 1 54 36 0.000 0.000 0.315 C94\n" "0 0 1 55 0 0.000 0.000 0.000 C94 " "0:*-1-55-* Def\n" "5 0 1 55 0 0.000 0.000 0.000 E94 " "5:*1-55-* Def\n" "0 5 1 55 36 0.000 -0.058 0.084 C94\n" "0 5 1 55 57 0.000 -0.058 -0.092 C94\n" "0 0 1 56 0 0.000 0.000 -0.300 C94 " "0:*-1-56-* Def\n" "0 1 
1 56 36 0.875 0.668 -0.015 C94\n" "0 1 1 56 57 -0.870 0.775 -0.406 C94\n" "0 5 1 56 36 -0.958 -0.629 -0.372 C94\n" "0 5 1 56 57 0.952 -0.715 -0.483 C94\n" "0 0 1 57 0 0.000 0.000 0.000 E94 " "0:*1-57-* Def\n" "5 0 1 57 0 0.000 0.000 0.000 E94 " "5:*1-57-* Def\n" "0 0 1 58 0 0.000 0.000 0.000 E94 " "0:*1-58-* Def\n" "0 0 1 62 0 0.000 0.000 0.250 C94 " "0:*-1-62-* Def\n" "0 5 1 62 18 0.000 0.000 0.270 X94\n" "0 0 1 63 0 0.000 0.000 0.000 E94 " "0:*1-63-* Def\n" "5 0 1 63 0 0.000 0.000 0.000 E94 " "5:*1-63-* Def\n" "0 0 1 64 0 0.000 0.000 0.000 E94 " "0:*1-64-* Def\n" "5 0 1 64 0 0.000 0.000 0.000 E94 " "5:*1-64-* Def\n" "0 0 1 67 0 0.000 0.000 0.000 E94 " "0:*1-67-* Def\n" "5 0 1 67 0 0.000 0.000 0.000 E94 " "5:*1-67-* Def\n" "0 0 1 68 0 0.000 0.000 0.400 C94 " "0:*-1-68-* Def\n" "0 1 1 68 1 -0.117 0.090 0.751 C94\n" "0 1 1 68 23 0.373 0.153 0.635 C94\n" "0 1 1 68 32 -0.090 -0.169 0.075 C94\n" "0 5 1 68 1 0.134 -0.112 0.329 C94\n" "0 5 1 68 23 -0.361 -0.202 0.560 C94\n" "0 5 1 68 32 0.072 0.218 0.093 C94\n" "0 0 1 73 0 0.000 0.000 0.500 C94 " "0:*-1-73-* Def\n" "0 5 1 73 32 0.000 0.000 0.509 X94\n" "0 5 1 73 72 0.000 0.000 0.443 X94\n" "0 0 1 75 0 0.000 0.000 0.000 E94 " "0:*1-75-* Def\n" "0 0 1 78 0 0.000 0.000 0.000 E94 " "0:*1-78-* Def\n" "0 0 1 80 0 0.000 0.000 0.000 E94 " "0:*1-80-* Def\n" "0 0 1 81 0 0.000 0.000 0.000 E94 " "0:*1-81-* Def\n" "0 0 2 2 0 0.000 12.000 0.000 C94 " "0:*-2=2-* Def\n" "1 0 2 2 0 0.000 1.800 0.000 C94 " "1:*=2-2=* Def\n" "5 0 2 2 0 0.000 12.000 0.000 C94 " "5:*-2=2-* Def\n" "0 1 2 2 1 -0.403 12.000 0.000 C94\n" "0 1 2 2 2 0.000 12.000 0.000 C94\n" "1 1 2 2 2 -0.418 2.089 -0.310 C94\n" "0 1 2 2 5 0.000 12.000 0.000 C94\n" "1 1 2 2 5 0.412 2.120 0.269 C94\n" "1 2 2 2 2 0.094 1.621 0.877 C94\n" "0 2 2 2 5 0.000 12.000 0.000 C94\n" "1 2 2 2 5 0.317 1.421 -0.870 C94\n" "0 3 2 2 5 0.000 12.000 0.000 C94\n" "0 5 2 2 5 0.000 12.000 0.000 C94\n" "1 5 2 2 5 -0.406 1.767 0.000 C94\n" "0 5 2 2 6 0.000 12.000 0.000 C94\n" "0 5 2 2 37 0.000 12.000 0.000 C94\n" "0 5 2 2 40 0.000 12.000 0.000 C94\n" "0 5 2 2 41 0.000 12.000 0.000 C94\n" "0 5 2 2 45 0.000 12.000 0.000 X94\n" "0 5 2 2 62 0.000 12.000 0.000 X94\n" "1 0 2 3 0 0.000 2.500 0.000 #E94 " "0:*-2-3-* Def\n" "1 1 2 3 1 0.136 1.798 0.630 C94\n" "1 1 2 3 5 0.497 2.405 0.357 C94\n" "1 1 2 3 6 -0.211 1.925 -0.131 C94\n" "1 1 2 3 7 -0.401 2.028 -0.318 C94\n" "1 1 2 3 10 0.000 2.237 -0.610 C94S\n" "1 2 2 3 1 -0.325 1.553 -0.487 C94\n" "1 2 2 3 5 -0.295 2.024 -0.590 C94\n" "1 2 2 3 6 -0.143 1.466 0.000 C94\n" "1 2 2 3 7 0.362 1.978 0.000 C94\n" "1 2 2 3 9 0.296 1.514 0.481 C94\n" "1 2 2 3 10 0.000 1.599 0.380 C94S\n" "1 5 2 3 1 0.213 1.728 -0.042 C94\n" "1 5 2 3 5 -0.208 1.622 0.223 C94\n" "1 5 2 3 6 0.359 1.539 0.194 C94\n" "1 5 2 3 7 0.000 2.046 0.000 C94\n" "1 5 2 3 9 -0.290 1.519 -0.470 C94\n" "1 5 2 3 10 0.000 1.409 0.254 C94S\n" "1 0 2 4 0 0.000 0.000 0.000 C94 " "0:*-2-4-* Def\n" "0 0 2 6 0 0.000 3.100 0.000 C94 " "0:*-2-6-* Def\n" "2 0 2 6 0 0.000 3.600 0.000 E94 " "2:*-2-6-* Def\n" "5 0 2 6 0 0.000 3.600 0.000 E94 " "5:*-2-6-* Def\n" "0 2 2 6 1 -1.953 3.953 -1.055 C94\n" "0 2 2 6 3 -1.712 2.596 -0.330 C94\n" "0 2 2 6 29 -0.215 2.810 -0.456 C94\n" "0 5 2 6 1 1.951 3.936 1.130 C94\n" "0 5 2 6 3 1.719 2.628 0.360 C94\n" "0 5 2 6 29 0.216 2.808 0.456 C94\n" "1 0 2 9 0 0.000 1.800 0.000 E94 " "1:*-2-9-* Def\n" "0 0 2 10 0 0.000 6.000 0.000 E94 " "0:*-2-10-* Def\n" "2 0 2 10 0 0.000 6.000 0.000 E94 " "2:*-2-10-* Def\n" "5 0 2 10 0 0.000 6.000 0.000 E94 " "5:*-2-10-* Def\n" "0 0 2 15 0 0.000 1.423 0.000 E94 " "0:*-2-15-* 
Def\n" "2 0 2 15 0 0.000 1.423 0.000 E94 " "2:*-2-15-* Def\n" "5 0 2 15 0 0.000 1.423 0.000 E94 " "5:*-2-15-* Def\n" "0 0 2 17 0 0.000 1.423 0.000 E94 " "0:*-2-17-* Def\n" "0 0 2 18 0 0.000 0.000 0.000 E94 " "0:*-2-18-* Def\n" "2 0 2 18 0 0.000 0.000 0.000 E94 " "2:*-2-18-* Def\n" "5 0 2 18 0 0.000 0.000 0.000 E94 " "5:*-2-18-* Def\n" "0 0 2 19 0 0.000 0.000 0.000 E94 " "0:*-2-19-* Def\n" "0 0 2 20 0 0.000 0.000 0.000 E94 " "0:*-2-20-* Def\n" "2 0 2 20 0 0.000 0.000 0.000 E94 " "2:*-2-20-* Def\n" "0 0 2 22 0 0.000 0.000 0.000 E94 " "0:*-2-22-* Def\n" "2 0 2 22 0 0.000 0.000 0.000 E94 " "2:*-2-22-* Def\n" "5 0 2 22 0 0.000 0.000 0.000 E94 " "5:*-2-22-* Def\n" "0 0 2 25 0 0.000 0.000 0.000 E94 " "0:*-2-25-* Def\n" "0 0 2 30 0 0.000 12.000 0.000 E94 " "0:*-2-30-* Def\n" "0 0 2 34 0 0.000 0.000 0.000 E94 " "0:*-2-34-* Def\n" "2 0 2 34 0 0.000 0.000 0.000 E94 " "2:*-2-34-* Def\n" "1 0 2 37 0 0.000 2.000 0.000 C94 " "1:*-2-37-* Def\n" "1 1 2 37 37 0.000 2.952 -0.079 C94\n" "1 2 2 37 37 0.000 1.542 0.434 C94\n" "1 5 2 37 37 0.000 1.308 -0.357 C94\n" "1 0 2 39 0 0.000 6.000 0.000 E94 " "1:*-2-39-* Def\n" "0 0 2 40 0 0.000 3.700 0.000 C94 " "0:*-2-40-* Def\n" "2 0 2 40 0 0.000 3.600 0.000 E94 " "2:*-2-40-* Def\n" "5 0 2 40 0 0.000 3.600 0.000 E94 " "5:*-2-40-* Def\n" "0 2 2 40 28 0.000 3.305 -0.530 C94S\n" "0 5 2 40 28 0.139 3.241 0.139 C94S\n" "0 0 2 41 0 0.000 1.200 0.000 C94 " "0:*-2-41-* Def\n" "2 0 2 41 0 0.000 1.800 0.000 E94 " "2:*-2-41-* Def\n" "0 2 2 41 32 0.000 1.235 0.000 C94\n" "0 5 2 41 32 0.000 1.231 0.000 C94\n" "0 0 2 43 0 0.000 3.600 0.000 E94 " "0:*-2-43-* Def\n" "2 0 2 43 0 0.000 3.600 0.000 E94 " "2:*-2-43-* Def\n" "0 0 2 45 0 0.000 2.200 0.000 C94 " "0:*-2-45-* Def\n" "2 0 2 45 0 0.000 1.800 0.000 E94 " "2:*-2-45-* Def\n" "0 2 2 45 32 0.000 2.212 0.000 X94\n" "0 5 2 45 32 0.000 2.225 0.000 X94\n" "0 0 2 46 0 0.000 1.800 0.000 E94 " "0:*-2-46-* Def\n" "2 0 2 46 0 0.000 1.800 0.000 E94 " "2:*-2-46-* Def\n" "0 0 2 55 0 0.000 4.800 0.000 E94 " "0:*-2-55-* Def\n" "0 0 2 56 0 0.000 4.800 0.000 E94 " "0:*-2-56-* Def\n" "0 0 2 62 0 0.000 8.000 0.000 C94 " "0:*-2-62-* Def\n" "0 2 2 62 23 1.693 7.903 0.532 X94\n" "0 5 2 62 23 -1.696 7.897 -0.482 X94\n" "1 0 2 63 0 0.000 1.800 0.000 E94 " "1:*-2-63-* Def\n" "1 0 2 64 0 0.000 1.800 0.000 E94 " "1:*-2-64-* Def\n" "1 0 2 67 0 0.000 1.800 0.000 E94 " "1:*-2-67-* Def\n" "1 0 2 81 0 0.000 4.800 0.000 E94 " "1:*-2-81-* Def\n" "1 0 3 3 0 0.000 0.600 0.000 C94 " "0:*-3-3-* Def\n" "4 0 3 3 0 0.000 1.800 0.000 E94 " "4:*-3-3-* Def\n" "1 1 3 3 1 -0.486 0.714 0.000 C94\n" "1 1 3 3 6 -0.081 -0.125 0.132 C94\n" "1 1 3 3 7 1.053 1.327 0.000 C94\n" "1 5 3 3 6 0.000 0.188 0.436 C94\n" "1 5 3 3 7 0.000 0.177 -0.412 C94\n" "1 6 3 3 6 0.269 0.437 0.000 C94\n" "1 6 3 3 7 -0.495 0.793 -0.318 C94\n" "1 7 3 3 7 -0.260 1.084 0.193 C94\n" "0 0 3 6 0 0.000 5.500 0.000 C94 " "0:*-3-6-* Def\n" "2 0 3 6 0 0.000 5.500 0.000 C94 " "2:*-3-6-* Def\n" "4 0 3 6 0 0.000 3.600 0.000 E94 " "4:*-3-6-* Def\n" "5 0 3 6 0 0.000 3.600 0.000 E94 " "5:*-3-6-* Def\n" "0 1 3 6 1 -1.244 5.482 0.365 C94\n" "0 1 3 6 24 -1.166 5.078 -0.545 C94\n" "0 1 3 6 37 -0.677 5.854 0.521 C94\n" "2 2 3 6 24 0.256 4.519 0.258 C94\n" "2 3 3 6 24 1.663 4.073 0.094 C94\n" "0 5 3 6 1 0.526 5.631 0.691 C94\n" "0 5 3 6 2 0.159 6.586 0.216 C94\n" "0 5 3 6 24 -2.285 4.737 0.468 C94\n" "0 7 3 6 0 0.700 6.500 -0.400 C94 " "0:7-3-6-* Def\n" "0 7 3 6 1 0.682 7.184 -0.935 C94\n" "0 7 3 6 2 -0.168 6.572 -0.151 C94\n" "0 7 3 6 24 1.662 6.152 -0.058 C94\n" "0 7 3 6 37 0.635 5.890 -0.446 C94\n" "2 37 3 6 24 0.000 3.892 
-0.094 C94\n" "0 0 3 9 0 0.000 16.000 0.000 C94 " "0:*-3=9-* Def\n" "1 0 3 9 0 0.000 1.800 0.000 E94 " "1:*-3-9-* Def\n" "5 0 3 9 0 0.000 12.000 0.000 E94 " "5:*-3-9-* Def\n" "0 2 3 9 27 0.000 16.000 0.000 C94\n" "0 5 3 9 1 0.687 16.152 0.894 C94\n" "0 5 3 9 27 0.000 16.000 0.000 C94\n" "0 40 3 9 1 -0.704 18.216 0.000 C94S\n" "0 40 3 9 27 0.000 16.000 0.178 C94S\n" "0 0 3 10 0 0.000 6.000 0.000 C94 " "0:*-3-10-* Def\n" "2 0 3 10 0 0.000 6.000 0.000 C94 " "2:*-3-10-* Def\n" "4 0 3 10 0 0.000 6.000 0.000 C94 " "4:*-3-10-* Def\n" "5 0 3 10 0 0.000 6.000 0.000 E94 " "5:*-3-10-* Def\n" "0 1 3 10 1 0.831 6.061 0.522 C94S\n" "0 1 3 10 6 -1.152 8.588 1.511 C94S\n" "0 1 3 10 28 -0.259 5.934 1.326 C94S\n" "2 2 3 10 28 0.000 6.561 0.294 C94S\n" "0 5 3 10 1 -0.195 6.304 1.722 C94S\n" "0 5 3 10 3 -0.705 5.383 0.234 C94S\n" "0 5 3 10 28 -0.417 5.981 0.511 C94S\n" "0 7 3 10 1 -0.491 6.218 0.000 C94S\n" "0 7 3 10 3 0.733 -0.543 -0.163 C94S\n" "0 7 3 10 6 1.234 8.372 -0.539 C94S\n" "0 7 3 10 28 1.168 4.857 -0.341 C94S\n" "0 10 3 10 28 0.000 3.706 1.254 C94S\n" "0 0 3 15 0 0.000 1.423 0.000 E94 " "0:*-3-15-* Def\n" "2 0 3 15 0 0.000 1.423 0.000 E94 " "2:*-3-15-* Def\n" "4 0 3 15 0 0.000 1.423 0.000 E94 " "4:*-3-15-* Def\n" "5 0 3 15 0 0.000 1.423 0.000 E94 " "5:*-3-15-* Def\n" "0 0 3 17 0 0.000 1.423 0.000 E94 " "0:*-3-17-* Def\n" "5 0 3 17 0 0.000 1.423 0.000 E94 " "5:*-3-17-* Def\n" "0 0 3 18 0 0.000 0.000 0.000 E94 " "0:*-3-18-* Def\n" "2 0 3 18 0 0.000 0.000 0.000 E94 " "2:*-3-18-* Def\n" "0 0 3 20 0 0.000 0.000 -0.300 C94 " "0:*-3-20-* Def\n" "2 0 3 20 0 0.000 0.000 0.000 E94 " "2:*-3-20-* Def\n" "4 0 3 20 0 0.000 0.000 -0.300 C94 " "4:*-3-20-* Def\n" "5 0 3 20 0 0.000 0.000 0.000 E94 " "5:*-3-20-* Def\n" "0 7 3 20 0 0.000 0.400 0.400 C94 " "0:7-3-20-* Def\n" "0 7 3 20 5 0.000 0.000 -0.131 C94\n" "0 7 3 20 20 0.000 0.000 0.000 C94\n" "0 20 3 20 5 0.000 0.000 0.085 C94\n" "0 20 3 20 20 0.000 0.000 0.000 C94\n" "0 0 3 22 0 0.000 0.000 0.000 E94 " "0:*-3-22-* Def\n" "2 0 3 22 0 0.000 0.000 0.000 E94 " "2:*-3-22-* Def\n" "4 0 3 22 0 0.000 0.000 0.000 E94 " "4:*-3-22-* Def\n" "5 0 3 22 0 0.000 0.000 0.000 E94 " "5:*-3-22-* Def\n" "0 7 3 22 0 0.000 0.400 0.400 C94 " "0:7-3-22-* Def\n" "0 0 3 25 0 0.000 0.000 0.000 E94 " "0:*-3-25-* Def\n" "2 0 3 25 0 0.000 0.000 0.000 E94 " "2:*-3-25-* Def\n" "1 0 3 30 0 0.000 1.800 0.000 E94 " "1:*-3-30-* Def\n" "4 0 3 30 0 0.000 1.800 0.000 E94 " "4:*-3-30-* Def\n" "1 0 3 37 0 0.000 2.500 0.000 #E94 " "1:*-3-37-* Def\n" "4 0 3 37 0 0.000 1.800 0.000 E94 " "4:*-3-37-* Def\n" "1 1 3 37 37 0.000 2.428 0.000 C94\n" "1 6 3 37 37 0.000 1.743 0.000 C94\n" "1 7 3 37 37 0.000 2.256 0.000 C94\n" "1 43 3 37 37 -0.241 3.385 -0.838 X94\n" "1 0 3 39 0 0.000 5.500 0.000 #E94 " "1:*-3-39-* Def\n" "0 0 3 40 0 0.000 3.900 0.000 C94 " "0:*-3-40-* Def\n" "2 0 3 40 0 0.000 3.600 0.000 E94 " "2:*-3-40-* Def\n" "5 0 3 40 0 0.000 3.600 0.000 E94 " "5:*-3-40-* Def\n" "0 5 3 40 28 -1.355 3.964 0.800 C94S\n" "0 9 3 40 28 1.045 3.785 -0.291 C94S\n" "0 40 3 40 28 0.508 2.985 0.809 C94S\n" "0 0 3 41 0 0.000 1.800 0.000 E94 " "0:*-3-41-* Def\n" "2 0 3 41 0 0.000 1.800 0.000 E94 " "2:*-3-41-* Def\n" "0 0 3 43 0 0.000 4.500 0.000 C94 " "0:*-3-43-* Def\n" "2 0 3 43 0 0.000 3.600 0.000 E94 " "2:*-3-43-* Def\n" "4 0 3 43 0 0.000 3.600 0.000 E94 " "4:*-3-43-* Def\n" "5 0 3 43 0 0.000 3.600 0.000 E94 " "5:*-3-43-* Def\n" "0 1 3 43 18 1.712 3.309 0.233 X94\n" "0 1 3 43 28 -0.414 4.168 -0.875 X94\n" "0 7 3 43 18 -0.880 5.091 -0.129 X94\n" "0 7 3 43 28 0.536 5.276 -0.556 X94\n" "2 37 3 43 18 -0.701 4.871 1.225 
X94\n" "2 37 3 43 28 -0.086 5.073 0.878 X94\n" "0 0 3 45 0 0.000 1.800 0.000 E94 " "0:*-3-45-* Def\n" "2 0 3 45 0 0.000 1.800 0.000 E94 " "2:*-3-45-* Def\n" "0 0 3 48 0 0.000 0.000 0.892 E94 " "0:*-3-48-* Def\n" "0 0 3 51 0 0.000 13.500 0.000 C94 " "0:*-3-51-* Def\n" "0 1 3 51 52 0.000 13.549 0.000 X94\n" "0 0 3 54 0 0.000 8.000 0.000 C94 " "0:*-3-54-* Def\n" "1 0 3 54 0 0.000 2.500 0.000 #E94 " "1:*-3-54-* Def\n" "5 0 3 54 0 0.000 12.000 0.000 E94 " "5:*-3-54-* Def\n" "0 5 3 54 1 0.000 8.000 0.000 C94\n" "0 5 3 54 36 0.000 8.000 0.000 C94\n" "0 0 3 55 0 0.000 4.800 0.000 E94 " "0:*-3-55-* Def\n" "2 0 3 55 0 0.000 4.800 0.000 E94 " "2:*-3-55-* Def\n" "0 0 3 56 0 0.000 4.800 0.000 E94 " "0:*-3-56-* Def\n" "2 0 3 56 0 0.000 4.800 0.000 E94 " "2:*-3-56-* Def\n" "1 0 3 57 0 0.000 2.500 0.000 #E94 " "1:*-3-57-* Def\n" "1 0 3 58 0 0.000 4.800 0.000 E94 " "1:*-3-58-* Def\n" "0 0 3 62 0 0.000 3.600 0.000 E94 " "0:*-3-62-* Def\n" "2 0 3 62 0 0.000 3.600 0.000 E94 " "2:*-3-62-* Def\n" "5 0 3 62 0 0.000 3.600 0.000 E94 " "5:*-3-62-* Def\n" "1 0 3 63 0 0.000 2.500 0.000 #E94 " "1:*-3-63-* Def\n" "1 0 3 64 0 0.000 2.500 0.000 #E94 " "1:*-3-64-* Def\n" "0 0 3 67 0 0.000 12.000 0.000 E94 " "0:*-3-67-* Def\n" "0 0 3 74 0 0.000 19.000 0.000 C94 " "0:*-3-74-* Def\n" "0 1 3 74 7 0.000 19.349 0.000 X94\n" "0 0 3 75 0 0.000 19.000 0.000 C94 " "0:*-3-75-* Def\n" "0 1 3 75 71 0.000 18.751 0.000 X94\n" "1 0 3 78 0 0.000 2.500 0.000 #E94 " "1:*-3-78-* Def\n" "1 0 3 80 0 0.000 2.500 0.000 #E94 " "1:*-3-80-* Def\n" "0 0 6 6 0 0.000 -2.000 0.000 E94 " "0:*-6-6-* Def\n" "5 0 6 6 0 0.000 -2.000 0.000 E94 " "5:*-6-6-* Def\n" "0 0 6 8 0 0.900 -1.100 -0.500 C94 " "0:*-6-8-* Def\n" "5 0 6 8 0 0.000 0.000 0.274 E94 " "5:*-6-8-* Def\n" "0 21 6 8 1 0.261 -0.330 -0.542 C94\n" "0 21 6 8 23 1.503 -1.853 -0.476 C94\n" "0 0 6 9 0 0.000 3.600 0.000 E94 " "0:*-6-9-* Def\n" "5 0 6 9 0 0.000 3.600 0.000 E94 " "5:*-6-9-* Def\n" "0 0 6 10 0 1.200 0.500 -1.000 C94 " "0:*-6-10-* Def\n" "0 21 6 10 1 0.829 0.000 -0.730 C94S\n" "0 21 6 10 3 0.675 -0.185 -1.053 C94S\n" "0 0 6 15 0 0.000 -4.000 0.000 E94 " "0:*-6-15-* Def\n" "0 0 6 17 0 0.000 1.423 0.000 E94 " "0:*-6-17-* Def\n" "5 0 6 17 0 0.000 1.423 0.000 E94 " "5:*-6-17-* Def\n" "0 0 6 18 0 0.000 0.000 0.100 C94 " "0:*-6-18-* Def\n" "5 0 6 18 0 0.000 0.000 0.103 E94 " "5:*-6-18-* Def\n" "0 33 6 18 1 -0.520 -0.471 -0.267 X94\n" "0 33 6 18 6 -1.623 0.204 0.438 X94\n" "0 33 6 18 32 1.616 0.425 0.191 X94\n" "0 0 6 19 0 0.000 0.000 0.150 C94 " "0:*-6-19-* Def\n" "5 0 6 19 0 0.000 0.000 0.165 E94 " "5:*-6-19-* Def\n" "0 21 6 19 1 -0.620 -0.329 0.303 X94\n" "0 21 6 19 5 0.683 0.220 0.000 X94\n" "0 0 6 20 0 0.000 0.000 0.400 C94 " "0:*-6-20-* Def\n" "4 0 6 20 0 0.000 0.000 0.217 E94 " "4:*-6-20-* Def\n" "5 0 6 20 0 0.000 0.000 0.217 E94 " "5:*-6-20-* Def\n" "0 20 6 20 5 0.000 0.000 -0.079 C94\n" "4 20 6 20 20 0.000 0.000 0.000 C94\n" "0 0 6 22 0 0.000 0.000 0.217 E94 " "0:*-6-22-* Def\n" "0 0 6 25 0 0.000 0.000 0.650 C94 " "0:*-6-25-* Def\n" "5 0 6 25 0 0.000 0.000 0.231 E94 " "5:*-6-25-* Def\n" "0 1 6 25 1 -1.704 -0.452 0.556 X94\n" "0 1 6 25 6 0.000 0.000 0.777 X94\n" "0 1 6 25 32 1.205 0.914 0.612 X94\n" "0 24 6 25 6 -3.209 -7.622 1.065 X94\n" "0 24 6 25 32 -5.891 -3.332 0.290 X94\n" "0 0 6 26 0 0.000 0.000 0.346 E94 " "0:*-6-26-* Def\n" "0 0 6 30 0 0.000 3.600 0.000 E94 " "0:*-6-30-* Def\n" "2 0 6 30 0 0.000 3.600 0.000 E94 " "2:*-6-30-* Def\n" "0 0 6 37 0 0.000 3.200 0.000 C94 " "0:*-6-37-* Def\n" "5 0 6 37 0 0.000 3.600 0.000 E94 " "5:*-6-37-* Def\n" "0 1 6 37 37 0.000 4.382 0.000 C94\n" "0 
3 6 37 37 0.000 2.576 0.000 C94\n" "0 29 6 37 37 0.000 2.801 0.000 C94\n" "0 0 6 39 0 0.000 0.000 0.000 E94 " "0:*-6-39-* Def\n" "0 0 6 40 0 0.000 0.000 0.274 E94 " "0:*-6-40-* Def\n" "0 0 6 41 0 0.000 3.600 0.000 E94 " "0:*-6-41-* Def\n" "0 0 6 43 0 0.000 0.000 0.274 E94 " "0:*-6-43-* Def\n" "0 0 6 45 0 0.000 6.000 0.000 C94 " "0:*-6-45-* Def\n" "0 1 6 45 32 0.000 6.208 0.000 X94\n" "0 0 6 54 0 0.000 3.600 0.000 E94 " "0:*-6-54-* Def\n" "0 0 6 55 0 0.000 3.600 0.000 E94 " "0:*-6-55-* Def\n" "0 0 6 57 0 0.000 3.600 0.000 E94 " "0:*-6-57-* Def\n" "0 0 6 58 0 0.000 3.600 0.000 E94 " "0:*-6-58-* Def\n" "0 0 6 63 0 0.000 3.600 0.000 E94 " "0:*-6-63-* Def\n" "0 0 6 64 0 0.000 3.600 0.000 E94 " "0:*-6-64-* Def\n" "0 0 8 8 0 0.000 0.000 0.375 E94 " "0:*-8-8-* Def\n" "5 0 8 8 0 0.000 0.000 0.375 E94 " "5:*-8-8-* Def\n" "0 0 8 9 0 0.000 3.600 0.000 E94 " "0:*-8-9-* Def\n" "5 0 8 9 0 0.000 3.600 0.000 E94 " "5:*-8-9-* Def\n" "0 0 8 10 0 0.000 0.000 0.000 E94 " "0:*-8-10-* Def\n" "4 0 8 10 0 0.000 0.000 0.000 E94 " "4:*-8-10-* Def\n" "0 0 8 15 0 0.000 0.000 0.424 E94 " "0:*-8-15-* Def\n" "0 0 8 17 0 0.000 1.423 0.000 E94 " "0:*-8-17-* Def\n" "4 0 8 17 0 0.000 1.423 0.000 E94 " "4:*-8-17-* Def\n" "5 0 8 17 0 0.000 1.423 0.000 E94 " "5:*-8-17-* Def\n" "0 0 8 19 0 0.000 0.000 0.225 E94 " "0:*-8-19-* Def\n" "0 0 8 20 0 0.000 0.000 0.350 C94 " "0:*-8-20-* Def\n" "4 0 8 20 0 0.000 0.000 0.300 C94 " "4:*-8-20-* Def\n" "5 0 8 20 0 0.000 0.000 0.297 E94 " "5:*-8-20-* Def\n" "0 20 8 20 5 0.000 0.120 0.472 C94\n" "4 20 8 20 20 0.000 -0.097 0.200 C94\n" "0 23 8 20 5 -0.101 -0.324 0.371 C94\n" "0 23 8 20 20 0.107 0.253 0.151 C94\n" "0 0 8 22 0 0.000 0.000 0.297 E94 " "0:*-8-22-* Def\n" "0 0 8 25 0 0.000 0.000 0.316 E94 " "0:*-8-25-* Def\n" "5 0 8 25 0 0.000 0.000 0.316 E94 " "5:*-8-25-* Def\n" "0 0 8 26 0 0.000 0.000 0.474 E94 " "0:*-8-26-* Def\n" "5 0 8 26 0 0.000 0.000 0.474 E94 " "5:*-8-26-* Def\n" "0 0 8 34 0 0.000 0.000 0.250 E94 " "0:*-8-34-* Def\n" "0 0 8 39 0 0.000 0.000 0.000 E94 " "0:*-8-39-* Def\n" "0 0 8 40 0 0.000 0.000 0.375 E94 " "0:*-8-40-* Def\n" "0 0 8 43 0 0.000 0.000 0.375 E94 " "0:*-8-43-* Def\n" "0 0 8 45 0 0.000 3.600 0.000 E94 " "0:*-8-45-* Def\n" "0 0 8 46 0 0.000 3.600 0.000 E94 " "0:*-8-46-* Def\n" "0 0 8 55 0 0.000 3.600 0.000 E94 " "0:*-8-55-* Def\n" "0 0 8 56 0 0.000 3.600 0.000 E94 " "0:*-8-56-* Def\n" "0 0 9 9 0 0.000 12.000 0.000 E94 " "0:*-9-9-* Def\n" "1 0 9 9 0 0.000 1.800 0.000 E94 " "1:*-9-9-* Def\n" "5 0 9 9 0 0.000 12.000 0.000 E94 " "5:*-9-9-* Def\n" "0 0 9 10 0 0.000 6.000 0.000 E94 " "0:*-9-10-* Def\n" "5 0 9 10 0 0.000 6.000 0.000 E94 " "5:*-9-10-* Def\n" "0 0 9 15 0 0.000 1.423 0.000 E94 " "0:*-9-15-* Def\n" "0 0 9 18 0 0.000 0.000 0.000 E94 " "0:*-9-18-* Def\n" "0 0 9 19 0 0.000 0.000 0.000 E94 " "0:*-9-19-* Def\n" "0 0 9 20 0 0.000 0.000 0.000 E94 " "0:*-9-20-* Def\n" "0 0 9 25 0 0.000 0.000 0.000 E94 " "0:*-9-25-* Def\n" "0 0 9 34 0 0.000 0.000 0.000 E94 " "0:*-9-34-* Def\n" "5 0 9 34 0 0.000 0.000 0.000 E94 " "5:*-9-34-* Def\n" "1 0 9 37 0 0.000 1.800 0.000 E94 " "1:*-9-37-* Def\n" "1 0 9 39 0 0.000 6.000 0.000 E94 " "1:*-9-39-* Def\n" "0 0 9 40 0 0.000 3.600 0.000 E94 " "0:*-9-40-* Def\n" "0 0 9 41 0 0.000 4.800 0.000 E94 " "0:*-9-41-* Def\n" "0 0 9 45 0 0.000 1.800 0.000 E94 " "0:*-9-45-* Def\n" "0 0 9 54 0 0.000 12.000 0.000 E94 " "0:*-9-54-* Def\n" "0 0 9 55 0 0.000 4.800 0.000 E94 " "0:*-9-55-* Def\n" "0 0 9 56 0 0.000 4.800 0.000 E94 " "0:*-9-56-* Def\n" "1 0 9 57 0 0.000 1.800 0.000 E94 " "1:*-9-57-* Def\n" "0 0 9 62 0 0.000 3.600 0.000 E94 " "0:*-9-62-* 
Def\n" "1 0 9 63 0 0.000 1.800 0.000 E94 " "1:*-9-63-* Def\n" "1 0 9 64 0 0.000 1.800 0.000 E94 " "1:*-9-64-* Def\n" "0 0 9 67 0 0.000 12.000 0.000 E94 " "0:*-9-67-* Def\n" "1 0 9 78 0 0.000 1.800 0.000 E94 " "1:*-9-78-* Def\n" "1 0 9 81 0 0.000 4.800 0.000 E94 " "1:*-9-81-* Def\n" "0 0 10 10 0 0.000 0.000 0.000 E94 " "0:*-10-10-* Def\n" "5 0 10 10 0 0.000 0.000 0.000 E94 " "5:*-10-10-* Def\n" "0 0 10 15 0 0.000 0.000 0.000 E94 " "0:*-10-15-* Def\n" "0 0 10 17 0 0.000 4.743 0.000 E94 " "0:*-10-17-* Def\n" "0 0 10 20 0 0.000 0.000 0.000 E94 " "0:*-10-20-* Def\n" "4 0 10 20 0 0.000 0.000 0.000 E94 " "4:*-10-20-* Def\n" "5 0 10 20 0 0.000 0.000 0.000 E94 " "5:*-10-20-* Def\n" "0 0 10 22 0 0.000 0.000 0.000 E94 " "0:*-10-22-* Def\n" "0 0 10 25 0 0.000 0.000 0.000 E94 " "0:*-10-25-* Def\n" "0 0 10 26 0 0.000 0.000 0.000 E94 " "0:*-10-26-* Def\n" "5 0 10 26 0 0.000 0.000 0.000 E94 " "5:*-10-26-* Def\n" "0 0 10 34 0 0.000 0.000 0.000 E94 " "0:*-10-34-* Def\n" "0 0 10 37 0 0.000 6.000 0.000 E94 " "0:*-10-37-* Def\n" "0 0 10 39 0 0.000 0.000 0.000 E94 " "0:*-10-39-* Def\n" "0 0 10 40 0 0.000 0.000 0.000 E94 " "0:*-10-40-* Def\n" "5 0 10 40 0 0.000 0.000 0.000 E94 " "5:*-10-40-* Def\n" "0 0 10 41 0 0.000 6.000 0.000 E94 " "0:*-10-41-* Def\n" "0 0 10 45 0 0.000 6.000 0.000 E94 " "0:*-10-45-* Def\n" "0 0 10 63 0 0.000 6.000 0.000 E94 " "0:*-10-63-* Def\n" "0 0 10 64 0 0.000 6.000 0.000 E94 " "0:*-10-64-* Def\n" "0 0 15 15 0 -1.400 -8.300 1.000 C94 " "0:*-15-15-* Def\n" "5 0 15 15 0 0.000 -8.000 0.000 E94 " "5:*-15-15-* Def\n" "0 1 15 15 1 -1.663 -8.408 1.433 C94\n" "0 1 15 15 71 -1.088 -8.245 0.411 C94\n" "0 0 15 18 0 0.000 0.000 0.160 E94 " "0:*-15-18-* Def\n" "0 0 15 19 0 0.000 0.000 0.255 E94 " "0:*-15-19-* Def\n" "5 0 15 19 0 0.000 0.000 0.255 E94 " "5:*-15-19-* Def\n" "0 0 15 20 0 0.000 0.000 0.336 E94 " "0:*-15-20-* Def\n" "4 0 15 20 0 0.000 0.000 0.336 E94 " "4:*-15-20-* Def\n" "0 0 15 22 0 0.000 0.000 0.336 E94 " "0:*-15-22-* Def\n" "0 0 15 25 0 0.000 0.000 0.358 E94 " "0:*-15-25-* Def\n" "4 0 15 25 0 0.000 0.000 0.358 E94 " "4:*-15-25-* Def\n" "0 0 15 26 0 0.000 0.000 0.537 E94 " "0:*-15-26-* Def\n" "0 0 15 30 0 0.000 1.423 0.000 E94 " "0:*-15-30-* Def\n" "4 0 15 30 0 0.000 1.423 0.000 E94 " "4:*-15-30-* Def\n" "0 0 15 37 0 0.000 1.300 0.000 C94 " "0:*-15-37-* Def\n" "5 0 15 37 0 0.000 1.423 0.000 E94 " "5:*-15-37-* Def\n" "0 1 15 37 37 0.000 2.177 0.000 C94\n" "0 71 15 37 37 0.000 0.505 0.333 C94\n" "0 0 15 40 0 0.000 0.000 0.424 E94 " "0:*-15-40-* Def\n" "0 0 15 43 0 0.000 0.000 0.424 E94 " "0:*-15-43-* Def\n" "0 0 15 57 0 0.000 1.423 0.000 E94 " "0:*-15-57-* Def\n" "0 0 15 63 0 0.000 1.423 0.000 E94 " "0:*-15-63-* Def\n" "0 0 15 64 0 0.000 1.423 0.000 E94 " "0:*-15-64-* Def\n" "0 0 17 20 0 0.000 0.000 0.000 E94 " "0:*-17-20-* Def\n" "4 0 17 20 0 0.000 0.000 0.000 E94 " "4:*-17-20-* Def\n" "5 0 17 20 0 0.000 0.000 0.000 E94 " "5:*-17-20-* Def\n" "0 0 17 22 0 0.000 0.000 0.000 E94 " "0:*-17-22-* Def\n" "0 0 17 37 0 0.000 1.423 0.000 E94 " "0:*-17-37-* Def\n" "0 0 17 43 0 0.000 3.795 0.000 E94 " "0:*-17-43-* Def\n" "0 0 18 20 0 0.000 0.000 0.112 E94 " "0:*-18-20-* Def\n" "4 0 18 20 0 0.000 0.000 0.112 E94 " "4:*-18-20-* Def\n" "5 0 18 20 0 0.000 0.000 0.112 E94 " "5:*-18-20-* Def\n" "0 0 18 22 0 0.000 0.000 0.112 E94 " "0:*-18-22-* Def\n" "0 0 18 37 0 0.000 -1.200 -0.300 C94 " "0:*-18-37-* Def\n" "0 32 18 37 37 -0.173 -0.965 -0.610 X94\n" "0 39 18 37 37 0.000 -0.760 0.227 X94\n" "0 43 18 37 37 0.228 -1.741 -0.371 X94\n" "0 0 18 39 0 0.000 0.000 0.500 C94 " "0:*-18-39-* Def\n" "0 32 18 39 63 
0.000 0.687 0.680 X94\n" "0 37 18 39 63 0.000 -0.513 0.357 X94\n" "0 0 18 43 0 0.000 0.000 0.350 C94 " "0:*-18-43-* Def\n" "4 0 18 43 0 0.000 0.000 0.141 E94 " "4:*-18-43-* Def\n" "5 0 18 43 0 0.000 0.000 0.141 E94 " "5:*-18-43-* Def\n" "0 1 18 43 1 -0.914 -0.482 0.179 X94\n" "0 1 18 43 3 -0.392 -2.724 0.312 X94\n" "0 1 18 43 28 -1.508 -1.816 -0.175 X94\n" "0 1 18 43 37 0.823 -1.220 -0.770 X94\n" "0 32 18 43 1 1.588 1.499 1.410 X94\n" "0 32 18 43 3 0.653 0.254 0.000 X94\n" "0 32 18 43 28 0.528 0.342 0.000 X94\n" "0 32 18 43 37 0.812 1.513 1.266 X94\n" "0 37 18 43 1 -1.139 -0.703 1.088 X94\n" "0 37 18 43 28 -2.014 -1.646 -2.068 X94\n" "0 37 18 43 37 -1.519 -0.328 1.437 X94\n" "0 43 18 43 28 3.011 -1.405 2.038 X94\n" "0 0 18 48 0 0.000 0.000 0.400 C94 " "0:*-18-48-* Def\n" "0 1 18 48 28 1.767 1.606 0.408 X94\n" "0 32 18 48 28 -1.463 -2.548 0.310 X94\n" "0 0 18 55 0 0.000 0.000 0.000 E94 " "0:*-18-55-* Def\n" "0 0 18 58 0 0.000 0.000 0.000 E94 " "0:*-18-58-* Def\n" "0 0 18 62 0 0.000 0.000 0.500 C94 " "0:*-18-62-* Def\n" "0 1 18 62 1 -0.403 -0.273 0.440 X94\n" "0 32 18 62 1 0.291 0.385 0.582 X94\n" "0 0 18 63 0 0.000 0.000 0.000 E94 " "0:*-18-63-* Def\n" "0 0 18 64 0 0.000 0.000 0.000 E94 " "0:*-18-64-* Def\n" "0 0 18 80 0 0.000 0.000 0.000 E94 " "0:*-18-80-* Def\n" "0 0 19 20 0 0.000 0.000 0.179 E94 " "0:*-19-20-* Def\n" "4 0 19 20 0 0.000 0.000 0.179 E94 " "4:*-19-20-* Def\n" "0 0 19 37 0 0.000 0.000 0.000 E94 " "0:*-19-37-* Def\n" "0 0 19 40 0 0.000 0.000 0.225 E94 " "0:*-19-40-* Def\n" "0 0 19 63 0 0.000 0.000 0.000 E94 " "0:*-19-63-* Def\n" "0 0 19 75 0 0.000 0.000 0.000 E94 " "0:*-19-75-* Def\n" "0 0 20 20 0 0.000 0.000 0.200 C94 " "0:*-20-20-* Def\n" "4 0 20 20 0 0.000 0.000 0.000 C94 " "4:*-20-20-* Def\n" "5 0 20 20 0 0.000 0.000 0.236 E94 " "5:*-20-20-* Def\n" "0 1 20 20 5 0.067 0.081 0.347 C94\n" "0 1 20 20 20 -0.063 -0.064 0.140 C94\n" "0 3 20 20 5 0.000 0.000 0.083 C94\n" "0 3 20 20 20 0.000 0.000 0.000 C94\n" "0 5 20 20 5 0.000 0.000 0.424 C94\n" "0 5 20 20 6 0.000 0.000 -0.080 C94\n" "0 5 20 20 8 0.000 0.127 0.450 C94\n" "0 5 20 20 12 -0.072 -0.269 0.439 C94\n" "0 5 20 20 20 -0.057 0.000 0.307 C94\n" "4 6 20 20 20 0.000 0.000 0.000 C94\n" "4 8 20 20 20 0.000 -0.091 0.192 C94\n" "0 12 20 20 20 0.077 0.202 0.183 C94\n" "4 20 20 20 20 0.000 0.000 0.000 C94\n" "0 0 20 22 0 0.000 0.000 0.236 E94 " "0:*-20-22-* Def\n" "4 0 20 22 0 0.000 0.000 0.236 E94 " "4:*-20-22-* Def\n" "0 0 20 25 0 0.000 0.000 0.251 E94 " "0:*-20-25-* Def\n" "4 0 20 25 0 0.000 0.000 0.251 E94 " "4:*-20-25-* Def\n" "0 0 20 26 0 0.000 0.000 0.376 E94 " "0:*-20-26-* Def\n" "4 0 20 26 0 0.000 0.000 0.376 E94 " "4:*-20-26-* Def\n" "5 0 20 26 0 0.000 0.000 0.376 E94 " "5:*-20-26-* Def\n" "0 0 20 30 0 0.000 0.000 0.000 E94 " "0:*-20-30-* Def\n" "2 0 20 30 0 0.000 0.000 0.000 E94 " "2:*-20-30-* Def\n" "4 0 20 30 0 0.000 0.000 0.000 E94 " "4:*-20-30-* Def\n" "0 0 20 30 30 0.000 0.000 -0.500 C94 " "0:*-20-30=30 Def\n" "0 0 20 34 0 0.000 0.000 0.198 E94 " "0:*-20-34-* Def\n" "4 0 20 34 0 0.000 0.000 0.198 E94 " "4:*-20-34-* Def\n" "0 0 20 37 0 0.000 0.000 0.000 E94 " "0:*-20-37-* Def\n" "4 0 20 37 0 0.000 0.000 0.000 E94 " "4:*-20-37-* Def\n" "0 0 20 40 0 0.000 0.000 0.297 E94 " "0:*-20-40-* Def\n" "0 0 20 41 0 0.000 0.000 0.000 E94 " "0:*-20-41-* Def\n" "0 0 20 43 0 0.000 0.000 0.297 E94 " "0:*-20-43-* Def\n" "4 0 20 43 0 0.000 0.000 0.297 E94 " "4:*-20-43-* Def\n" "0 0 20 45 0 0.000 0.000 0.000 E94 " "0:*-20-45-* Def\n" "0 0 22 22 0 0.000 0.000 0.236 E94 " "0:*-22-22-* Def\n" "4 0 22 22 0 0.000 0.000 0.236 E94 " 
"4:*-22-22-* Def\n" "5 0 22 22 0 0.000 0.000 0.236 E94 " "5:*-22-22-* Def\n" "0 0 22 30 0 0.000 0.000 0.000 E94 " "0:*-22-30-* Def\n" "4 0 22 30 0 0.000 0.000 0.000 E94 " "4:*-22-30-* Def\n" "0 0 22 34 0 0.000 0.000 0.198 E94 " "0:*-22-34-* Def\n" "0 0 22 37 0 0.000 0.000 0.000 E94 " "0:*-22-37-* Def\n" "0 0 22 40 0 0.000 0.000 0.297 E94 " "0:*-22-40-* Def\n" "0 0 22 41 0 0.000 0.000 0.000 E94 " "0:*-22-41-* Def\n" "0 0 22 43 0 0.000 0.000 0.297 E94 " "0:*-22-43-* Def\n" "5 0 22 43 0 0.000 0.000 0.297 E94 " "5:*-22-43-* Def\n" "0 0 22 45 0 0.000 0.000 0.000 E94 " "0:*-22-45-* Def\n" "0 0 25 25 0 0.000 0.000 0.267 E94 " "0:*-25-25-* Def\n" "0 0 25 37 0 0.000 0.000 0.000 E94 " "0:*-25-37-* Def\n" "5 0 25 37 0 0.000 0.000 0.000 E94 " "5:*-25-37-* Def\n" "0 0 25 39 0 0.000 0.000 0.000 E94 " "0:*-25-39-* Def\n" "0 0 25 40 0 0.000 0.000 0.316 E94 " "0:*-25-40-* Def\n" "5 0 25 40 0 0.000 0.000 0.316 E94 " "5:*-25-40-* Def\n" "0 0 25 43 0 0.000 0.000 0.250 C94 " "0:*-25-43-* Def\n" "0 1 25 43 1 -2.686 -1.512 0.591 X94\n" "0 1 25 43 28 -3.730 -0.531 0.000 X94\n" "0 32 25 43 1 2.108 1.896 0.965 X94\n" "0 32 25 43 28 2.977 0.732 -0.502 X94\n" "0 0 25 57 0 0.000 0.000 0.000 E94 " "0:*-25-57-* Def\n" "0 0 25 63 0 0.000 0.000 0.000 E94 " "0:*-25-63-* Def\n" "0 0 26 26 0 0.000 0.000 0.600 E94 " "0:*-26-26-* Def\n" "5 0 26 26 0 0.000 0.000 0.600 E94 " "5:*-26-26-* Def\n" "0 0 26 34 0 0.000 0.000 0.316 E94 " "0:*-26-34-* Def\n" "5 0 26 34 0 0.000 0.000 0.316 E94 " "5:*-26-34-* Def\n" "0 0 26 37 0 0.000 1.423 0.000 E94 " "0:*-26-37-* Def\n" "0 0 26 40 0 0.000 0.000 0.474 E94 " "0:*-26-40-* Def\n" "0 0 30 30 0 0.000 12.000 0.000 E94 " "0:*-30-30-* Def\n" "1 0 30 30 0 0.000 1.800 0.000 E94 " "1:*-30-30-* Def\n" "4 0 30 30 0 0.000 1.800 0.000 E94 " "4:*-30-30-* Def\n" "0 0 30 40 0 0.000 3.600 0.000 E94 " "0:*-30-40-* Def\n" "1 0 30 67 0 0.000 1.800 0.000 E94 " "1:*-30-67-* Def\n" "0 0 34 37 0 0.000 0.000 0.000 E94 " "0:*-34-37-* Def\n" "0 0 34 43 0 0.000 0.000 0.250 E94 " "0:*-34-43-* Def\n" "0 0 37 37 0 0.000 7.000 0.000 C94 " "0:*-37-37-* Def\n" "1 0 37 37 0 0.000 2.000 0.000 #E94 " "1:*-37-37-* Def\n" "4 0 37 37 0 0.000 6.000 0.000 E94 " "4:*-37-37-* Def\n" "5 0 37 37 0 0.000 6.000 0.000 E94 " "5:*-37-37-* Def\n" "0 1 37 37 5 0.000 7.000 0.000 C94\n" "0 1 37 37 37 0.000 7.000 0.000 C94\n" "0 2 37 37 5 0.000 7.000 0.000 C94\n" "0 2 37 37 37 0.000 7.000 0.000 C94\n" "0 3 37 37 5 0.000 7.000 0.000 C94\n" "0 3 37 37 37 0.000 7.000 0.000 C94\n" "0 5 37 37 5 0.000 7.000 0.000 C94\n" "0 5 37 37 6 0.000 7.000 0.000 C94\n" "0 5 37 37 15 0.000 7.000 0.000 C94\n" "0 5 37 37 18 0.000 7.000 0.000 X94\n" "0 5 37 37 37 0.000 7.000 0.000 C94\n" "0 5 37 37 40 0.000 7.000 0.000 C94\n" "0 5 37 37 43 0.000 7.000 0.000 X94\n" "0 6 37 37 37 0.000 7.000 0.000 C94\n" "0 15 37 37 37 0.000 7.000 0.000 C94\n" "0 18 37 37 37 0.000 7.000 0.000 X94\n" "0 37 37 37 37 0.000 7.000 0.000 C94\n" "0 37 37 37 40 0.000 7.000 0.000 C94\n" "0 37 37 37 43 0.000 7.000 0.000 X94\n" "0 0 37 38 0 0.000 7.000 0.000 C94 " "0:*-37-38-* Def\n" "0 0 37 39 0 0.000 3.600 0.000 E94 " "0:*-37-39-* Def\n" "1 0 37 39 0 0.000 6.000 0.000 E94 " "1:*-37-39-* Def\n" "0 0 37 40 0 0.000 4.000 0.000 C94 " "0:*-37-40-* Def\n" "5 0 37 40 0 0.000 3.600 0.000 E94 " "5:*-37-40-* Def\n" "0 37 37 40 1 0.000 4.095 0.382 C94S\n" "0 37 37 40 28 0.698 2.542 3.072 C94S\n" "0 0 37 41 0 0.000 1.800 0.000 E94 " "0:*-37-41-* Def\n" "0 0 37 43 0 0.000 2.000 1.800 C94 " "0:*-37-43-* Def\n" "5 0 37 43 0 0.000 3.600 0.000 E94 " "5:*-37-43-* Def\n" "0 37 37 43 18 0.372 2.284 2.034 X94\n" 
"0 37 37 43 28 0.000 1.694 1.508 X94\n" "0 0 37 45 0 0.000 1.800 0.000 E94 " "0:*-37-45-* Def\n" "0 0 37 46 0 0.000 1.800 0.000 E94 " "0:*-37-46-* Def\n" "0 0 37 55 0 0.000 4.800 0.000 E94 " "0:*-37-55-* Def\n" "0 0 37 56 0 0.000 4.800 0.000 E94 " "0:*-37-56-* Def\n" "1 0 37 57 0 0.000 1.800 0.000 E94 " "1:*-37-57-* Def\n" "0 0 37 58 0 0.000 6.000 0.000 E94 " "0:*-37-58-* Def\n" "1 0 37 58 0 0.000 4.800 0.000 E94 " "1:*-37-58-* Def\n" "0 0 37 62 0 0.000 3.600 0.000 E94 " "0:*-37-62-* Def\n" "0 0 37 63 0 0.000 7.000 0.000 C94 " "0:*-37-63-* Def\n" "1 0 37 63 0 0.000 1.800 0.000 E94 " "1:*-37-63-* Def\n" "0 0 37 64 0 0.000 7.000 0.000 C94 " "0:*-37-64-* Def\n" "1 0 37 64 0 0.000 1.800 0.000 E94 " "1:*-37-64-* Def\n" "1 0 37 67 0 0.000 1.800 0.000 E94 " "1:*-37-67-* Def\n" "0 0 37 69 0 0.000 7.000 0.000 C94 " "0:*-37-69-* Def\n" "0 0 37 78 0 0.000 6.000 0.000 E94 " "0:*-37-78-* Def\n" "0 0 37 81 0 0.000 6.000 0.000 E94 " "0:*-37-81-* Def\n" "1 0 37 81 0 0.000 4.800 0.000 E94 " "1:*-37-81-* Def\n" "0 0 38 38 0 0.000 7.000 0.000 C94 " "0:*-38-38-* Def\n" "0 0 38 58 0 0.000 7.000 0.000 C94 " "0:*-38-58-* Def\n" "0 0 38 63 0 0.000 7.000 0.000 C94 " "0:*-38-63-* Def\n" "0 0 38 64 0 0.000 7.000 0.000 C94 " "0:*-38-64-* Def\n" "0 0 38 69 0 0.000 6.000 0.000 E94 " "0:*-38-69-* Def\n" "0 0 38 78 0 0.000 6.000 0.000 E94 " "0:*-38-78-* Def\n" "0 0 39 40 0 0.000 0.000 0.000 E94 " "0:*-39-40-* Def\n" "0 0 39 45 0 0.000 6.000 0.000 E94 " "0:*-39-45-* Def\n" "0 0 39 63 0 0.000 4.000 0.000 C94 " "0:*-39-63-* Def\n" "1 0 39 63 0 0.000 6.000 0.000 E94 " "1:*-39-63-* Def\n" "5 0 39 63 0 0.000 3.600 0.000 E94 " "5:*-39-63-* Def\n" "0 1 39 63 5 0.000 4.000 0.000 C94\n" "0 1 39 63 64 0.000 4.000 0.000 C94\n" "0 18 39 63 5 0.000 4.000 0.000 X94\n" "0 18 39 63 64 0.000 4.000 0.000 X94\n" "0 63 39 63 5 0.000 4.000 0.000 C94\n" "0 63 39 63 64 0.000 4.000 0.000 C94\n" "0 0 39 64 0 0.000 3.600 0.000 E94 " "0:*-39-64-* Def\n" "1 0 39 64 0 0.000 6.000 0.000 E94 " "1:*-39-64-* Def\n" "0 0 39 65 0 0.000 4.000 0.000 C94 " "0:*-39-65-* Def\n" "0 0 39 78 0 0.000 3.600 0.000 E94 " "0:*-39-78-* Def\n" "0 0 40 40 0 0.000 0.000 0.375 E94 " "0:*-40-40-* Def\n" "0 0 40 45 0 0.000 3.600 0.000 E94 " "0:*-40-45-* Def\n" "0 0 40 46 0 0.000 3.600 0.000 E94 " "0:*-40-46-* Def\n" "0 0 40 54 0 0.000 3.600 0.000 E94 " "0:*-40-54-* Def\n" "2 0 40 54 0 0.000 3.600 0.000 E94 " "2:*-40-54-* Def\n" "0 0 40 63 0 0.000 3.600 0.000 E94 " "0:*-40-63-* Def\n" "0 0 40 64 0 0.000 3.600 0.000 E94 " "0:*-40-64-* Def\n" "0 0 40 78 0 0.000 3.600 0.000 E94 " "0:*-40-78-* Def\n" "0 0 41 41 0 0.000 1.800 0.000 E94 " "0:*-41-41-* Def\n" "0 0 41 55 0 0.000 4.800 0.000 E94 " "0:*-41-55-* Def\n" "0 0 41 62 0 0.000 3.600 0.000 E94 " "0:*-41-62-* Def\n" "0 0 41 80 0 0.000 1.800 0.000 E94 " "0:*-41-80-* Def\n" "0 0 43 43 0 0.000 0.000 0.375 E94 " "0:*-43-43-* Def\n" "0 0 43 45 0 0.000 3.600 0.000 E94 " "0:*-43-45-* Def\n" "0 0 43 64 0 0.000 3.600 0.000 E94 " "0:*-43-64-* Def\n" "0 0 44 57 0 0.000 7.000 0.000 C94 " "0:*-44-57-* Def\n" "0 0 44 63 0 0.000 7.000 0.000 C94 " "0:*-44-63-* Def\n" "0 0 44 65 0 0.000 7.000 0.000 C94 " "0:*-44-65-* Def\n" "0 0 44 78 0 0.000 2.846 0.000 E94 " "0:*-44-78-* Def\n" "0 0 44 80 0 0.000 2.846 0.000 E94 " "0:*-44-80-* Def\n" "0 0 45 63 0 0.000 1.800 0.000 E94 " "0:*-45-63-* Def\n" "0 0 45 64 0 0.000 1.800 0.000 E94 " "0:*-45-64-* Def\n" "0 0 45 78 0 0.000 1.800 0.000 E94 " "0:*-45-78-* Def\n" "0 0 55 57 0 0.000 10.000 0.000 C94 " "0:*-55-57-* Def\n" "2 0 55 57 0 0.000 4.800 0.000 E94 " "2:*-55-57-* Def\n" "5 0 55 57 0 0.000 4.800 
0.000 E94 " "5:*-55-57-* Def\n" "0 1 55 57 5 0.423 12.064 0.090 C94\n" "0 1 55 57 55 -0.428 12.044 0.000 C94\n" "0 36 55 57 5 -0.268 8.077 -0.806 C94\n" "0 36 55 57 55 0.273 8.025 0.692 C94\n" "0 0 55 62 0 0.000 3.600 0.000 E94 " "0:*-55-62-* Def\n" "0 0 55 64 0 0.000 4.800 0.000 E94 " "0:*-55-64-* Def\n" "0 0 55 80 0 0.000 4.800 0.000 E94 " "0:*-55-80-* Def\n" "0 0 56 57 0 0.000 6.000 0.000 C94 " "0:*-56-57-* Def\n" "0 1 56 57 56 0.000 6.886 -0.161 C94\n" "0 36 56 57 56 0.000 4.688 0.107 C94\n" "0 0 56 63 0 0.000 4.800 0.000 E94 " "0:*-56-63-* Def\n" "0 0 56 80 0 0.000 4.800 0.000 E94 " "0:*-56-80-* Def\n" "1 0 57 63 0 0.000 1.800 0.000 E94 " "1:*-57-63-* Def\n" "1 0 57 64 0 0.000 1.800 0.000 E94 " "1:*-57-64-* Def\n" "0 0 58 63 0 0.000 6.000 0.000 E94 " "0:*-58-63-* Def\n" "0 0 58 64 0 0.000 6.000 0.000 E94 " "0:*-58-64-* Def\n" "0 0 59 63 0 0.000 7.000 0.000 C94 " "0:*-59-63-* Def\n" "0 0 59 65 0 0.000 7.000 0.000 C94 " "0:*-59-65-* Def\n" "0 0 59 78 0 0.000 3.600 0.000 E94 " "0:*-59-78-* Def\n" "0 0 59 80 0 0.000 3.600 0.000 E94 " "0:*-59-80-* Def\n" "0 0 59 82 0 0.000 3.600 0.000 E94 " "0:*-59-82-* Def\n" "0 0 62 63 0 0.000 3.600 0.000 E94 " "0:*-62-63-* Def\n" "0 0 62 64 0 0.000 3.600 0.000 E94 " "0:*-62-64-* Def\n" "1 0 63 63 0 0.000 1.800 0.000 E94 " "1:*-63-63-* Def\n" "0 0 63 64 0 0.000 7.000 0.000 C94 " "0:*-63-64-* Def\n" "0 5 63 64 5 0.000 7.000 0.000 C94\n" "0 5 63 64 64 0.000 7.000 0.000 C94\n" "0 39 63 64 5 0.000 7.000 0.000 C94\n" "0 39 63 64 64 0.000 7.000 0.000 C94\n" "0 0 63 66 0 0.000 7.000 0.000 C94 " "0:*-63-66-* Def\n" "0 0 63 78 0 0.000 6.000 0.000 E94 " "0:*-63-78-* Def\n" "0 0 63 81 0 0.000 6.000 0.000 E94 " "0:*-63-81-* Def\n" "0 0 64 64 0 0.000 7.000 0.000 C94 " "0:*-64-64-* Def\n" "1 0 64 64 0 0.000 1.800 0.000 E94 " "1:*-64-64-* Def\n" "0 5 64 64 5 0.000 7.000 0.000 C94\n" "0 5 64 64 63 0.000 7.000 0.000 C94\n" "0 63 64 64 63 0.000 7.000 0.000 C94\n" "0 0 64 65 0 0.000 7.000 0.000 C94 " "0:*-64-65-* Def\n" "0 0 64 66 0 0.000 7.000 0.000 C94 " "0:*-64-66-* Def\n" "0 0 64 78 0 0.000 6.000 0.000 E94 " "0:*-64-78-* Def\n" "0 0 64 81 0 0.000 6.000 0.000 E94 " "0:*-64-81-* Def\n" "5 0 64 81 0 0.000 6.000 0.000 E94 " "5:*-64-81-* Def\n" "0 0 64 82 0 0.000 6.000 0.000 E94 " "0:*-64-82-* Def\n" "0 0 65 66 0 0.000 7.000 0.000 C94 " "0:*-65-66-* Def\n" "0 0 65 78 0 0.000 6.000 0.000 E94 " "0:*-65-78-* Def\n" "0 0 65 81 0 0.000 6.000 0.000 E94 " "0:*-65-81-* Def\n" "0 0 65 82 0 0.000 6.000 0.000 E94 " "0:*-65-82-* Def\n" "0 0 66 66 0 0.000 7.000 0.000 C94 " "0:*-66-66-* Def\n" "0 0 66 78 0 0.000 6.000 0.000 E94 " "0:*-66-78-* Def\n" "0 0 66 81 0 0.000 6.000 0.000 E94 " "0:*-66-81-* Def\n" "0 0 67 67 0 0.000 12.000 0.000 E94 " "0:*-67-67-* Def\n" "5 0 67 67 0 0.000 12.000 0.000 E94 " "5:*-67-67-* Def\n" "0 0 76 76 0 0.000 3.600 0.000 E94 " "0:*-76-76-* Def\n" "0 0 76 78 0 0.000 3.600 0.000 E94 " "0:*-76-78-* Def\n" "0 0 78 78 0 0.000 7.000 0.000 C94 " "0:*-78-78-* Def\n" "0 0 78 79 0 0.000 6.000 0.000 E94 " "0:*-78-79-* Def\n" "0 0 78 81 0 0.000 4.000 0.000 C94 " "0:*-78-81-* Def\n" "0 0 79 79 0 0.000 6.000 0.000 E94 " "0:*-79-79-* Def\n" "0 0 79 81 0 0.000 6.000 0.000 E94 " "0:*-79-81-* Def\n" "0 0 80 81 0 0.000 4.000 0.000 C94 " "0:*-80-81-* Def\n"; class std::unique_ptr<MMFFVdWCollection> MMFFVdWCollection::ds_instance = nullptr; extern const std::string defaultMMFFVdW; MMFFVdWCollection *MMFFVdWCollection::getMMFFVdW(const std::string &mmffVdW) { if (!ds_instance || !mmffVdW.empty()) { ds_instance.reset(new MMFFVdWCollection(mmffVdW)); } return ds_instance.get(); } 
MMFFVdWCollection::MMFFVdWCollection(std::string mmffVdW) { if (mmffVdW.empty()) { mmffVdW = defaultMMFFVdW; } std::istringstream inStream(mmffVdW); bool firstLine = true; std::string inLine = RDKit::getLine(inStream); while (!(inStream.eof())) { if (inLine[0] != '*') { boost::char_separator<char> tabSep("\t"); tokenizer tokens(inLine, tabSep); tokenizer::iterator token = tokens.begin(); if (firstLine) { firstLine = false; this->power = boost::lexical_cast<double>(*token); ++token; this->B = boost::lexical_cast<double>(*token); ++token; this->Beta = boost::lexical_cast<double>(*token); ++token; this->DARAD = boost::lexical_cast<double>(*token); ++token; this->DAEPS = boost::lexical_cast<double>(*token); ++token; } else { MMFFVdW mmffVdWObj; #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP unsigned int atomType = boost::lexical_cast<unsigned int>(*token); #else d_atomType.push_back( (std::uint8_t)(boost::lexical_cast<unsigned int>(*token))); #endif ++token; mmffVdWObj.alpha_i = boost::lexical_cast<double>(*token); ++token; mmffVdWObj.N_i = boost::lexical_cast<double>(*token); ++token; mmffVdWObj.A_i = boost::lexical_cast<double>(*token); ++token; mmffVdWObj.G_i = boost::lexical_cast<double>(*token); ++token; mmffVdWObj.DA = (boost::lexical_cast<std::string>(*token)).at(0); ++token; mmffVdWObj.R_star = mmffVdWObj.A_i * pow(mmffVdWObj.alpha_i, this->power); #ifdef RDKIT_MMFF_PARAMS_USE_STD_MAP d_params[atomType] = mmffVdWObj; #else d_params.push_back(mmffVdWObj); #endif } } inLine = RDKit::getLine(inStream); } } const std::string defaultMMFFVdW = "*\n" "* Copyright (c) Merck and Co., Inc., 1994, 1995, 1996\n" "* All Rights Reserved\n" "*\n" "* E94 - From empirical rule (JACS 1992, 114, 7827) \n" "* C94 - Adjusted in fit to HF/6-31G* dimer energies and geometries\n" "* X94 - Chosen in the extension of the paratererization for MMFF \n" "* by analogy to other, similar atom types or, for ions, by\n" "* fitting to atomic radii (and sometimes to association energies\n" "* for hydrates)\n" "*\n" "* power B Beta DARAD DAEPS\n" "0.25 0.2 12. 
0.8 0.5\n" "*\n" "* type alpha-i N-i A-i G-i DA Symb Origin\n" "*------------------------------------------------------------\n" "1 1.050 2.490 3.890 1.282 - CR E94\n" "2 1.350 2.490 3.890 1.282 - C=C E94\n" "3 1.100 2.490 3.890 1.282 - C=O E94\n" "4 1.300 2.490 3.890 1.282 - CSP E94\n" "5 0.250 0.800 4.200 1.209 - HC C94\n" "6 0.70 3.150 3.890 1.282 A OR C94\n" "7 0.65 3.150 3.890 1.282 A O=C C94\n" "8 1.15 2.820 3.890 1.282 A NR C94\n" "9 0.90 2.820 3.890 1.282 A N=C C94\n" "10 1.000 2.820 3.890 1.282 A NC=O E94\n" "11 0.35 3.480 3.890 1.282 A F C94\n" "12 2.300 5.100 3.320 1.345 A CL E94\n" "13 3.400 6.000 3.190 1.359 A BR E94\n" "14 5.500 6.950 3.080 1.404 A I E94\n" "15 3.00 4.800 3.320 1.345 A S C94\n" "16 3.900 4.800 3.320 1.345 A S=C E94\n" "17 2.700 4.800 3.320 1.345 - SO E94\n" "18 2.100 4.800 3.320 1.345 - SO2 E94\n" "19 4.500 4.200 3.320 1.345 - SI E94\n" "20 1.050 2.490 3.890 1.282 - CR3R E94\n" "21 0.150 0.800 4.200 1.209 D HOR C94\n" "22 1.100 2.490 3.890 1.282 - CR3R E94\n" "23 0.150 0.800 4.200 1.209 D HNR C94\n" "24 0.150 0.800 4.200 1.209 D HOCO C94\n" "25 1.600 4.500 3.320 1.345 - PO4 E94\n" "26 3.600 4.500 3.320 1.345 A P E94\n" "27 0.150 0.800 4.200 1.209 D HN=C C94\n" "28 0.150 0.800 4.200 1.209 D HNCO C94\n" "29 0.150 0.800 4.200 1.209 D HOCC C94\n" "30 1.350 2.490 3.890 1.282 - CE4R E94\n" "31 0.150 0.800 4.200 1.209 D HOH C94\n" "32 0.75 3.150 3.890 1.282 A O2CM C94\n" "33 0.150 0.800 4.200 1.209 D HOS C94\n" "34 1.00 2.820 3.890 1.282 - NR+ C94\n" "35 1.50 3.150 3.890 1.282 A OM X94\n" "36 0.150 0.800 4.200 1.209 D HNR+ C94\n" "37 1.350 2.490 3.890 1.282 - CB E94\n" "38 0.85 2.820 3.890 1.282 A NPYD C94\n" "39 1.10 2.820 3.890 1.282 - NPYL C94\n" "40 1.00 2.820 3.890 1.282 A NC=C E94\n" "41 1.100 2.490 3.890 1.282 - CO2M C94\n" "42 1.000 2.820 3.890 1.282 A NSP E94\n" "43 1.000 2.820 3.890 1.282 A NSO2 E94\n" "44 3.00 4.800 3.320 1.345 A STHI C94\n" "45 1.150 2.820 3.890 1.282 - NO2 E94\n" "46 1.300 2.820 3.890 1.282 - N=O E94\n" "47 1.000 2.820 3.890 1.282 A NAZT X94\n" "48 1.200 2.820 3.890 1.282 A NSO X94\n" "49 1.00 3.150 3.890 1.282 - O+ X94\n" "50 0.150 0.800 4.200 1.209 D HO+ C94\n" "51 0.400 3.150 3.890 1.282 - O=+ E94\n" "52 0.150 0.800 4.200 1.209 D HO=+ C94\n" "53 1.000 2.820 3.890 1.282 - =N= X94\n" "54 1.30 2.820 3.890 1.282 - N+=C C94\n" "55 0.80 2.820 3.890 1.282 - NCN+ E94\n" "56 0.80 2.820 3.890 1.282 - NGD+ E94\n" "57 1.000 2.490 3.890 1.282 - CNN+ E94\n" "58 0.80 2.820 3.890 1.282 - NPD+ E94\n" "59 0.65 3.150 3.890 1.282 A OFUR C94\n" "60 1.800 2.490 3.890 1.282 A C%- E94\n" "61 0.800 2.820 3.890 1.282 A NR% E94\n" "62 1.300 2.820 3.890 1.282 A NM X94\n" "63 1.350 2.490 3.890 1.282 - C5A E94\n" "64 1.350 2.490 3.890 1.282 - C5B E94\n" "65 1.000 2.820 3.890 1.282 A N5A E94\n" "66 0.75 2.820 3.890 1.282 A N5B C94\n" "67 0.950 2.82 3.890 1.282 A N2OX X94\n" "68 0.90 2.82 3.890 1.282 A N3OX C94\n" "69 0.950 2.82 3.890 1.282 A NPOX C94\n" "70 0.87 3.150 3.890 1.282 A OH2 C94\n" "71 0.150 0.800 4.200 1.209 D HS C94\n" "72 4.000 4.800 3.320 1.345 A SM X94\n" "73 3.000 4.800 3.320 1.345 - SMO2 X94\n" "74 3.000 4.800 3.320 1.345 - =S=O X94\n" "75 4.000 4.500 3.320 1.345 A -P=C X94\n" "76 1.200 2.820 3.890 1.282 A N5M X94\n" "77 1.500 5.100 3.320 1.345 A CLO4 X94\n" "78 1.350 2.490 3.890 1.282 - C5 X94\n" "79 1.000 2.820 3.890 1.282 A N5 X94\n" "80 1.000 2.490 3.890 1.282 - CIM+ C94\n" "81 0.80 2.820 3.890 1.282 - NIM+ C94\n" "82 0.950 2.82 3.890 1.282 A N5OX X94\n" "87 0.45 6.0 4.0 1.4 - FE+2 X94\n" "88 0.55 6.0 4.0 1.4 - FE+3 X94\n" "89 1.4 3.48 3.890 
1.282 A F- X94\n" "90 4.5 5.100 3.320 1.345 A CL- X94\n" "91 6.0 6.000 3.190 1.359 A BR- X94\n" "92 0.15 2.0 4.0 1.3 - LI+ X94\n" "93 0.4 3.5 4.0 1.3 - NA+ X94\n" "94 1.0 5.0 4.0 1.3 - K+ X94\n" "95 0.43 6.0 4.0 1.4 - ZN+2 X94\n" "96 0.9 5.0 4.0 1.4 - CA+2 X94\n" "97 0.35 6.0 4.0 1.4 - CU+1 X94\n" "98 0.40 6.0 4.0 1.4 - CU+2 X94\n" "99 0.35 3.5 4.0 1.3 - MG+2 X94\n"; } // end of namespace MMFF } // end of namespace ForceFields
1
19,387
Won't this leak like a sieve? The caller can't delete the returned pointer: the instance is owned internally by a unique_ptr, but what is handed back is a plain raw pointer (see the sketch after this row).
rdkit-rdkit
cpp
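The ownership question in the review message above comes from the getMMFFVdW accessor in the file: the singleton is stored in a static std::unique_ptr, while callers only ever see the raw pointer returned by .get(). Below is a minimal, self-contained C++ sketch of that pattern, using hypothetical names (ParamsCollection, getParams) rather than the actual RDKit API; it is an illustration of the ownership split, not a reproduction of the library code.

#include <memory>
#include <string>

// Hypothetical stand-in for the parameter collection under review.
class ParamsCollection {
 public:
  explicit ParamsCollection(std::string data) : d_data(std::move(data)) {}

  // Lazily (re)build the singleton. The static unique_ptr owns the object;
  // the returned raw pointer is only a non-owning view of it.
  static ParamsCollection *getParams(const std::string &data = "") {
    if (!ds_instance || !data.empty()) {
      // reset() destroys any previous instance, so raw pointers handed out
      // earlier would dangle; nothing leaks, the unique_ptr cleans up.
      ds_instance.reset(new ParamsCollection(data));
    }
    return ds_instance.get();
  }

 private:
  static std::unique_ptr<ParamsCollection> ds_instance;
  std::string d_data;
};

std::unique_ptr<ParamsCollection> ParamsCollection::ds_instance = nullptr;

int main() {
  ParamsCollection *p = ParamsCollection::getParams();
  // The caller must NOT `delete p`: ownership stays with ds_instance,
  // so deleting here would cause a double free when the static is destroyed.
  (void)p;
  return 0;
}

Under this reading nothing actually leaks, since the unique_ptr destroys the old object on reset and the last one at program exit, but any raw pointer obtained before a reset is left dangling. Making the non-owning contract explicit (for instance by returning a reference) is presumably what the reviewer is asking for.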
@@ -360,6 +360,15 @@ class LocalStorage implements IStorage { name, (data, cb) => { if (data._attachments[filename]) { + // get version form _attachments; + // there aways had 'version' in verdaccio publish package. + // but there was no 'version' in package witch unlink form npmjs + // see https://github.com/verdaccio/verdaccio/issues/1359 + const version = data._attachments[filename].version; + if (version && data.versions[version] && data.time[version]) { + delete data.versions[version]; + delete data.time[version]; + } delete data._attachments[filename]; cb(); } else {
1
/** * @prettier * @flow */ import assert from 'assert'; import UrlNode from 'url'; import _ from 'lodash'; import { ErrorCode, isObject, getLatestVersion, tagVersion, validateName } from './utils'; import { generatePackageTemplate, normalizePackage, generateRevision, getLatestReadme, cleanUpReadme, normalizeContributors } from './storage-utils'; import { API_ERROR, DIST_TAGS, STORAGE, USERS } from './constants'; import { createTarballHash } from './crypto-utils'; import { prepareSearchPackage } from './storage-utils'; import loadPlugin from '../lib/plugin-loader'; import LocalDatabase from '@verdaccio/local-storage'; import { UploadTarball, ReadTarball } from '@verdaccio/streams'; import type { Package, Config, MergeTags, Version, DistFile, Callback, Logger } from '@verdaccio/types'; import type { ILocalData, IPackageStorage } from '@verdaccio/local-storage'; import type { IUploadTarball, IReadTarball } from '@verdaccio/streams'; import type { IStorage, StringValue } from '../../types'; /** * Implements Storage interface (same for storage.js, local-storage.js, up-storage.js). */ class LocalStorage implements IStorage { config: Config; localData: ILocalData; logger: Logger; constructor(config: Config, logger: Logger) { this.logger = logger.child({ sub: 'fs' }); this.config = config; this.localData = this._loadStorage(config, logger); } addPackage(name: string, pkg: Package, callback: Callback) { const storage: any = this._getLocalStorage(name); if (_.isNil(storage)) { return callback(ErrorCode.getNotFound('this package cannot be added')); } storage.createPackage(name, generatePackageTemplate(name), err => { if (_.isNull(err) === false && err.code === STORAGE.FILE_EXIST_ERROR) { return callback(ErrorCode.getConflict()); } const latest = getLatestVersion(pkg); if (_.isNil(latest) === false && pkg.versions[latest]) { return callback(null, pkg.versions[latest]); } return callback(); }); } /** * Remove package. 
* @param {*} name * @param {*} callback * @return {Function} */ removePackage(name: string, callback: Callback) { const storage: any = this._getLocalStorage(name); if (_.isNil(storage)) { return callback(ErrorCode.getNotFound()); } storage.readPackage(name, (err, data) => { if (_.isNil(err) === false) { if (err.code === STORAGE.NO_SUCH_FILE_ERROR) { return callback(ErrorCode.getNotFound()); } else { return callback(err); } } data = normalizePackage(data); this.localData.remove(name, removeFailed => { if (removeFailed) { // This will happen when database is locked return callback(ErrorCode.getBadData(removeFailed.message)); } storage.deletePackage(STORAGE.PACKAGE_FILE_NAME, err => { if (err) { return callback(err); } const attachments = Object.keys(data._attachments); this._deleteAttachments(storage, attachments, callback); }); }); }); } /** * Synchronize remote package info with the local one * @param {*} name * @param {*} packageInfo * @param {*} callback */ updateVersions(name: string, packageInfo: Package, callback: Callback) { this._readCreatePackage(name, (err, packageLocalJson) => { if (err) { return callback(err); } let change = false; // updating readme packageLocalJson.readme = getLatestReadme(packageInfo); if (packageInfo.readme !== packageLocalJson.readme) { change = true; } for (const versionId in packageInfo.versions) { if (_.isNil(packageLocalJson.versions[versionId])) { let version = packageInfo.versions[versionId]; // we don't keep readme for package versions, // only one readme per package version = cleanUpReadme(version); version.contributors = normalizeContributors(version.contributors); change = true; packageLocalJson.versions[versionId] = version; if (version.dist && version.dist.tarball) { const urlObject: any = UrlNode.parse(version.dist.tarball); const filename = urlObject.pathname.replace(/^.*\//, ''); // we do NOT overwrite any existing records if (_.isNil(packageLocalJson._distfiles[filename])) { const hash: DistFile = (packageLocalJson._distfiles[filename] = { url: version.dist.tarball, sha: version.dist.shasum, }); /* eslint spaced-comment: 0 */ // $FlowFixMe const upLink: string = version[Symbol.for('__verdaccio_uplink')]; if (_.isNil(upLink) === false) { this._updateUplinkToRemoteProtocol(hash, upLink); } } } } } for (const tag in packageInfo[DIST_TAGS]) { if (!packageLocalJson[DIST_TAGS][tag] || packageLocalJson[DIST_TAGS][tag] !== packageInfo[DIST_TAGS][tag]) { change = true; packageLocalJson[DIST_TAGS][tag] = packageInfo[DIST_TAGS][tag]; } } for (const up in packageInfo._uplinks) { if (Object.prototype.hasOwnProperty.call(packageInfo._uplinks, up)) { const need_change = !isObject(packageLocalJson._uplinks[up]) || packageInfo._uplinks[up].etag !== packageLocalJson._uplinks[up].etag || packageInfo._uplinks[up].fetched !== packageLocalJson._uplinks[up].fetched; if (need_change) { change = true; packageLocalJson._uplinks[up] = packageInfo._uplinks[up]; } } } if ('time' in packageInfo && !_.isEqual(packageLocalJson.time, packageInfo.time)) { packageLocalJson.time = packageInfo.time; change = true; } if (change) { this.logger.debug({ name }, 'updating package @{name} info'); this._writePackage(name, packageLocalJson, function(err) { callback(err, packageLocalJson); }); } else { callback(null, packageLocalJson); } }); } /** * Add a new version to a previous local package. 
* @param {*} name * @param {*} version * @param {*} metadata * @param {*} tag * @param {*} callback */ addVersion(name: string, version: string, metadata: Version, tag: StringValue, callback: Callback) { this._updatePackage( name, (data, cb) => { // keep only one readme per package data.readme = metadata.readme; // TODO: lodash remove metadata = cleanUpReadme(metadata); metadata.contributors = normalizeContributors(metadata.contributors); const hasVersion = data.versions[version] != null; if (hasVersion) { return cb(ErrorCode.getConflict()); } // if uploaded tarball has a different shasum, it's very likely that we have some kind of error if (isObject(metadata.dist) && _.isString(metadata.dist.tarball)) { const tarball = metadata.dist.tarball.replace(/.*\//, ''); if (isObject(data._attachments[tarball])) { if (_.isNil(data._attachments[tarball].shasum) === false && _.isNil(metadata.dist.shasum) === false) { if (data._attachments[tarball].shasum != metadata.dist.shasum) { const errorMessage = `shasum error, ${data._attachments[tarball].shasum} != ${metadata.dist.shasum}`; return cb(ErrorCode.getBadRequest(errorMessage)); } } const currentDate = new Date().toISOString(); // some old storage do not have this field #740 if (_.isNil(data.time)) { data.time = {}; } data.time['modified'] = currentDate; if ('created' in data.time === false) { data.time.created = currentDate; } data.time[version] = currentDate; data._attachments[tarball].version = version; } } data.versions[version] = metadata; tagVersion(data, version, tag); this.localData.add(name, addFailed => { if (addFailed) { return cb(ErrorCode.getBadData(addFailed.message)); } cb(); }); }, callback ); } /** * Merge a new list of tags for a local packages with the existing one. * @param {*} pkgName * @param {*} tags * @param {*} callback */ mergeTags(pkgName: string, tags: MergeTags, callback: Callback) { this._updatePackage( pkgName, (data, cb) => { /* eslint guard-for-in: 0 */ for (const tag: string in tags) { // this handle dist-tag rm command if (_.isNull(tags[tag])) { delete data[DIST_TAGS][tag]; continue; } if (_.isNil(data.versions[tags[tag]])) { return cb(this._getVersionNotFound()); } const version: string = tags[tag]; tagVersion(data, version, tag); } cb(); }, callback ); } /** * Return version not found * @return {String} * @private */ _getVersionNotFound() { return ErrorCode.getNotFound(API_ERROR.VERSION_NOT_EXIST); } /** * Return file no available * @return {String} * @private */ _getFileNotAvailable() { return ErrorCode.getNotFound('no such file available'); } /** * Update the package metadata, tags and attachments (tarballs). * Note: Currently supports unpublishing only. 
* @param {*} name * @param {*} incomingPkg * @param {*} revision * @param {*} callback * @return {Function} */ changePackage(name: string, incomingPkg: Package, revision?: string, callback: Callback) { if (!isObject(incomingPkg.versions) || !isObject(incomingPkg[DIST_TAGS])) { return callback(ErrorCode.getBadData()); } this._updatePackage( name, (localData, cb) => { for (const version in localData.versions) { if (_.isNil(incomingPkg.versions[version])) { this.logger.info({ name: name, version: version }, 'unpublishing @{name}@@{version}'); delete localData.versions[version]; delete localData.time[version]; for (const file in localData._attachments) { if (localData._attachments[file].version === version) { delete localData._attachments[file].version; } } } } localData[USERS] = incomingPkg[USERS]; localData[DIST_TAGS] = incomingPkg[DIST_TAGS]; cb(); }, function(err) { if (err) { return callback(err); } callback(); } ); } /** * Remove a tarball. * @param {*} name * @param {*} filename * @param {*} revision * @param {*} callback */ removeTarball(name: string, filename: string, revision: string, callback: Callback) { assert(validateName(filename)); this._updatePackage( name, (data, cb) => { if (data._attachments[filename]) { delete data._attachments[filename]; cb(); } else { cb(this._getFileNotAvailable()); } }, err => { if (err) { return callback(err); } const storage = this._getLocalStorage(name); if (storage) { storage.deletePackage(filename, callback); } } ); } /** * Add a tarball. * @param {String} name * @param {String} filename * @return {Stream} */ addTarball(name: string, filename: string) { assert(validateName(filename)); let length = 0; const shaOneHash = createTarballHash(); const uploadStream: IUploadTarball = new UploadTarball(); const _transform = uploadStream._transform; const storage = this._getLocalStorage(name); (uploadStream: any).abort = function() {}; (uploadStream: any).done = function() {}; uploadStream._transform = function(data, ...args) { shaOneHash.update(data); // measure the length for validation reasons length += data.length; const appliedData = [data, ...args]; _transform.apply(uploadStream, appliedData); }; if (name === '__proto__') { process.nextTick(() => { uploadStream.emit('error', ErrorCode.getForbidden()); }); return uploadStream; } if (!storage) { process.nextTick(() => { uploadStream.emit('error', "can't upload this package"); }); return uploadStream; } const writeStream: IUploadTarball = storage.writeTarball(filename); writeStream.on('error', err => { if (err.code === STORAGE.FILE_EXIST_ERROR) { uploadStream.emit('error', ErrorCode.getConflict()); uploadStream.abort(); } else if (err.code === STORAGE.NO_SUCH_FILE_ERROR) { // check if package exists to throw an appropriate message this.getPackageMetadata(name, function(_err, res) { if (_err) { uploadStream.emit('error', _err); } else { uploadStream.emit('error', err); } }); } else { uploadStream.emit('error', err); } }); writeStream.on('open', function() { // re-emitting open because it's handled in storage.js uploadStream.emit('open'); }); writeStream.on('success', () => { this._updatePackage( name, function updater(data, cb) { data._attachments[filename] = { shasum: shaOneHash.digest('hex'), }; cb(); }, function(err) { if (err) { uploadStream.emit('error', err); } else { uploadStream.emit('success'); } } ); }); (uploadStream: any).abort = function() { writeStream.abort(); }; (uploadStream: any).done = function() { if (!length) { uploadStream.emit('error', ErrorCode.getBadData('refusing to accept 
zero-length file')); writeStream.abort(); } else { writeStream.done(); } }; uploadStream.pipe(writeStream); return uploadStream; } /** * Get a tarball. * @param {*} name * @param {*} filename * @return {ReadTarball} */ getTarball(name: string, filename: string): IReadTarball { assert(validateName(filename)); const storage: IPackageStorage = this._getLocalStorage(name); if (_.isNil(storage)) { return this._createFailureStreamResponse(); } return this._streamSuccessReadTarBall(storage, filename); } /** * Return a stream that emits a read failure. * @private * @return {ReadTarball} */ _createFailureStreamResponse(): IReadTarball { const stream: IReadTarball = new ReadTarball(); process.nextTick(() => { stream.emit('error', this._getFileNotAvailable()); }); return stream; } /** * Return a stream that emits the tarball data * @param {Object} storage * @param {String} filename * @private * @return {ReadTarball} */ _streamSuccessReadTarBall(storage: any, filename: string): IReadTarball { const stream: IReadTarball = new ReadTarball(); const readTarballStream = storage.readTarball(filename); const e404 = ErrorCode.getNotFound; (stream: any).abort = function() { if (_.isNil(readTarballStream) === false) { readTarballStream.abort(); } }; readTarballStream.on('error', function(err) { if (err && err.code === STORAGE.NO_SUCH_FILE_ERROR) { stream.emit('error', e404('no such file available')); } else { stream.emit('error', err); } }); readTarballStream.on('content-length', function(v) { stream.emit('content-length', v); }); readTarballStream.on('open', function() { // re-emitting open because it's handled in storage.js stream.emit('open'); readTarballStream.pipe(stream); }); return stream; } /** * Retrieve a package by name. * @param {*} name * @param {*} callback * @return {Function} */ getPackageMetadata(name: string, callback?: Callback = () => {}): void { const storage: IPackageStorage = this._getLocalStorage(name); if (_.isNil(storage)) { return callback(ErrorCode.getNotFound()); } this._readPackage(name, storage, callback); } /** * Search a local package. * @param {*} startKey * @param {*} options * @return {Function} */ search(startKey: string, options: any) { const stream = new ReadTarball({ objectMode: true }); this._searchEachPackage( (item, cb) => { if (item.time > parseInt(startKey, 10)) { this.getPackageMetadata(item.name, (err: Error, data: Package) => { if (err) { return cb(err); } const time = new Date(item.time).toISOString(); const result = prepareSearchPackage(data, time); if (_.isNil(result) === false) { stream.push(result); } cb(); }); } else { cb(); } }, function onEnd(err) { if (err) { return stream.emit('error', err); } stream.end(); } ); return stream; } /** * Retrieve a wrapper that provide access to the package location. * @param {Object} pkgName package name. * @return {Object} */ _getLocalStorage(pkgName: string): IPackageStorage { return this.localData.getPackageStorage(pkgName); } /** * Read a json file from storage. * @param {Object} storage * @param {Function} callback */ _readPackage(name: string, storage: any, callback: Callback) { storage.readPackage(name, (err, result) => { if (err) { if (err.code === STORAGE.NO_SUCH_FILE_ERROR) { return callback(ErrorCode.getNotFound()); } else { return callback(this._internalError(err, STORAGE.PACKAGE_FILE_NAME, 'error reading')); } } callback(err, normalizePackage(result)); }); } /** * Walks through each package and calls `on_package` on them. 
* @param {*} onPackage * @param {*} onEnd */ _searchEachPackage(onPackage: Callback, onEnd: Callback) { // save wait whether plugin still do not support search functionality if (_.isNil(this.localData.search)) { this.logger.warn('plugin search not implemented yet'); onEnd(); } else { this.localData.search(onPackage, onEnd, validateName); } } /** * Retrieve either a previous created local package or a boilerplate. * @param {*} pkgName * @param {*} callback * @return {Function} */ _readCreatePackage(pkgName: string, callback: Callback) { const storage: any = this._getLocalStorage(pkgName); if (_.isNil(storage)) { return this._createNewPackage(pkgName, callback); } storage.readPackage(pkgName, (err, data) => { // TODO: race condition if (_.isNil(err) === false) { if (err.code === STORAGE.NO_SUCH_FILE_ERROR) { data = generatePackageTemplate(pkgName); } else { return callback(this._internalError(err, STORAGE.PACKAGE_FILE_NAME, 'error reading')); } } callback(null, normalizePackage(data)); }); } _createNewPackage(name: string, callback: Callback): Callback { return callback(null, normalizePackage(generatePackageTemplate(name))); } /** * Handle internal error * @param {*} err * @param {*} file * @param {*} message * @return {Object} Error instance */ _internalError(err: string, file: string, message: string) { this.logger.error({ err: err, file: file }, `${message} @{file}: @{!err.message}`); return ErrorCode.getInternalError(); } /** * @param {*} name package name * @param {*} updateHandler function(package, cb) - update function * @param {*} callback callback that gets invoked after it's all updated * @return {Function} */ _updatePackage(name: string, updateHandler: Callback, callback: Callback) { const storage: IPackageStorage = this._getLocalStorage(name); if (!storage) { return callback(ErrorCode.getNotFound()); } storage.updatePackage(name, updateHandler, this._writePackage.bind(this), normalizePackage, callback); } /** * Update the revision (_rev) string for a package. 
* @param {*} name * @param {*} json * @param {*} callback * @return {Function} */ _writePackage(name: string, json: Package, callback: Callback) { const storage: any = this._getLocalStorage(name); if (_.isNil(storage)) { return callback(); } storage.savePackage(name, this._setDefaultRevision(json), callback); } _setDefaultRevision(json: Package) { // calculate revision from couch db if (_.isString(json._rev) === false) { json._rev = STORAGE.DEFAULT_REVISION; } // this is intended in debug mode we do not want modify the store revision if (_.isNil(this.config._debug)) { json._rev = generateRevision(json._rev); } return json; } _deleteAttachments(storage: any, attachments: string[], callback: Callback): void { const unlinkNext = function(cb) { if (_.isEmpty(attachments)) { return cb(); } const attachment = attachments.shift(); storage.deletePackage(attachment, function() { unlinkNext(cb); }); }; unlinkNext(function() { // try to unlink the directory, but ignore errors because it can fail storage.removePackage(function(err) { callback(err); }); }); } /** * Ensure the dist file remains as the same protocol * @param {Object} hash metadata * @param {String} upLinkKey registry key * @private */ _updateUplinkToRemoteProtocol(hash: DistFile, upLinkKey: string): void { // if we got this information from a known registry, // use the same protocol for the tarball // // see https://github.com/rlidwka/sinopia/issues/166 const tarballUrl: any = UrlNode.parse(hash.url); const uplinkUrl: any = UrlNode.parse(this.config.uplinks[upLinkKey].url); if (uplinkUrl.host === tarballUrl.host) { tarballUrl.protocol = uplinkUrl.protocol; hash.registry = upLinkKey; hash.url = UrlNode.format(tarballUrl); } } async getSecret(config: Config) { const secretKey = await this.localData.getSecret(); return this.localData.setSecret(config.checkSecretKey(secretKey)); } _loadStorage(config: Config, logger: Logger): ILocalData { const Storage = this._loadStorePlugin(); if (_.isNil(Storage)) { assert(this.config.storage, 'CONFIG: storage path not defined'); return new LocalDatabase(this.config, logger); } else { return Storage; } } _loadStorePlugin(): ILocalData { const plugin_params = { config: this.config, logger: this.logger, }; return _.head( loadPlugin(this.config, this.config.store, plugin_params, plugin => { return plugin.getPackageStorage; }) ); } } export default LocalStorage;
1
20,293
Typo in the added comment: form => from
verdaccio-verdaccio
js
@@ -28,6 +28,7 @@ public interface Rule extends PropertySource { * The property descriptor to universally suppress violations with messages * matching a regular expression. */ + // TODO 7.0.0 use PropertyDescriptor<Pattern> StringProperty VIOLATION_SUPPRESS_REGEX_DESCRIPTOR = new StringProperty("violationSuppressRegex", "Suppress violations with messages matching a regular expression", null, Integer.MAX_VALUE - 1);
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd; import java.util.List; import net.sourceforge.pmd.lang.Language; import net.sourceforge.pmd.lang.LanguageVersion; import net.sourceforge.pmd.lang.ParserOptions; import net.sourceforge.pmd.lang.ast.Node; import net.sourceforge.pmd.properties.PropertySource; import net.sourceforge.pmd.properties.StringProperty; /** * This is the basic Rule interface for PMD rules. * * <p> * <strong>Thread safety:</strong> PMD will create one instance of a rule per * thread. The instances are not shared across different threads. However, a * single rule instance is reused for analyzing multiple files. * </p> */ public interface Rule extends PropertySource { /** * The property descriptor to universally suppress violations with messages * matching a regular expression. */ StringProperty VIOLATION_SUPPRESS_REGEX_DESCRIPTOR = new StringProperty("violationSuppressRegex", "Suppress violations with messages matching a regular expression", null, Integer.MAX_VALUE - 1); /** * Name of the property to universally suppress violations on nodes which * match a given relative XPath expression. */ StringProperty VIOLATION_SUPPRESS_XPATH_DESCRIPTOR = new StringProperty("violationSuppressXPath", "Suppress violations on nodes which match a given relative XPath expression.", null, Integer.MAX_VALUE - 2); /** * Get the Language of this Rule. * * @return the language */ Language getLanguage(); /** * Set the Language of this Rule. * * @param language * the language */ void setLanguage(Language language); /** * Get the minimum LanguageVersion to which this Rule applies. If this value * is <code>null</code> it indicates there is no minimum bound. * * @return the minimum language version */ LanguageVersion getMinimumLanguageVersion(); /** * Set the minimum LanguageVersion to which this Rule applies. * * @param minimumLanguageVersion * the minimum language version */ void setMinimumLanguageVersion(LanguageVersion minimumLanguageVersion); /** * Get the maximum LanguageVersion to which this Rule applies. If this value * is <code>null</code> it indicates there is no maximum bound. * * @return the maximum language version */ LanguageVersion getMaximumLanguageVersion(); /** * Set the maximum LanguageVersion to which this Rule applies. * * @param maximumLanguageVersion * the maximum language version */ void setMaximumLanguageVersion(LanguageVersion maximumLanguageVersion); /** * Gets whether this Rule is deprecated. A deprecated Rule is one which: * <ul> * <li>is scheduled for removal in a future version of PMD</li> * <li>or, has been removed and replaced with a non-functioning place-holder * and will be completely removed in a future version of PMD</li> * <li>or, has been renamed/moved and the old name will be completely * removed in a future version of PMD</li> * </ul> * * @return <code>true</code> if this rule is deprecated */ boolean isDeprecated(); /** * Sets whether this Rule is deprecated. * * @param deprecated * whether this rule is deprecated */ void setDeprecated(boolean deprecated); /** * Get the name of this Rule. * * @return the name */ @Override String getName(); /** * Set the name of this Rule. * * @param name * the name */ void setName(String name); /** * Get the version of PMD in which this Rule was added. Return * <code>null</code> if not applicable. * * @return version of PMD since when this rule was added */ String getSince(); /** * Set the version of PMD in which this Rule was added. 
* * @param since * the version of PMD since when this rule was added */ void setSince(String since); /** * Get the implementation class of this Rule. * * @return the implementation class name of this rule. */ String getRuleClass(); /** * Set the class of this Rule. * * @param ruleClass * the class name of this rule. */ void setRuleClass(String ruleClass); /** * Get the name of the RuleSet containing this Rule. * * @return the name of th ruleset containing this rule. * @see RuleSet */ String getRuleSetName(); /** * Set the name of the RuleSet containing this Rule. * * @param name * the name of the ruleset containing this rule. * @see RuleSet */ void setRuleSetName(String name); /** * Get the message to show when this Rule identifies a violation. * * @return the message to show for a violation. */ String getMessage(); /** * Set the message to show when this Rule identifies a violation. * * @param message * the message to show for a violation. */ void setMessage(String message); /** * Get the description of this Rule. * * @return the description */ String getDescription(); /** * Set the description of this Rule. * * @param description * the description */ void setDescription(String description); /** * Get the list of examples for this Rule. * * @return the list of examples for this rule. */ List<String> getExamples(); /** * Add a single example for this Rule. * * @param example * a single example to add */ void addExample(String example); /** * Get a URL for external information about this Rule. * * @return the URL for external information about this rule. */ String getExternalInfoUrl(); /** * Set a URL for external information about this Rule. * * @param externalInfoUrl * the URL for external information about this rule. */ void setExternalInfoUrl(String externalInfoUrl); /** * Get the priority of this Rule. * * @return the priority */ RulePriority getPriority(); /** * Set the priority of this Rule. * * @param priority * the priority */ void setPriority(RulePriority priority); /** * Get the parser options for this Rule. Parser options are used to * configure the {@link net.sourceforge.pmd.lang.Parser} to create an AST in * the form the Rule is expecting. Because ParserOptions are mutable, a Rule * should return a new instance on each call. * * @return the parser options */ ParserOptions getParserOptions(); /** * Sets whether this Rule uses Data Flow Analysis. * @deprecated See {@link #isDfa()} */ @Deprecated // To be removed in PMD 7.0.0 void setUsesDFA(); /** * Sets whether this Rule uses Data Flow Analysis. * @deprecated See {@link #isDfa()} */ @Deprecated void setDfa(boolean isDfa); /** * Gets whether this Rule uses Data Flow Analysis. * * @return <code>true</code> if Data Flow Analysis is used. * @deprecated See {@link #isDfa()} */ @Deprecated // To be removed in PMD 7.0.0 boolean usesDFA(); /** * Gets whether this Rule uses Data Flow Analysis. * * @return <code>true</code> if Data Flow Analysis is used. * @deprecated Optional AST processing stages will be reified in 7.0.0 to factorise common logic. * This method and the similar methods will be removed. */ @Deprecated boolean isDfa(); /** * Sets whether this Rule uses Type Resolution. * @deprecated See {@link #isTypeResolution()} */ @Deprecated // To be removed in PMD 7.0.0 void setUsesTypeResolution(); /** * Sets whether this Rule uses Type Resolution. * @deprecated See {@link #isTypeResolution()} */ @Deprecated void setTypeResolution(boolean usingTypeResolution); /** * Gets whether this Rule uses Type Resolution. 
* * @return <code>true</code> if Type Resolution is used. * * @deprecated See {@link #isTypeResolution()} */ @Deprecated // To be removed in PMD 7.0.0 boolean usesTypeResolution(); /** * Gets whether this Rule uses Type Resolution. * * @return <code>true</code> if Type Resolution is used. * @deprecated Optional AST processing stages will be reified in 7.0.0 to factorise common logic. * This method and the similar methods will be removed. */ @Deprecated boolean isTypeResolution(); /** * Sets whether this Rule uses multi-file analysis. * @deprecated See {@link #isMultifile()} */ @Deprecated // To be removed in PMD 7.0.0 void setUsesMultifile(); /** * Sets whether this Rule uses multi-file analysis. * @deprecated See {@link #isMultifile()} */ @Deprecated void setMultifile(boolean multifile); /** * Gets whether this Rule uses multi-file analysis. * * @return <code>true</code> if the multi file analysis is used. * * @deprecated See {@link #isMultifile()} */ @Deprecated // To be removed in PMD 7.0.0 boolean usesMultifile(); /** * Gets whether this Rule uses multi-file analysis. * * @return <code>true</code> if the multi file analysis is used. * @deprecated Logic for multifile analysis is not implemented yet and probably * won't be implemented this way. Will be removed in 7.0.0. */ @Deprecated boolean isMultifile(); /** * Gets whether this Rule uses the RuleChain. * * @return <code>true</code> if RuleChain is used. * * @deprecated USe {@link #isRuleChain()} instead. */ @Deprecated // To be removed in PMD 7.0.0 boolean usesRuleChain(); /** * Gets whether this Rule uses the RuleChain. * * @return <code>true</code> if RuleChain is used. */ boolean isRuleChain(); /** * Gets the collection of AST node names visited by the Rule on the * RuleChain. * * @return the list of AST node names */ List<String> getRuleChainVisits(); /** * Adds an AST node by class to be visited by the Rule on the RuleChain. * * @param nodeClass * the AST node to add to the RuleChain visit list */ void addRuleChainVisit(Class<? extends Node> nodeClass); /** * Adds an AST node by name to be visited by the Rule on the RuleChain. * * @param astNodeName * the AST node to add to the RuleChain visit list as string */ void addRuleChainVisit(String astNodeName); /** * Start processing. Called once, before apply() is first called. * * @param ctx * the rule context */ void start(RuleContext ctx); /** * Apply this rule to the given collection of nodes, using the given * context. * * @param nodes * the nodes * @param ctx * the rule context */ void apply(List<? extends Node> nodes, RuleContext ctx); /** * End processing. Called once, after apply() is last called. * * @param ctx * the rule context */ void end(RuleContext ctx); /** * Creates a new copy of this rule. * @return A new exact copy of this rule */ Rule deepCopy(); }
1
15,025
Ok, we can't switch it now, because the properties are a field of the Rule interface, which makes them public API. Maybe we should remove them from the (Java) API in 7.0.0? The only API left would then be setting the properties in the ruleset XML when using a rule. There the type doesn't matter, since the String is automatically converted into a Pattern (a small illustration of that conversion follows this record).
pmd-pmd
java
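A minimal sketch of the String-to-Pattern conversion the comment above relies on. The names here (RegexPropertyHolder, setFromXmlValue) are hypothetical stand-ins for illustration only and are not part of the PMD API.

import java.util.regex.Pattern;

// Hypothetical stand-in for a regex-valued rule property; not PMD code.
class RegexPropertyHolder {
    private Pattern pattern;

    // The ruleset XML always delivers the value as a String, so the Pattern
    // type never appears on the XML-facing surface; it is compiled here.
    void setFromXmlValue(String xmlValue) {
        this.pattern = Pattern.compile(xmlValue);
    }

    Pattern getPattern() {
        return pattern;
    }
}

Because the XML side only ever sees strings, changing the Java-side property type would leave existing rulesets untouched.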
@@ -66,7 +66,9 @@ public class GoGapicSurfaceTransformerTest { locator, new String[] {"myproto_gapic.yaml"}); - productConfig = GapicProductConfig.create(model, configProto, TargetLanguage.GO); + productConfig = + GapicProductConfig.create( + model, configProto, "google.example.myproto.v1", TargetLanguage.GO); if (model.getDiagReporter().getDiagCollector().hasErrors()) { throw new IllegalStateException(
1
/* Copyright 2016 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer.go; import com.google.api.codegen.CodegenTestUtil; import com.google.api.codegen.ConfigProto; import com.google.api.codegen.common.TargetLanguage; import com.google.api.codegen.config.GapicProductConfig; import com.google.api.codegen.config.MethodModel; import com.google.api.codegen.config.ProtoMethodModel; import com.google.api.codegen.gapic.PackageNameCodePathMapper; import com.google.api.codegen.transformer.DefaultFeatureConfig; import com.google.api.codegen.transformer.GapicInterfaceContext; import com.google.api.codegen.util.TypeAlias; import com.google.api.tools.framework.model.Interface; import com.google.api.tools.framework.model.Method; import com.google.api.tools.framework.model.Model; import com.google.api.tools.framework.model.testing.TestDataLocator; import com.google.common.truth.Truth; import java.util.Collections; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.rules.TemporaryFolder; public class GoGapicSurfaceTransformerTest { @ClassRule public static TemporaryFolder tempDir = new TemporaryFolder(); private static Model model; private static Interface apiInterface; private static GapicProductConfig productConfig; @BeforeClass public static void setupClass() { TestDataLocator locator = TestDataLocator.create(GoGapicSurfaceTransformerTest.class); model = CodegenTestUtil.readModel( locator, tempDir, new String[] {"myproto.proto", "singleservice.proto"}, new String[] {"myproto.yaml"}); for (Interface apiInterface : model.getSymbolTable().getInterfaces()) { if (apiInterface.getSimpleName().equals("Gopher")) { GoGapicSurfaceTransformerTest.apiInterface = apiInterface; break; } } ConfigProto configProto = CodegenTestUtil.readConfig( model.getDiagReporter().getDiagCollector(), locator, new String[] {"myproto_gapic.yaml"}); productConfig = GapicProductConfig.create(model, configProto, TargetLanguage.GO); if (model.getDiagReporter().getDiagCollector().hasErrors()) { throw new IllegalStateException( model.getDiagReporter().getDiagCollector().getDiags().toString()); } } private final GoGapicSurfaceTransformer transformer = new GoGapicSurfaceTransformer(new PackageNameCodePathMapper()); private GapicInterfaceContext context; @Before public void setup() { GoSurfaceNamer namer = new GoSurfaceNamer(productConfig.getPackageName()); context = GapicInterfaceContext.create( apiInterface, productConfig, GoGapicSurfaceTransformer.createTypeTable(), namer, new DefaultFeatureConfig()); } @Test public void testGetImportsPlain() { MethodModel method = new ProtoMethodModel(getMethod(context.getInterface(), "SimpleMethod")); transformer.addXApiImports(context, Collections.singletonList(method)); transformer.generateRetryConfigDefinitions(context, Collections.singletonList(method)); Truth.assertThat(context.getImportTypeTable().getImports()).doesNotContainKey("time"); 
Truth.assertThat(context.getImportTypeTable().getImports()) .doesNotContainKey("cloud.google.com/go/longrunning"); } @Test public void testGetImportsRetry() { MethodModel method = new ProtoMethodModel(getMethod(context.getInterface(), "RetryMethod")); transformer.addXApiImports(context, Collections.singletonList(method)); transformer.generateRetryConfigDefinitions(context, Collections.singletonList(method)); Truth.assertThat(context.getImportTypeTable().getImports()).containsKey("time"); Truth.assertThat(context.getImportTypeTable().getImports()) .doesNotContainKey("cloud.google.com/go/longrunning"); } @Test public void testGetImportsPageStream() { MethodModel method = new ProtoMethodModel(getMethod(context.getInterface(), "PageStreamMethod")); transformer.addXApiImports(context, Collections.singletonList(method)); transformer.generateRetryConfigDefinitions(context, Collections.singletonList(method)); Truth.assertThat(context.getImportTypeTable().getImports()).containsKey("math"); Truth.assertThat(context.getImportTypeTable().getImports()) .doesNotContainKey("cloud.google.com/go/longrunning"); } @Test public void testGetImportsLro() { MethodModel method = new ProtoMethodModel(getMethod(context.getInterface(), "LroMethod")); transformer.addXApiImports(context, Collections.singletonList(method)); transformer.generateRetryConfigDefinitions(context, Collections.singletonList(method)); Truth.assertThat(context.getImportTypeTable().getImports()).doesNotContainKey("math"); Truth.assertThat(context.getImportTypeTable().getImports()) .containsKey("cloud.google.com/go/longrunning"); } @Test public void testGetImportsNotLro() { MethodModel method = new ProtoMethodModel(getMethod(context.getInterface(), "NotLroMethod")); transformer.addXApiImports(context, Collections.singletonList(method)); Truth.assertThat(context.getImportTypeTable().getImports()) .doesNotContainKey("cloud.google.com/go/longrunning"); } @Test public void testGetExampleImportsServerStream() { MethodModel method = new ProtoMethodModel(getMethod(context.getInterface(), "ServerStreamMethod")); transformer.addXExampleImports(context, Collections.singletonList(method)); Truth.assertThat(context.getImportTypeTable().getImports()).containsKey("io"); } @Test public void testGetExampleImportsBidiStream() { MethodModel method = new ProtoMethodModel(getMethod(context.getInterface(), "BidiStreamMethod")); transformer.addXExampleImports(context, Collections.singletonList(method)); Truth.assertThat(context.getImportTypeTable().getImports()).containsKey("io"); } @Test public void testGetExampleImportsClientStream() { MethodModel method = new ProtoMethodModel(getMethod(context.getInterface(), "ClientStreamMethod")); transformer.addXExampleImports(context, Collections.singletonList(method)); Truth.assertThat(context.getImportTypeTable().getImports()).doesNotContainKey("io"); } @Test public void testExampleImports() { transformer.addXExampleImports(context, context.getSupportedMethods()); Truth.assertThat(context.getImportTypeTable().getImports()) .containsEntry( "golang.org/x/net/context", TypeAlias.create("golang.org/x/net/context", "")); Truth.assertThat(context.getImportTypeTable().getImports()) .containsEntry( "cloud.google.com/go/gopher/apiv1", TypeAlias.create("cloud.google.com/go/gopher/apiv1", "")); Truth.assertThat(context.getImportTypeTable().getImports()) .containsEntry( "google.golang.org/genproto/googleapis/example/myproto/v1", TypeAlias.create( "google.golang.org/genproto/googleapis/example/myproto/v1", "myprotopb")); // Only shows up 
in response, not needed for example. Truth.assertThat(context.getImportTypeTable().getImports()) .doesNotContainKey("google.golang.org/genproto/googleapis/example/odd/v1"); } private Method getMethod(Interface apiInterface, String methodName) { Method method = apiInterface.lookupMethod(methodName); if (method == null) { throw new IllegalArgumentException( String.format( "Method %s not found, available: %s", methodName, apiInterface.getMethods())); } return method; } }
1
26,866
Pass in null here, instead of a value? Maybe we should add an overload for `GapicProductConfig.create` that matches the original signature and passes through null? I don't feel strongly about that though, so I'm happy to leave it as-is if you prefer. (A sketch of that overload pattern follows this record.)
googleapis-gapic-generator
java
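A self-contained sketch of the backwards-compatible overload suggested in the comment above. ProductConfigSketch is a hypothetical stand-in for GapicProductConfig, not the real class.

// Hypothetical stand-in; the real GapicProductConfig takes more parameters.
final class ProductConfigSketch {
    private final String protoPackage; // may be null when the caller does not specify one

    private ProductConfigSketch(String protoPackage) {
        this.protoPackage = protoPackage;
    }

    // New factory that takes the extra proto package argument.
    static ProductConfigSketch create(String protoPackage) {
        return new ProductConfigSketch(protoPackage);
    }

    // Overload matching the original signature, passing through null.
    static ProductConfigSketch create() {
        return create(null);
    }

    String protoPackage() {
        return protoPackage;
    }
}

Keeping the old signature as a thin delegate would avoid touching every existing test call site such as the one in the diff above.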
@@ -94,7 +94,7 @@ class CheckCompoundPattern { CharsRef expandReplacement(CharsRef word, int breakPos) { if (replacement != null && charsMatch(word, breakPos, replacement)) { return new CharsRef( - word.subSequence(0, breakPos) + new String(word.chars, 0, word.offset + breakPos) + endChars + beginChars + word.subSequence(breakPos + replacement.length(), word.length));
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.analysis.hunspell; import org.apache.lucene.util.CharsRef; class CheckCompoundPattern { private final String endChars; private final String beginChars; private final String replacement; private final char[] endFlags; private final char[] beginFlags; private final Dictionary dictionary; CheckCompoundPattern( String unparsed, Dictionary.FlagParsingStrategy strategy, Dictionary dictionary) { this.dictionary = dictionary; String[] parts = unparsed.split("\\s+"); if (parts.length < 3) { throw new IllegalArgumentException("Invalid pattern: " + unparsed); } int flagSep = parts[1].indexOf("/"); endChars = flagSep < 0 ? parts[1] : parts[1].substring(0, flagSep); endFlags = flagSep < 0 ? new char[0] : strategy.parseFlags(parts[1].substring(flagSep + 1)); flagSep = parts[2].indexOf("/"); beginChars = flagSep < 0 ? parts[2] : parts[2].substring(0, flagSep); beginFlags = flagSep < 0 ? new char[0] : strategy.parseFlags(parts[2].substring(flagSep + 1)); replacement = parts.length == 3 ? null : parts[3]; } @Override public String toString() { return endChars + " " + beginChars + (replacement == null ? 
"" : " -> " + replacement); } boolean prohibitsCompounding(CharsRef word, int breakPos, Root<?> rootBefore, Root<?> rootAfter) { if (isNonAffixedPattern(endChars)) { if (!charsMatch(word, breakPos - rootBefore.word.length(), rootBefore.word)) { return false; } } else if (!charsMatch(word, breakPos - endChars.length(), endChars)) { return false; } if (isNonAffixedPattern(beginChars)) { if (!charsMatch(word, breakPos, rootAfter.word)) { return false; } } else if (!charsMatch(word, breakPos, beginChars)) { return false; } if (endFlags.length > 0 && !hasAllFlags(rootBefore, endFlags)) { return false; } //noinspection RedundantIfStatement if (beginFlags.length > 0 && !hasAllFlags(rootAfter, beginFlags)) { return false; } return true; } private static boolean isNonAffixedPattern(String pattern) { return pattern.length() == 1 && pattern.charAt(0) == '0'; } private boolean hasAllFlags(Root<?> root, char[] flags) { for (char flag : flags) { if (!dictionary.hasFlag(root.entryId, flag)) { return false; } } return true; } CharsRef expandReplacement(CharsRef word, int breakPos) { if (replacement != null && charsMatch(word, breakPos, replacement)) { return new CharsRef( word.subSequence(0, breakPos) + endChars + beginChars + word.subSequence(breakPos + replacement.length(), word.length)); } return null; } int endLength() { return endChars.length(); } private static boolean charsMatch(CharsRef word, int offset, CharSequence pattern) { int len = pattern.length(); if (word.length - offset < len || offset < 0 || offset > word.length) { return false; } for (int i = 0; i < len; i++) { if (word.chars[word.offset + offset + i] != pattern.charAt(i)) { return false; } } return true; } }
1
40,354
Include all compound parts from the very beginning so that the case check can work later; the sketch after this record illustrates the difference.
apache-lucene-solr
java
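A runnable illustration of what the diff above changes: when the CharsRef is a window into a larger buffer (offset > 0), subSequence(0, breakPos) drops the compound parts that precede the window, while new String(word.chars, 0, word.offset + breakPos) keeps everything from the start of the buffer. The Ref class below is a simplified stand-in for Lucene's CharsRef, not the real class.

public class ExpandReplacementSketch {
    // Minimal view of a char[] buffer with an offset and length, like CharsRef.
    static final class Ref {
        final char[] chars;
        final int offset;
        final int length;
        Ref(char[] chars, int offset, int length) {
            this.chars = chars;
            this.offset = offset;
            this.length = length;
        }
        CharSequence subSequence(int start, int end) {
            return new String(chars, offset + start, end - start);
        }
    }

    public static void main(String[] args) {
        char[] buffer = "foobarbaz".toCharArray();
        // A window that starts after the first compound part ("foo").
        Ref word = new Ref(buffer, 3, 6); // views "barbaz"
        int breakPos = 3;                 // break between "bar" and "baz"

        // Old behaviour: only the windowed text before the break.
        String windowed = word.subSequence(0, breakPos).toString();           // "bar"
        // New behaviour: every compound part from the start of the buffer.
        String fromStart = new String(word.chars, 0, word.offset + breakPos); // "foobar"

        System.out.println(windowed + " vs " + fromStart);
    }
}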
@@ -461,7 +461,7 @@ func get(envInfo *cmds.Agent, proxy proxy.Proxy) (*config.Node, error) { } if !nodeConfig.Docker && nodeConfig.ContainerRuntimeEndpoint == "" { - nodeConfig.AgentConfig.RuntimeSocket = nodeConfig.Containerd.Address + nodeConfig.AgentConfig.RuntimeSocket = "unix://" + nodeConfig.Containerd.Address } else { nodeConfig.AgentConfig.RuntimeSocket = nodeConfig.ContainerRuntimeEndpoint nodeConfig.AgentConfig.CNIPlugin = true
1
package config import ( "bufio" "context" cryptorand "crypto/rand" "crypto/tls" "encoding/hex" "encoding/pem" "fmt" "io/ioutil" sysnet "net" "net/http" "net/url" "os" "os/exec" "path/filepath" "regexp" "strings" "time" "github.com/pkg/errors" "github.com/rancher/k3s/pkg/agent/proxy" "github.com/rancher/k3s/pkg/cli/cmds" "github.com/rancher/k3s/pkg/clientaccess" "github.com/rancher/k3s/pkg/daemons/config" "github.com/rancher/k3s/pkg/daemons/control" "github.com/rancher/k3s/pkg/version" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/net" ) const ( DefaultPodManifestPath = "pod-manifests" ) func Get(ctx context.Context, agent cmds.Agent, proxy proxy.Proxy) *config.Node { for { agentConfig, err := get(&agent, proxy) if err != nil { logrus.Errorf("Failed to retrieve agent config: %v", err) select { case <-time.After(5 * time.Second): continue case <-ctx.Done(): logrus.Fatalf("Interrupted") } } return agentConfig } } type HTTPRequester func(u string, client *http.Client, username, password string) ([]byte, error) func Request(path string, info *clientaccess.Info, requester HTTPRequester) ([]byte, error) { u, err := url.Parse(info.BaseURL) if err != nil { return nil, err } u.Path = path return requester(u.String(), clientaccess.GetHTTPClient(info.CACerts), info.Username, info.Password) } func getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile string) HTTPRequester { return func(u string, client *http.Client, username, password string) ([]byte, error) { req, err := http.NewRequest(http.MethodGet, u, nil) if err != nil { return nil, err } if username != "" { req.SetBasicAuth(username, password) } req.Header.Set(version.Program+"-Node-Name", nodeName) nodePassword, err := ensureNodePassword(nodePasswordFile) if err != nil { return nil, err } req.Header.Set(version.Program+"-Node-Password", nodePassword) req.Header.Set(version.Program+"-Node-IP", nodeIP) resp, err := client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode == http.StatusForbidden { return nil, fmt.Errorf("Node password rejected, duplicate hostname or contents of '%s' may not match server node-passwd entry, try enabling a unique node name with the --with-node-id flag", nodePasswordFile) } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("%s: %s", u, resp.Status) } return ioutil.ReadAll(resp.Body) } } func ensureNodeID(nodeIDFile string) (string, error) { if _, err := os.Stat(nodeIDFile); err == nil { id, err := ioutil.ReadFile(nodeIDFile) return strings.TrimSpace(string(id)), err } id := make([]byte, 4, 4) _, err := cryptorand.Read(id) if err != nil { return "", err } nodeID := hex.EncodeToString(id) return nodeID, ioutil.WriteFile(nodeIDFile, []byte(nodeID+"\n"), 0644) } func ensureNodePassword(nodePasswordFile string) (string, error) { if _, err := os.Stat(nodePasswordFile); err == nil { password, err := ioutil.ReadFile(nodePasswordFile) return strings.TrimSpace(string(password)), err } password := make([]byte, 16, 16) _, err := cryptorand.Read(password) if err != nil { return "", err } nodePassword := hex.EncodeToString(password) return nodePassword, ioutil.WriteFile(nodePasswordFile, []byte(nodePassword+"\n"), 0600) } func upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile string) { password, err := ioutil.ReadFile(oldNodePasswordFile) if err != nil { return } if err := ioutil.WriteFile(newNodePasswordFile, password, 0600); err != nil { logrus.Warnf("Unable to write password file: %v", err) return } if err := 
os.Remove(oldNodePasswordFile); err != nil { logrus.Warnf("Unable to remove old password file: %v", err) return } } func getServingCert(nodeName, nodeIP, servingCertFile, servingKeyFile, nodePasswordFile string, info *clientaccess.Info) (*tls.Certificate, error) { servingCert, err := Request("/v1-"+version.Program+"/serving-kubelet.crt", info, getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile)) if err != nil { return nil, err } servingCert, servingKey := splitCertKeyPEM(servingCert) if err := ioutil.WriteFile(servingCertFile, servingCert, 0600); err != nil { return nil, errors.Wrapf(err, "failed to write node cert") } if err := ioutil.WriteFile(servingKeyFile, servingKey, 0600); err != nil { return nil, errors.Wrapf(err, "failed to write node key") } cert, err := tls.X509KeyPair(servingCert, servingKey) if err != nil { return nil, err } return &cert, nil } func getHostFile(filename, keyFile string, info *clientaccess.Info) error { basename := filepath.Base(filename) fileBytes, err := clientaccess.Get("/v1-"+version.Program+"/"+basename, info) if err != nil { return err } if keyFile == "" { if err := ioutil.WriteFile(filename, fileBytes, 0600); err != nil { return errors.Wrapf(err, "failed to write cert %s", filename) } } else { fileBytes, keyBytes := splitCertKeyPEM(fileBytes) if err := ioutil.WriteFile(filename, fileBytes, 0600); err != nil { return errors.Wrapf(err, "failed to write cert %s", filename) } if err := ioutil.WriteFile(keyFile, keyBytes, 0600); err != nil { return errors.Wrapf(err, "failed to write key %s", filename) } } return nil } func splitCertKeyPEM(bytes []byte) (certPem []byte, keyPem []byte) { for { b, rest := pem.Decode(bytes) if b == nil { break } bytes = rest if strings.Contains(b.Type, "PRIVATE KEY") { keyPem = append(keyPem, pem.EncodeToMemory(b)...) } else { certPem = append(certPem, pem.EncodeToMemory(b)...) 
} } return } func getNodeNamedHostFile(filename, keyFile, nodeName, nodeIP, nodePasswordFile string, info *clientaccess.Info) error { basename := filepath.Base(filename) fileBytes, err := Request("/v1-"+version.Program+"/"+basename, info, getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile)) if err != nil { return err } fileBytes, keyBytes := splitCertKeyPEM(fileBytes) if err := ioutil.WriteFile(filename, fileBytes, 0600); err != nil { return errors.Wrapf(err, "failed to write cert %s", filename) } if err := ioutil.WriteFile(keyFile, keyBytes, 0600); err != nil { return errors.Wrapf(err, "failed to write key %s", filename) } return nil } func getHostnameAndIP(info cmds.Agent) (string, string, error) { ip := info.NodeIP if ip == "" { hostIP, err := net.ChooseHostInterface() if err != nil { return "", "", err } ip = hostIP.String() } name := info.NodeName if name == "" { hostname, err := os.Hostname() if err != nil { return "", "", err } name = hostname } // Use lower case hostname to comply with kubernetes constraint: // https://github.com/kubernetes/kubernetes/issues/71140 name = strings.ToLower(name) return name, ip, nil } func isValidResolvConf(resolvConfFile string) bool { file, err := os.Open(resolvConfFile) if err != nil { return false } defer file.Close() nameserver := regexp.MustCompile(`^nameserver\s+([^\s]*)`) scanner := bufio.NewScanner(file) for scanner.Scan() { ipMatch := nameserver.FindStringSubmatch(scanner.Text()) if len(ipMatch) == 2 { ip := sysnet.ParseIP(ipMatch[1]) if ip == nil || !ip.IsGlobalUnicast() { return false } } } if err := scanner.Err(); err != nil { return false } return true } func locateOrGenerateResolvConf(envInfo *cmds.Agent) string { if envInfo.ResolvConf != "" { return envInfo.ResolvConf } resolvConfs := []string{"/etc/resolv.conf", "/run/systemd/resolve/resolv.conf"} for _, conf := range resolvConfs { if isValidResolvConf(conf) { return conf } } tmpConf := filepath.Join(os.TempDir(), version.Program+"-resolv.conf") if err := ioutil.WriteFile(tmpConf, []byte("nameserver 8.8.8.8\n"), 0444); err != nil { logrus.Errorf("Failed to write %s: %v", tmpConf, err) return "" } return tmpConf } func get(envInfo *cmds.Agent, proxy proxy.Proxy) (*config.Node, error) { if envInfo.Debug { logrus.SetLevel(logrus.DebugLevel) } info, err := clientaccess.ParseAndValidateToken(proxy.SupervisorURL(), envInfo.Token) if err != nil { return nil, err } controlConfig, err := getConfig(info) if err != nil { return nil, err } if controlConfig.SupervisorPort != controlConfig.HTTPSPort { if err := proxy.StartAPIServerProxy(controlConfig.HTTPSPort); err != nil { return nil, errors.Wrapf(err, "failed to setup access to API Server port %d on at %s", controlConfig.HTTPSPort, proxy.SupervisorURL()) } } var flannelIface *sysnet.Interface if !envInfo.NoFlannel && len(envInfo.FlannelIface) > 0 { flannelIface, err = sysnet.InterfaceByName(envInfo.FlannelIface) if err != nil { return nil, errors.Wrapf(err, "unable to find interface") } } clientCAFile := filepath.Join(envInfo.DataDir, "client-ca.crt") if err := getHostFile(clientCAFile, "", info); err != nil { return nil, err } serverCAFile := filepath.Join(envInfo.DataDir, "server-ca.crt") if err := getHostFile(serverCAFile, "", info); err != nil { return nil, err } servingKubeletCert := filepath.Join(envInfo.DataDir, "serving-kubelet.crt") servingKubeletKey := filepath.Join(envInfo.DataDir, "serving-kubelet.key") nodePasswordRoot := "/" if envInfo.Rootless { nodePasswordRoot = envInfo.DataDir } nodeConfigPath := filepath.Join(nodePasswordRoot, 
"etc", "rancher", "node") if err := os.MkdirAll(nodeConfigPath, 0755); err != nil { return nil, err } oldNodePasswordFile := filepath.Join(envInfo.DataDir, "node-password.txt") newNodePasswordFile := filepath.Join(nodeConfigPath, "password") upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile) nodeName, nodeIP, err := getHostnameAndIP(*envInfo) if err != nil { return nil, err } if envInfo.WithNodeID { nodeID, err := ensureNodeID(filepath.Join(nodeConfigPath, "id")) if err != nil { return nil, err } nodeName += "-" + nodeID } servingCert, err := getServingCert(nodeName, nodeIP, servingKubeletCert, servingKubeletKey, newNodePasswordFile, info) if err != nil { return nil, err } clientKubeletCert := filepath.Join(envInfo.DataDir, "client-kubelet.crt") clientKubeletKey := filepath.Join(envInfo.DataDir, "client-kubelet.key") if err := getNodeNamedHostFile(clientKubeletCert, clientKubeletKey, nodeName, nodeIP, newNodePasswordFile, info); err != nil { return nil, err } kubeconfigKubelet := filepath.Join(envInfo.DataDir, "kubelet.kubeconfig") if err := control.KubeConfig(kubeconfigKubelet, proxy.APIServerURL(), serverCAFile, clientKubeletCert, clientKubeletKey); err != nil { return nil, err } clientKubeProxyCert := filepath.Join(envInfo.DataDir, "client-kube-proxy.crt") clientKubeProxyKey := filepath.Join(envInfo.DataDir, "client-kube-proxy.key") if err := getHostFile(clientKubeProxyCert, clientKubeProxyKey, info); err != nil { return nil, err } kubeconfigKubeproxy := filepath.Join(envInfo.DataDir, "kubeproxy.kubeconfig") if err := control.KubeConfig(kubeconfigKubeproxy, proxy.APIServerURL(), serverCAFile, clientKubeProxyCert, clientKubeProxyKey); err != nil { return nil, err } clientK3sControllerCert := filepath.Join(envInfo.DataDir, "client-"+version.Program+"-controller.crt") clientK3sControllerKey := filepath.Join(envInfo.DataDir, "client-"+version.Program+"-controller.key") if err := getHostFile(clientK3sControllerCert, clientK3sControllerKey, info); err != nil { return nil, err } kubeconfigK3sController := filepath.Join(envInfo.DataDir, version.Program+"controller.kubeconfig") if err := control.KubeConfig(kubeconfigK3sController, proxy.APIServerURL(), serverCAFile, clientK3sControllerCert, clientK3sControllerKey); err != nil { return nil, err } nodeConfig := &config.Node{ Docker: envInfo.Docker, SELinux: envInfo.EnableSELinux, ContainerRuntimeEndpoint: envInfo.ContainerRuntimeEndpoint, FlannelBackend: controlConfig.FlannelBackend, } nodeConfig.FlannelIface = flannelIface nodeConfig.Images = filepath.Join(envInfo.DataDir, "images") nodeConfig.AgentConfig.NodeIP = nodeIP nodeConfig.AgentConfig.NodeName = nodeName nodeConfig.AgentConfig.NodeConfigPath = nodeConfigPath nodeConfig.AgentConfig.NodeExternalIP = envInfo.NodeExternalIP nodeConfig.AgentConfig.ServingKubeletCert = servingKubeletCert nodeConfig.AgentConfig.ServingKubeletKey = servingKubeletKey nodeConfig.AgentConfig.ClusterDNS = controlConfig.ClusterDNS nodeConfig.AgentConfig.ClusterDomain = controlConfig.ClusterDomain nodeConfig.AgentConfig.ResolvConf = locateOrGenerateResolvConf(envInfo) nodeConfig.AgentConfig.ClientCA = clientCAFile nodeConfig.AgentConfig.ListenAddress = "0.0.0.0" nodeConfig.AgentConfig.KubeConfigKubelet = kubeconfigKubelet nodeConfig.AgentConfig.KubeConfigKubeProxy = kubeconfigKubeproxy nodeConfig.AgentConfig.KubeConfigK3sController = kubeconfigK3sController if envInfo.Rootless { nodeConfig.AgentConfig.RootDir = filepath.Join(envInfo.DataDir, "kubelet") } nodeConfig.AgentConfig.PauseImage = 
envInfo.PauseImage nodeConfig.AgentConfig.Snapshotter = envInfo.Snapshotter nodeConfig.AgentConfig.IPSECPSK = controlConfig.IPSECPSK nodeConfig.AgentConfig.StrongSwanDir = filepath.Join(envInfo.DataDir, "strongswan") nodeConfig.CACerts = info.CACerts nodeConfig.Containerd.Config = filepath.Join(envInfo.DataDir, "etc/containerd/config.toml") nodeConfig.Containerd.Root = filepath.Join(envInfo.DataDir, "containerd") nodeConfig.Containerd.Opt = filepath.Join(envInfo.DataDir, "containerd") if !envInfo.Debug { nodeConfig.Containerd.Log = filepath.Join(envInfo.DataDir, "containerd/containerd.log") } nodeConfig.Containerd.State = "/run/k3s/containerd" nodeConfig.Containerd.Address = filepath.Join(nodeConfig.Containerd.State, "containerd.sock") nodeConfig.Containerd.Template = filepath.Join(envInfo.DataDir, "etc/containerd/config.toml.tmpl") nodeConfig.Certificate = servingCert if nodeConfig.FlannelBackend == config.FlannelBackendNone { nodeConfig.NoFlannel = true } else { nodeConfig.NoFlannel = envInfo.NoFlannel } if !nodeConfig.NoFlannel { hostLocal, err := exec.LookPath("host-local") if err != nil { return nil, errors.Wrapf(err, "failed to find host-local") } if envInfo.FlannelConf == "" { nodeConfig.FlannelConf = filepath.Join(envInfo.DataDir, "etc/flannel/net-conf.json") } else { nodeConfig.FlannelConf = envInfo.FlannelConf nodeConfig.FlannelConfOverride = true } nodeConfig.AgentConfig.CNIBinDir = filepath.Dir(hostLocal) nodeConfig.AgentConfig.CNIConfDir = filepath.Join(envInfo.DataDir, "etc/cni/net.d") } if !nodeConfig.Docker && nodeConfig.ContainerRuntimeEndpoint == "" { nodeConfig.AgentConfig.RuntimeSocket = nodeConfig.Containerd.Address } else { nodeConfig.AgentConfig.RuntimeSocket = nodeConfig.ContainerRuntimeEndpoint nodeConfig.AgentConfig.CNIPlugin = true } if controlConfig.ClusterIPRange != nil { nodeConfig.AgentConfig.ClusterCIDR = *controlConfig.ClusterIPRange } os.Setenv("NODE_NAME", nodeConfig.AgentConfig.NodeName) nodeConfig.AgentConfig.ExtraKubeletArgs = envInfo.ExtraKubeletArgs nodeConfig.AgentConfig.ExtraKubeProxyArgs = envInfo.ExtraKubeProxyArgs nodeConfig.AgentConfig.NodeTaints = envInfo.Taints nodeConfig.AgentConfig.NodeLabels = envInfo.Labels nodeConfig.AgentConfig.PrivateRegistry = envInfo.PrivateRegistry nodeConfig.AgentConfig.DisableCCM = controlConfig.DisableCCM nodeConfig.AgentConfig.DisableNPC = controlConfig.DisableNPC nodeConfig.AgentConfig.DisableKubeProxy = controlConfig.DisableKubeProxy nodeConfig.AgentConfig.Rootless = envInfo.Rootless nodeConfig.AgentConfig.PodManifests = filepath.Join(envInfo.DataDir, DefaultPodManifestPath) nodeConfig.AgentConfig.ProtectKernelDefaults = envInfo.ProtectKernelDefaults return nodeConfig, nil } func getConfig(info *clientaccess.Info) (*config.Control, error) { data, err := clientaccess.Get("/v1-"+version.Program+"/config", info) if err != nil { return nil, err } controlControl := &config.Control{} return controlControl, json.Unmarshal(data, controlControl) }
1
8,729
If the user specifies a URL scheme on the CLI, this is going to cause problems, no? (A scheme-aware alternative is sketched after this record.)
k3s-io-k3s
go
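The change above unconditionally prefixes "unix://" onto the containerd address. A minimal sketch of the scheme check the question is hinting at, written in Java like the other sketches in this note rather than as k3s code; the helper name and behaviour are illustrative assumptions only.

public class RuntimeSocketSketch {
    // Only prepend "unix://" when the endpoint does not already carry a scheme.
    static String normalizeRuntimeSocket(String address) {
        // If the user already supplied a scheme (unix://, npipe://, tcp://, ...),
        // leave the value untouched; otherwise assume a bare unix socket path.
        return address.contains("://") ? address : "unix://" + address;
    }

    public static void main(String[] args) {
        System.out.println(normalizeRuntimeSocket("/run/k3s/containerd/containerd.sock"));
        System.out.println(normalizeRuntimeSocket("unix:///run/containerd/containerd.sock"));
    }
}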
@@ -726,7 +726,7 @@ define(['loading', 'appRouter', 'layoutManager', 'connectionManager', 'userSetti disableIndicators: true, disableHoverMenu: true, overlayPlayButton: true, - width: dom.getWindowSize().innerWidth * 0.25 + width: dom.getWindowSize().innerWidth * 0.5 }); elem.innerHTML = cardHtml;
1
define(['loading', 'appRouter', 'layoutManager', 'connectionManager', 'userSettings', 'cardBuilder', 'datetime', 'mediaInfo', 'backdrop', 'listView', 'itemContextMenu', 'itemHelper', 'dom', 'indicators', 'imageLoader', 'libraryMenu', 'globalize', 'browser', 'events', 'playbackManager', 'scrollStyles', 'emby-itemscontainer', 'emby-checkbox', 'emby-button', 'emby-playstatebutton', 'emby-ratingbutton', 'emby-scroller', 'emby-select'], function (loading, appRouter, layoutManager, connectionManager, userSettings, cardBuilder, datetime, mediaInfo, backdrop, listView, itemContextMenu, itemHelper, dom, indicators, imageLoader, libraryMenu, globalize, browser, events, playbackManager) { 'use strict'; function getPromise(apiClient, params) { var id = params.id; if (id) { return apiClient.getItem(apiClient.getCurrentUserId(), id); } if (params.seriesTimerId) { return apiClient.getLiveTvSeriesTimer(params.seriesTimerId); } if (params.genre) { return apiClient.getGenre(params.genre, apiClient.getCurrentUserId()); } if (params.musicgenre) { return apiClient.getMusicGenre(params.musicgenre, apiClient.getCurrentUserId()); } if (params.musicartist) { return apiClient.getArtist(params.musicartist, apiClient.getCurrentUserId()); } throw new Error('Invalid request'); } function hideAll(page, className, show) { for (const elem of page.querySelectorAll('.' + className)) { if (show) { elem.classList.remove('hide'); } else { elem.classList.add('hide'); } } } function getContextMenuOptions(item, user, button) { var options = { item: item, open: false, play: false, playAllFromHere: false, queueAllFromHere: false, positionTo: button, cancelTimer: false, record: false, deleteItem: item.CanDelete === true, shuffle: false, instantMix: false, user: user, share: true }; return options; } function getProgramScheduleHtml(items) { var html = ''; html += '<div is="emby-itemscontainer" class="itemsContainer vertical-list" data-contextmenu="false">'; html += listView.getListViewHtml({ items: items, enableUserDataButtons: false, image: true, imageSource: 'channel', showProgramDateTime: true, showChannel: false, mediaInfo: false, action: 'none', moreButton: false, recordButton: false }); html += '</div>'; return html; } function renderSeriesTimerSchedule(page, apiClient, seriesTimerId) { apiClient.getLiveTvTimers({ UserId: apiClient.getCurrentUserId(), ImageTypeLimit: 1, EnableImageTypes: 'Primary,Backdrop,Thumb', SortBy: 'StartDate', EnableTotalRecordCount: false, EnableUserData: false, SeriesTimerId: seriesTimerId, Fields: 'ChannelInfo,ChannelImage' }).then(function (result) { if (result.Items.length && result.Items[0].SeriesTimerId != seriesTimerId) { result.Items = []; } var html = getProgramScheduleHtml(result.Items); var scheduleTab = page.querySelector('.seriesTimerSchedule'); scheduleTab.innerHTML = html; imageLoader.lazyChildren(scheduleTab); }); } function renderTimerEditor(page, item, apiClient, user) { if ('Recording' !== item.Type || !user.Policy.EnableLiveTvManagement || !item.TimerId || 'InProgress' !== item.Status) { return void hideAll(page, 'btnCancelTimer'); } hideAll(page, 'btnCancelTimer', true); } function renderSeriesTimerEditor(page, item, apiClient, user) { if ('SeriesTimer' !== item.Type) { return void hideAll(page, 'btnCancelSeriesTimer'); } if (user.Policy.EnableLiveTvManagement) { require(['seriesRecordingEditor'], function (seriesRecordingEditor) { seriesRecordingEditor.embed(item, apiClient.serverId(), { context: page.querySelector('.seriesRecordingEditor') }); }); 
page.querySelector('.seriesTimerScheduleSection').classList.remove('hide'); hideAll(page, 'btnCancelSeriesTimer', true); return void renderSeriesTimerSchedule(page, apiClient, item.Id); } page.querySelector('.seriesTimerScheduleSection').classList.add('hide'); return void hideAll(page, 'btnCancelSeriesTimer'); } function renderTrackSelections(page, instance, item, forceReload) { var select = page.querySelector('.selectSource'); if (!item.MediaSources || !itemHelper.supportsMediaSourceSelection(item) || -1 === playbackManager.getSupportedCommands().indexOf('PlayMediaSource') || !playbackManager.canPlay(item)) { page.querySelector('.trackSelections').classList.add('hide'); select.innerHTML = ''; page.querySelector('.selectVideo').innerHTML = ''; page.querySelector('.selectAudio').innerHTML = ''; page.querySelector('.selectSubtitles').innerHTML = ''; return; } var mediaSources = item.MediaSources; instance._currentPlaybackMediaSources = mediaSources; page.querySelector('.trackSelections').classList.remove('hide'); select.setLabel(globalize.translate('LabelVersion')); var currentValue = select.value; var selectedId = mediaSources[0].Id; select.innerHTML = mediaSources.map(function (v) { var selected = v.Id === selectedId ? ' selected' : ''; return '<option value="' + v.Id + '"' + selected + '>' + v.Name + '</option>'; }).join(''); if (mediaSources.length > 1) { page.querySelector('.selectSourceContainer').classList.remove('hide'); } else { page.querySelector('.selectSourceContainer').classList.add('hide'); } if (select.value !== currentValue || forceReload) { renderVideoSelections(page, mediaSources); renderAudioSelections(page, mediaSources); renderSubtitleSelections(page, mediaSources); } } function renderVideoSelections(page, mediaSources) { var mediaSourceId = page.querySelector('.selectSource').value; var mediaSource = mediaSources.filter(function (m) { return m.Id === mediaSourceId; })[0]; var tracks = mediaSource.MediaStreams.filter(function (m) { return m.Type === 'Video'; }); var select = page.querySelector('.selectVideo'); select.setLabel(globalize.translate('LabelVideo')); var selectedId = tracks.length ? tracks[0].Index : -1; select.innerHTML = tracks.map(function (v) { var selected = v.Index === selectedId ? ' selected' : ''; var titleParts = []; var resolutionText = mediaInfo.getResolutionText(v); if (resolutionText) { titleParts.push(resolutionText); } if (v.Codec) { titleParts.push(v.Codec.toUpperCase()); } return '<option value="' + v.Index + '" ' + selected + '>' + (v.DisplayTitle || titleParts.join(' ')) + '</option>'; }).join(''); select.setAttribute('disabled', 'disabled'); if (tracks.length) { page.querySelector('.selectVideoContainer').classList.remove('hide'); } else { page.querySelector('.selectVideoContainer').classList.add('hide'); } } function renderAudioSelections(page, mediaSources) { var mediaSourceId = page.querySelector('.selectSource').value; var mediaSource = mediaSources.filter(function (m) { return m.Id === mediaSourceId; })[0]; var tracks = mediaSource.MediaStreams.filter(function (m) { return 'Audio' === m.Type; }); var select = page.querySelector('.selectAudio'); select.setLabel(globalize.translate('LabelAudio')); var selectedId = mediaSource.DefaultAudioStreamIndex; select.innerHTML = tracks.map(function (v) { var selected = v.Index === selectedId ? 
' selected' : ''; return '<option value="' + v.Index + '" ' + selected + '>' + v.DisplayTitle + '</option>'; }).join(''); if (tracks.length > 1) { select.removeAttribute('disabled'); } else { select.setAttribute('disabled', 'disabled'); } if (tracks.length) { page.querySelector('.selectAudioContainer').classList.remove('hide'); } else { page.querySelector('.selectAudioContainer').classList.add('hide'); } } function renderSubtitleSelections(page, mediaSources) { var mediaSourceId = page.querySelector('.selectSource').value; var mediaSource = mediaSources.filter(function (m) { return m.Id === mediaSourceId; })[0]; var tracks = mediaSource.MediaStreams.filter(function (m) { return 'Subtitle' === m.Type; }); var select = page.querySelector('.selectSubtitles'); select.setLabel(globalize.translate('LabelSubtitles')); var selectedId = null == mediaSource.DefaultSubtitleStreamIndex ? -1 : mediaSource.DefaultSubtitleStreamIndex; var videoTracks = mediaSource.MediaStreams.filter(function (m) { return 'Video' === m.Type; }); // This only makes sence on Video items if (videoTracks.length) { var selected = -1 === selectedId ? ' selected' : ''; select.innerHTML = '<option value="-1">' + globalize.translate('Off') + '</option>' + tracks.map(function (v) { selected = v.Index === selectedId ? ' selected' : ''; return '<option value="' + v.Index + '" ' + selected + '>' + v.DisplayTitle + '</option>'; }).join(''); if (tracks.length > 1) { select.removeAttribute('disabled'); } else { select.setAttribute('disabled', 'disabled'); } page.querySelector('.selectSubtitlesContainer').classList.remove('hide'); } else { select.innerHTML = ''; page.querySelector('.selectSubtitlesContainer').classList.add('hide'); } } function reloadPlayButtons(page, item) { var canPlay = false; if ('Program' == item.Type) { var now = new Date(); if (now >= datetime.parseISO8601Date(item.StartDate, true) && now < datetime.parseISO8601Date(item.EndDate, true)) { hideAll(page, 'btnPlay', true); canPlay = true; } else { hideAll(page, 'btnPlay'); } hideAll(page, 'btnResume'); hideAll(page, 'btnInstantMix'); hideAll(page, 'btnShuffle'); } else if (playbackManager.canPlay(item)) { hideAll(page, 'btnPlay', true); var enableInstantMix = -1 !== ['Audio', 'MusicAlbum', 'MusicGenre', 'MusicArtist'].indexOf(item.Type); hideAll(page, 'btnInstantMix', enableInstantMix); var enableShuffle = item.IsFolder || -1 !== ['MusicAlbum', 'MusicGenre', 'MusicArtist'].indexOf(item.Type); hideAll(page, 'btnShuffle', enableShuffle); canPlay = true; const isResumable = item.UserData && item.UserData.PlaybackPositionTicks > 0; hideAll(page, 'btnResume', isResumable); if (isResumable) { for (const elem of page.querySelectorAll('.btnPlay')) { elem.querySelector('.detailButton-icon').classList.replace('play_arrow', 'replay'); } } } else { hideAll(page, 'btnPlay'); hideAll(page, 'btnResume'); hideAll(page, 'btnInstantMix'); hideAll(page, 'btnShuffle'); } return canPlay; } function reloadUserDataButtons(page, item) { var i; var length; var btnPlaystates = page.querySelectorAll('.btnPlaystate'); for (i = 0, length = btnPlaystates.length; i < length; i++) { var btnPlaystate = btnPlaystates[i]; if (itemHelper.canMarkPlayed(item)) { btnPlaystate.classList.remove('hide'); btnPlaystate.setItem(item); } else { btnPlaystate.classList.add('hide'); btnPlaystate.setItem(null); } } var btnUserRatings = page.querySelectorAll('.btnUserRating'); for (i = 0, length = btnUserRatings.length; i < length; i++) { var btnUserRating = btnUserRatings[i]; if (itemHelper.canRate(item)) { 
btnUserRating.classList.remove('hide'); btnUserRating.setItem(item); } else { btnUserRating.classList.add('hide'); btnUserRating.setItem(null); } } } function getArtistLinksHtml(artists, serverId, context) { var html = []; for (const artist of artists) { var href = appRouter.getRouteUrl(artist, { context: context, itemType: 'MusicArtist', serverId: serverId }); html.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + href + '">' + artist.Name + '</a>'); } html = html.join(' / '); return html; } /** * Renders the item's name block * @param {Object} item - Item used to render the name. * @param {HTMLDivElement} container - Container to render the information into. * @param {Object} context - Application context. */ function renderName(item, container, context) { var parentRoute; var parentNameHtml = []; var parentNameLast = false; if (item.AlbumArtists) { parentNameHtml.push(getArtistLinksHtml(item.AlbumArtists, item.ServerId, context)); parentNameLast = true; } else if (item.ArtistItems && item.ArtistItems.length && 'MusicVideo' === item.Type) { parentNameHtml.push(getArtistLinksHtml(item.ArtistItems, item.ServerId, context)); parentNameLast = true; } else if (item.SeriesName && 'Episode' === item.Type) { parentRoute = appRouter.getRouteUrl({ Id: item.SeriesId, Name: item.SeriesName, Type: 'Series', IsFolder: true, ServerId: item.ServerId }, { context: context }); parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeriesName + '</a>'); } else if (item.IsSeries || item.EpisodeTitle) { parentNameHtml.push(item.Name); } if (item.SeriesName && 'Season' === item.Type) { parentRoute = appRouter.getRouteUrl({ Id: item.SeriesId, Name: item.SeriesName, Type: 'Series', IsFolder: true, ServerId: item.ServerId }, { context: context }); parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeriesName + '</a>'); } else if (null != item.ParentIndexNumber && 'Episode' === item.Type) { parentRoute = appRouter.getRouteUrl({ Id: item.SeasonId, Name: item.SeasonName, Type: 'Season', IsFolder: true, ServerId: item.ServerId }, { context: context }); parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeasonName + '</a>'); } else if (null != item.ParentIndexNumber && item.IsSeries) { parentNameHtml.push(item.SeasonName || 'S' + item.ParentIndexNumber); } else if (item.Album && item.AlbumId && ('MusicVideo' === item.Type || 'Audio' === item.Type)) { parentRoute = appRouter.getRouteUrl({ Id: item.AlbumId, Name: item.Album, Type: 'MusicAlbum', IsFolder: true, ServerId: item.ServerId }, { context: context }); parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.Album + '</a>'); } else if (item.Album) { parentNameHtml.push(item.Album); } // FIXME: This whole section needs some refactoring, so it becames easier to scale across all form factors. 
See GH #1022 var html = ''; var tvShowHtml = parentNameHtml[0]; var tvSeasonHtml = parentNameHtml[1]; if (parentNameHtml.length) { if (parentNameLast) { // Music if (layoutManager.mobile) { html = '<h3 class="parentName musicParentName">' + parentNameHtml.join('</br>') + '</h3>'; } else { html = '<h3 class="parentName musicParentName">' + parentNameHtml.join(' - ') + '</h3>'; } } else { html = '<h1 class="parentName">' + tvShowHtml + '</h1>'; } } var name = itemHelper.getDisplayName(item, { includeParentInfo: false }); if (html && !parentNameLast) { if (tvSeasonHtml) { html += '<h3 class="itemName infoText subtitle">' + tvSeasonHtml + ' - ' + name + '</h3>'; } else { html += '<h3 class="itemName infoText subtitle">' + name + '</h3>'; } } else if (item.OriginalTitle && item.OriginalTitle != item.Name) { html = '<h1 class="itemName infoText parentNameLast withOriginalTitle">' + name + '</h1>' + html; } else { html = '<h1 class="itemName infoText parentNameLast">' + name + '</h1>' + html; } if (item.OriginalTitle && item.OriginalTitle != item.Name) { html += '<h4 class="itemName infoText originalTitle">' + item.OriginalTitle + '</h4>'; } container.innerHTML = html; if (html.length) { container.classList.remove('hide'); } else { container.classList.add('hide'); } } function setTrailerButtonVisibility(page, item) { if ((item.LocalTrailerCount || item.RemoteTrailers && item.RemoteTrailers.length) && -1 !== playbackManager.getSupportedCommands().indexOf('PlayTrailers')) { hideAll(page, 'btnPlayTrailer', true); } else { hideAll(page, 'btnPlayTrailer'); } } function renderBackdrop(item) { if (dom.getWindowSize().innerWidth >= 1000) { backdrop.setBackdrops([item]); } else { backdrop.clear(); } } function renderDetailPageBackdrop(page, item, apiClient) { var imgUrl; var hasbackdrop = false; var itemBackdropElement = page.querySelector('#itemBackdrop'); if (!layoutManager.mobile && !userSettings.detailsBanner()) { return false; } if (item.BackdropImageTags && item.BackdropImageTags.length) { imgUrl = apiClient.getScaledImageUrl(item.Id, { type: 'Backdrop', maxWidth: dom.getScreenWidth(), index: 0, tag: item.BackdropImageTags[0] }); imageLoader.lazyImage(itemBackdropElement, imgUrl); hasbackdrop = true; } else if (item.ParentBackdropItemId && item.ParentBackdropImageTags && item.ParentBackdropImageTags.length) { imgUrl = apiClient.getScaledImageUrl(item.ParentBackdropItemId, { type: 'Backdrop', maxWidth: dom.getScreenWidth(), index: 0, tag: item.ParentBackdropImageTags[0] }); imageLoader.lazyImage(itemBackdropElement, imgUrl); hasbackdrop = true; } else { itemBackdropElement.style.backgroundImage = ''; } return hasbackdrop; } function reloadFromItem(instance, page, params, item, user) { const apiClient = connectionManager.getApiClient(item.ServerId); Emby.Page.setTitle(''); // Start rendering the artwork first renderImage(page, item); renderLogo(page, item, apiClient); renderBackdrop(item); renderDetailPageBackdrop(page, item, apiClient); // Render the main information for the item page.querySelector('.detailPagePrimaryContainer').classList.add('detailRibbon'); renderName(item, page.querySelector('.nameContainer'), params.context); renderDetails(page, item, apiClient, params.context); renderTrackSelections(page, instance, item); renderSeriesTimerEditor(page, item, apiClient, user); renderTimerEditor(page, item, apiClient, user); setInitialCollapsibleState(page, item, apiClient, params.context, user); var canPlay = reloadPlayButtons(page, item); if ((item.LocalTrailerCount || item.RemoteTrailers && 
item.RemoteTrailers.length) && -1 !== playbackManager.getSupportedCommands().indexOf('PlayTrailers')) { hideAll(page, 'btnPlayTrailer', true); } else { hideAll(page, 'btnPlayTrailer'); } setTrailerButtonVisibility(page, item); if ('Program' !== item.Type || canPlay) { hideAll(page, 'mainDetailButtons', true); } else { hideAll(page, 'mainDetailButtons'); } showRecordingFields(instance, page, item, user); var groupedVersions = (item.MediaSources || []).filter(function (g) { return 'Grouping' == g.Type; }); if (user.Policy.IsAdministrator && groupedVersions.length) { page.querySelector('.btnSplitVersions').classList.remove('hide'); } else { page.querySelector('.btnSplitVersions').classList.add('hide'); } if (itemContextMenu.getCommands(getContextMenuOptions(item, user)).length) { hideAll(page, 'btnMoreCommands', true); } else { hideAll(page, 'btnMoreCommands'); } var itemBirthday = page.querySelector('#itemBirthday'); if ('Person' == item.Type && item.PremiereDate) { try { var birthday = datetime.parseISO8601Date(item.PremiereDate, true).toDateString(); itemBirthday.classList.remove('hide'); itemBirthday.innerHTML = globalize.translate('BirthDateValue', birthday); } catch (err) { itemBirthday.classList.add('hide'); } } else { itemBirthday.classList.add('hide'); } var itemDeathDate = page.querySelector('#itemDeathDate'); if ('Person' == item.Type && item.EndDate) { try { var deathday = datetime.parseISO8601Date(item.EndDate, true).toDateString(); itemDeathDate.classList.remove('hide'); itemDeathDate.innerHTML = globalize.translate('DeathDateValue', deathday); } catch (err) { itemDeathDate.classList.add('hide'); } } else { itemDeathDate.classList.add('hide'); } var itemBirthLocation = page.querySelector('#itemBirthLocation'); if ('Person' == item.Type && item.ProductionLocations && item.ProductionLocations.length) { var gmap = '<a is="emby-linkbutton" class="button-link textlink" target="_blank" href="https://maps.google.com/maps?q=' + item.ProductionLocations[0] + '">' + item.ProductionLocations[0] + '</a>'; itemBirthLocation.classList.remove('hide'); itemBirthLocation.innerHTML = globalize.translate('BirthPlaceValue', gmap); } else { itemBirthLocation.classList.add('hide'); } setPeopleHeader(page, item); loading.hide(); if (item.Type === 'Book') { hideAll(page, 'btnDownload', true); } require(['autoFocuser'], function (autoFocuser) { autoFocuser.autoFocus(page); }); } function logoImageUrl(item, apiClient, options) { options = options || {}; options.type = 'Logo'; if (item.ImageTags && item.ImageTags.Logo) { options.tag = item.ImageTags.Logo; return apiClient.getScaledImageUrl(item.Id, options); } if (item.ParentLogoImageTag) { options.tag = item.ParentLogoImageTag; return apiClient.getScaledImageUrl(item.ParentLogoItemId, options); } return null; } function renderLogo(page, item, apiClient) { var detailLogo = page.querySelector('.detailLogo'); var url = logoImageUrl(item, apiClient, {}); if (!layoutManager.mobile && !userSettings.enableBackdrops()) { detailLogo.classList.add('hide'); } else if (url) { detailLogo.classList.remove('hide'); imageLoader.setLazyImage(detailLogo, url); } else { detailLogo.classList.add('hide'); } } function showRecordingFields(instance, page, item, user) { if (!instance.currentRecordingFields) { var recordingFieldsElement = page.querySelector('.recordingFields'); if ('Program' == item.Type && user.Policy.EnableLiveTvManagement) { require(['recordingFields'], function (recordingFields) { instance.currentRecordingFields = new recordingFields({ parent: 
recordingFieldsElement, programId: item.Id, serverId: item.ServerId }); recordingFieldsElement.classList.remove('hide'); }); } else { recordingFieldsElement.classList.add('hide'); recordingFieldsElement.innerHTML = ''; } } } function renderLinks(page, item) { var externalLinksElem = page.querySelector('.itemExternalLinks'); var links = []; if (!layoutManager.tv && item.HomePageUrl) { links.push(`<a is="emby-linkbutton" class="button-link" href="${item.HomePageUrl}" target="_blank">${globalize.translate('ButtonWebsite')}</a>`); } if (item.ExternalUrls) { for (const url of item.ExternalUrls) { links.push(`<a is="emby-linkbutton" class="button-link" href="${url.Url}" target="_blank">${url.Name}</a>`); } } var html = []; if (links.length) { html.push(links.join(', ')); } externalLinksElem.innerHTML = html.join(', '); if (html.length) { externalLinksElem.classList.remove('hide'); } else { externalLinksElem.classList.add('hide'); } } function renderDetailImage(elem, item, imageLoader) { const itemArray = []; itemArray.push(item); const cardHtml = cardBuilder.getCardsHtml(itemArray, { shape: 'auto', showTitle: false, centerText: true, overlayText: false, transition: false, disableIndicators: true, disableHoverMenu: true, overlayPlayButton: true, width: dom.getWindowSize().innerWidth * 0.25 }); elem.innerHTML = cardHtml; imageLoader.lazyChildren(elem); } function renderImage(page, item) { renderDetailImage( page.querySelector('.detailImageContainer'), item, imageLoader ); } function refreshDetailImageUserData(elem, item) { elem.querySelector('.detailImageProgressContainer').innerHTML = indicators.getProgressBarHtml(item); } function refreshImage(page, item) { refreshDetailImageUserData(page.querySelector('.detailImageContainer'), item); } function setPeopleHeader(page, item) { if ('Audio' == item.MediaType || 'MusicAlbum' == item.Type || 'Book' == item.MediaType || 'Photo' == item.MediaType) { page.querySelector('#peopleHeader').innerHTML = globalize.translate('HeaderPeople'); } else { page.querySelector('#peopleHeader').innerHTML = globalize.translate('HeaderCastAndCrew'); } } function renderNextUp(page, item, user) { var section = page.querySelector('.nextUpSection'); if ('Series' != item.Type) { return void section.classList.add('hide'); } connectionManager.getApiClient(item.ServerId).getNextUpEpisodes({ SeriesId: item.Id, UserId: user.Id }).then(function (result) { if (result.Items.length) { section.classList.remove('hide'); } else { section.classList.add('hide'); } var html = cardBuilder.getCardsHtml({ items: result.Items, shape: 'overflowBackdrop', showTitle: true, displayAsSpecial: 'Season' == item.Type && item.IndexNumber, overlayText: false, centerText: true, overlayPlayButton: true }); var itemsContainer = section.querySelector('.nextUpItems'); itemsContainer.innerHTML = html; imageLoader.lazyChildren(itemsContainer); }); } function setInitialCollapsibleState(page, item, apiClient, context, user) { page.querySelector('.collectionItems').innerHTML = ''; if ('Playlist' == item.Type) { page.querySelector('#childrenCollapsible').classList.remove('hide'); renderPlaylistItems(page, item); } else if ('Studio' == item.Type || 'Person' == item.Type || 'Genre' == item.Type || 'MusicGenre' == item.Type || 'MusicArtist' == item.Type) { page.querySelector('#childrenCollapsible').classList.remove('hide'); renderItemsByName(page, item); } else if (item.IsFolder) { if ('BoxSet' == item.Type) { page.querySelector('#childrenCollapsible').classList.add('hide'); } renderChildren(page, item); } else { 
page.querySelector('#childrenCollapsible').classList.add('hide'); } if ('Series' == item.Type) { renderSeriesSchedule(page, item); renderNextUp(page, item, user); } else { page.querySelector('.nextUpSection').classList.add('hide'); } renderScenes(page, item); if (item.SpecialFeatureCount && 0 != item.SpecialFeatureCount && 'Series' != item.Type) { page.querySelector('#specialsCollapsible').classList.remove('hide'); renderSpecials(page, item, user, 6); } else { page.querySelector('#specialsCollapsible').classList.add('hide'); } renderCast(page, item); if (item.PartCount && item.PartCount > 1) { page.querySelector('#additionalPartsCollapsible').classList.remove('hide'); renderAdditionalParts(page, item, user); } else { page.querySelector('#additionalPartsCollapsible').classList.add('hide'); } if ('MusicAlbum' == item.Type) { renderMusicVideos(page, item, user); } else { page.querySelector('#musicVideosCollapsible').classList.add('hide'); } } function toggleLineClamp(clampTarget, e) { var expandButton = e.target; var clampClassName = 'detail-clamp-text'; if (clampTarget.classList.contains(clampClassName)) { clampTarget.classList.remove(clampClassName); expandButton.innerHTML = globalize.translate('ShowLess'); } else { clampTarget.classList.add(clampClassName); expandButton.innerHTML = globalize.translate('ShowMore'); } } function renderOverview(page, item) { for (const overviewElemnt of page.querySelectorAll('.overview')) { var overview = item.Overview || ''; if (overview) { overviewElemnt.innerHTML = overview; overviewElemnt.classList.remove('hide'); overviewElemnt.classList.add('detail-clamp-text'); // Grab the sibling element to control the expand state var expandButton = overviewElemnt.parentElement.querySelector('.overview-expand'); // Detect if we have overflow of text. Based on this StackOverflow answer // https://stackoverflow.com/a/35157976 if (Math.abs(overviewElemnt.scrollHeight - overviewElemnt.offsetHeight) > 2) { expandButton.classList.remove('hide'); } else { expandButton.classList.add('hide'); } expandButton.addEventListener('click', toggleLineClamp.bind(null, overviewElemnt)); for (const anchor of overviewElemnt.querySelectorAll('a')) { anchor.setAttribute('target', '_blank'); } } else { overviewElemnt.innerHTML = ''; overviewElemnt.classList.add('hide'); } } } function renderGenres(page, item, context = inferContext(item)) { var genres = item.GenreItems || []; var type = context === 'music' ? 'MusicGenre' : 'Genre'; var html = genres.map(function (p) { return '<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + appRouter.getRouteUrl({ Name: p.Name, Type: type, ServerId: item.ServerId, Id: p.Id }, { context: context }) + '">' + p.Name + '</a>'; }).join(', '); var genresLabel = page.querySelector('.genresLabel'); genresLabel.innerHTML = globalize.translate(genres.length > 1 ? 
'Genres' : 'Genre'); var genresValue = page.querySelector('.genres'); genresValue.innerHTML = html; var genresGroup = page.querySelector('.genresGroup'); if (genres.length) { genresGroup.classList.remove('hide'); } else { genresGroup.classList.add('hide'); } } function renderWriter(page, item, context) { var writers = (item.People || []).filter(function (person) { return person.Type === 'Writer'; }); var html = writers.map(function (person) { return '<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + appRouter.getRouteUrl({ Name: person.Name, Type: 'Person', ServerId: item.ServerId, Id: person.Id }, { context: context }) + '">' + person.Name + '</a>'; }).join(', '); var writersLabel = page.querySelector('.writersLabel'); writersLabel.innerHTML = globalize.translate(writers.length > 1 ? 'Writers' : 'Writer'); var writersValue = page.querySelector('.writers'); writersValue.innerHTML = html; var writersGroup = page.querySelector('.writersGroup'); if (writers.length) { writersGroup.classList.remove('hide'); } else { writersGroup.classList.add('hide'); } } function renderDirector(page, item, context) { var directors = (item.People || []).filter(function (person) { return person.Type === 'Director'; }); var html = directors.map(function (person) { return '<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + appRouter.getRouteUrl({ Name: person.Name, Type: 'Person', ServerId: item.ServerId, Id: person.Id }, { context: context }) + '">' + person.Name + '</a>'; }).join(', '); var directorsLabel = page.querySelector('.directorsLabel'); directorsLabel.innerHTML = globalize.translate(directors.length > 1 ? 'Directors' : 'Director'); var directorsValue = page.querySelector('.directors'); directorsValue.innerHTML = html; var directorsGroup = page.querySelector('.directorsGroup'); if (directors.length) { directorsGroup.classList.remove('hide'); } else { directorsGroup.classList.add('hide'); } } function renderMiscInfo(page, item) { const primaryItemMiscInfo = page.querySelectorAll('.itemMiscInfo-primary'); for (const miscInfo of primaryItemMiscInfo) { mediaInfo.fillPrimaryMediaInfo(miscInfo, item, { interactive: true, episodeTitle: false, subtitles: false }); if (miscInfo.innerHTML && 'SeriesTimer' !== item.Type) { miscInfo.classList.remove('hide'); } else { miscInfo.classList.add('hide'); } } const secondaryItemMiscInfo = page.querySelectorAll('.itemMiscInfo-secondary'); for (const miscInfo of secondaryItemMiscInfo) { mediaInfo.fillSecondaryMediaInfo(miscInfo, item, { interactive: true }); if (miscInfo.innerHTML && 'SeriesTimer' !== item.Type) { miscInfo.classList.remove('hide'); } else { miscInfo.classList.add('hide'); } } } function renderTagline(page, item) { var taglineElement = page.querySelector('.tagline'); if (item.Taglines && item.Taglines.length) { taglineElement.classList.remove('hide'); taglineElement.innerHTML = item.Taglines[0]; } else { taglineElement.classList.add('hide'); } } function renderDetails(page, item, apiClient, context, isStatic) { renderSimilarItems(page, item, context); renderMoreFromSeason(page, item, apiClient); renderMoreFromArtist(page, item, apiClient); renderDirector(page, item, context); renderWriter(page, item, context); renderGenres(page, item, context); renderChannelGuide(page, apiClient, item); renderTagline(page, item); renderOverview(page, item); renderMiscInfo(page, item); reloadUserDataButtons(page, item); renderLinks(page, item); renderTags(page, item); renderSeriesAirTime(page, item, isStatic); } function 
enableScrollX() { return browser.mobile && screen.availWidth <= 1000; } function getPortraitShape(scrollX) { if (null == scrollX) { scrollX = enableScrollX(); } return scrollX ? 'overflowPortrait' : 'portrait'; } function getSquareShape(scrollX) { if (null == scrollX) { scrollX = enableScrollX(); } return scrollX ? 'overflowSquare' : 'square'; } function renderMoreFromSeason(view, item, apiClient) { var section = view.querySelector('.moreFromSeasonSection'); if (section) { if ('Episode' !== item.Type || !item.SeasonId || !item.SeriesId) { return void section.classList.add('hide'); } var userId = apiClient.getCurrentUserId(); apiClient.getEpisodes(item.SeriesId, { SeasonId: item.SeasonId, UserId: userId, Fields: 'ItemCounts,PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount' }).then(function (result) { if (result.Items.length < 2) { return void section.classList.add('hide'); } section.classList.remove('hide'); section.querySelector('h2').innerHTML = globalize.translate('MoreFromValue', item.SeasonName); var itemsContainer = section.querySelector('.itemsContainer'); cardBuilder.buildCards(result.Items, { parentContainer: section, itemsContainer: itemsContainer, shape: 'autooverflow', sectionTitleTagName: 'h2', scalable: true, showTitle: true, overlayText: false, centerText: true, includeParentInfoInTitle: false, allowBottomPadding: false }); var card = itemsContainer.querySelector('.card[data-id="' + item.Id + '"]'); if (card) { setTimeout(function () { section.querySelector('.emby-scroller').toStart(card.previousSibling || card, true); }, 100); } }); } } function renderMoreFromArtist(view, item, apiClient) { var section = view.querySelector('.moreFromArtistSection'); if (section) { if ('MusicArtist' === item.Type) { if (!apiClient.isMinServerVersion('3.4.1.19')) { return void section.classList.add('hide'); } } else if ('MusicAlbum' !== item.Type || !item.AlbumArtists || !item.AlbumArtists.length) { return void section.classList.add('hide'); } var query = { IncludeItemTypes: 'MusicAlbum', Recursive: true, ExcludeItemIds: item.Id, SortBy: 'ProductionYear,SortName', SortOrder: 'Descending' }; if ('MusicArtist' === item.Type) { query.ContributingArtistIds = item.Id; } else if (apiClient.isMinServerVersion('3.4.1.18')) { query.AlbumArtistIds = item.AlbumArtists[0].Id; } else { query.ArtistIds = item.AlbumArtists[0].Id; } apiClient.getItems(apiClient.getCurrentUserId(), query).then(function (result) { if (!result.Items.length) { return void section.classList.add('hide'); } section.classList.remove('hide'); if ('MusicArtist' === item.Type) { section.querySelector('h2').innerHTML = globalize.translate('HeaderAppearsOn'); } else { section.querySelector('h2').innerHTML = globalize.translate('MoreFromValue', item.AlbumArtists[0].Name); } cardBuilder.buildCards(result.Items, { parentContainer: section, itemsContainer: section.querySelector('.itemsContainer'), shape: 'autooverflow', sectionTitleTagName: 'h2', scalable: true, coverImage: 'MusicArtist' === item.Type || 'MusicAlbum' === item.Type, showTitle: true, showParentTitle: false, centerText: true, overlayText: false, overlayPlayButton: true, showYear: true }); }); } } function renderSimilarItems(page, item, context) { var similarCollapsible = page.querySelector('#similarCollapsible'); if (similarCollapsible) { if ('Movie' != item.Type && 'Trailer' != item.Type && 'Series' != item.Type && 'Program' != item.Type && 'Recording' != item.Type && 'MusicAlbum' != item.Type && 'MusicArtist' != item.Type && 'Playlist' != item.Type) { return 
void similarCollapsible.classList.add('hide'); } similarCollapsible.classList.remove('hide'); var apiClient = connectionManager.getApiClient(item.ServerId); var options = { userId: apiClient.getCurrentUserId(), limit: 12, fields: 'PrimaryImageAspectRatio,UserData,CanDelete' }; if ('MusicAlbum' == item.Type && item.AlbumArtists && item.AlbumArtists.length) { options.ExcludeArtistIds = item.AlbumArtists[0].Id; } apiClient.getSimilarItems(item.Id, options).then(function (result) { if (!result.Items.length) { return void similarCollapsible.classList.add('hide'); } similarCollapsible.classList.remove('hide'); var html = ''; html += cardBuilder.getCardsHtml({ items: result.Items, shape: 'autooverflow', showParentTitle: 'MusicAlbum' == item.Type, centerText: true, showTitle: true, context: context, lazy: true, showDetailsMenu: true, coverImage: 'MusicAlbum' == item.Type || 'MusicArtist' == item.Type, overlayPlayButton: true, overlayText: false, showYear: 'Movie' === item.Type || 'Trailer' === item.Type || 'Series' === item.Type }); var similarContent = similarCollapsible.querySelector('.similarContent'); similarContent.innerHTML = html; imageLoader.lazyChildren(similarContent); }); } } function renderSeriesAirTime(page, item, isStatic) { var seriesAirTime = page.querySelector('#seriesAirTime'); if ('Series' != item.Type) { seriesAirTime.classList.add('hide'); return; } var html = ''; if (item.AirDays && item.AirDays.length) { if (7 == item.AirDays.length) { html += 'daily'; } else { html += item.AirDays.map(function (a) { return a + 's'; }).join(','); } } if (item.AirTime) { html += ' at ' + item.AirTime; } if (item.Studios.length) { if (isStatic) { html += ' on ' + item.Studios[0].Name; } else { var context = inferContext(item); var href = appRouter.getRouteUrl(item.Studios[0], { context: context, itemType: 'Studio', serverId: item.ServerId }); html += ' on <a class="textlink button-link" is="emby-linkbutton" href="' + href + '">' + item.Studios[0].Name + '</a>'; } } if (html) { html = ('Ended' == item.Status ? 
'Aired ' : 'Airs ') + html; seriesAirTime.innerHTML = html; seriesAirTime.classList.remove('hide'); } else { seriesAirTime.classList.add('hide'); } } function renderTags(page, item) { var itemTags = page.querySelector('.itemTags'); var tagElements = []; var tags = item.Tags || []; if ('Program' === item.Type) { tags = []; } for (var i = 0, length = tags.length; i < length; i++) { tagElements.push(tags[i]); } if (tagElements.length) { itemTags.innerHTML = globalize.translate('TagsValue', tagElements.join(', ')); itemTags.classList.remove('hide'); } else { itemTags.innerHTML = ''; itemTags.classList.add('hide'); } } function renderChildren(page, item) { var fields = 'ItemCounts,PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount'; var query = { ParentId: item.Id, Fields: fields }; if ('BoxSet' !== item.Type) { query.SortBy = 'SortName'; } var promise; var apiClient = connectionManager.getApiClient(item.ServerId); var userId = apiClient.getCurrentUserId(); if ('Series' == item.Type) { promise = apiClient.getSeasons(item.Id, { userId: userId, Fields: fields }); } else if ('Season' == item.Type) { fields += ',Overview'; promise = apiClient.getEpisodes(item.SeriesId, { seasonId: item.Id, userId: userId, Fields: fields }); } else if ('MusicArtist' == item.Type) { query.SortBy = 'ProductionYear,SortName'; } promise = promise || apiClient.getItems(apiClient.getCurrentUserId(), query); promise.then(function (result) { var html = ''; var scrollX = false; var isList = false; var childrenItemsContainer = page.querySelector('.childrenItemsContainer'); if ('MusicAlbum' == item.Type) { html = listView.getListViewHtml({ items: result.Items, smallIcon: true, showIndex: true, index: 'disc', showIndexNumberLeft: true, playFromHere: true, action: 'playallfromhere', image: false, artist: 'auto', containerAlbumArtists: item.AlbumArtists }); isList = true; } else if ('Series' == item.Type) { scrollX = enableScrollX(); html = cardBuilder.getCardsHtml({ items: result.Items, shape: 'overflowPortrait', showTitle: true, centerText: true, lazy: true, overlayPlayButton: true, allowBottomPadding: !scrollX }); } else if ('Season' == item.Type || 'Episode' == item.Type) { if ('Episode' !== item.Type) { isList = true; } scrollX = 'Episode' == item.Type; if (result.Items.length < 2 && 'Episode' === item.Type) { return; } if ('Episode' === item.Type) { html = cardBuilder.getCardsHtml({ items: result.Items, shape: 'overflowBackdrop', showTitle: true, displayAsSpecial: 'Season' == item.Type && item.IndexNumber, playFromHere: true, overlayText: true, lazy: true, showDetailsMenu: true, overlayPlayButton: true, allowBottomPadding: !scrollX, includeParentInfoInTitle: false }); } else if ('Season' === item.Type) { html = listView.getListViewHtml({ items: result.Items, showIndexNumber: false, enableOverview: true, enablePlayedButton: layoutManager.mobile ? false : true, infoButton: layoutManager.mobile ? false : true, imageSize: 'large', enableSideMediaInfo: false, highlight: false, action: layoutManager.tv ? 
'resume' : 'none', imagePlayButton: true, includeParentInfoInTitle: false }); } } if ('BoxSet' !== item.Type) { page.querySelector('#childrenCollapsible').classList.remove('hide'); } if (scrollX) { childrenItemsContainer.classList.add('scrollX'); childrenItemsContainer.classList.add('hiddenScrollX'); childrenItemsContainer.classList.remove('vertical-wrap'); childrenItemsContainer.classList.remove('vertical-list'); } else { childrenItemsContainer.classList.remove('scrollX'); childrenItemsContainer.classList.remove('hiddenScrollX'); childrenItemsContainer.classList.remove('smoothScrollX'); if (isList) { childrenItemsContainer.classList.add('vertical-list'); childrenItemsContainer.classList.remove('vertical-wrap'); } else { childrenItemsContainer.classList.add('vertical-wrap'); childrenItemsContainer.classList.remove('vertical-list'); } } if (layoutManager.mobile) { childrenItemsContainer.classList.remove('padded-right'); } childrenItemsContainer.innerHTML = html; imageLoader.lazyChildren(childrenItemsContainer); if ('BoxSet' == item.Type) { var collectionItemTypes = [{ name: globalize.translate('HeaderVideos'), mediaType: 'Video' }, { name: globalize.translate('HeaderSeries'), type: 'Series' }, { name: globalize.translate('HeaderAlbums'), type: 'MusicAlbum' }, { name: globalize.translate('HeaderBooks'), type: 'Book' }]; renderCollectionItems(page, item, collectionItemTypes, result.Items); } }); if ('Season' == item.Type) { page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderEpisodes'); } else if ('Series' == item.Type) { page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderSeasons'); } else if ('MusicAlbum' == item.Type) { page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderTracks'); } else { page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderItems'); } if ('MusicAlbum' == item.Type || 'Season' == item.Type) { page.querySelector('.childrenSectionHeader').classList.add('hide'); page.querySelector('#childrenCollapsible').classList.add('verticalSection-extrabottompadding'); } else { page.querySelector('.childrenSectionHeader').classList.remove('hide'); } } function renderItemsByName(page, item) { require('scripts/itembynamedetailpage'.split(','), function () { window.ItemsByName.renderItems(page, item); }); } function renderPlaylistItems(page, item) { require('scripts/playlistedit'.split(','), function () { PlaylistViewer.render(page, item); }); } function renderProgramsForChannel(page, result) { var html = ''; var currentItems = []; var currentStartDate = null; for (var i = 0, length = result.Items.length; i < length; i++) { var item = result.Items[i]; var itemStartDate = datetime.parseISO8601Date(item.StartDate); if (!(currentStartDate && currentStartDate.toDateString() === itemStartDate.toDateString())) { if (currentItems.length) { html += '<div class="verticalSection verticalDetailSection">'; html += '<h2 class="sectionTitle padded-left">' + datetime.toLocaleDateString(currentStartDate, { weekday: 'long', month: 'long', day: 'numeric' }) + '</h2>'; html += '<div is="emby-itemscontainer" class="vertical-list padded-left padded-right">' + listView.getListViewHtml({ items: currentItems, enableUserDataButtons: false, showParentTitle: true, image: false, showProgramTime: true, mediaInfo: false, parentTitleWithTitle: true }) + '</div></div>'; } currentStartDate = itemStartDate; currentItems = []; } currentItems.push(item); } if (currentItems.length) { html += '<div class="verticalSection 
verticalDetailSection">'; html += '<h2 class="sectionTitle padded-left">' + datetime.toLocaleDateString(currentStartDate, { weekday: 'long', month: 'long', day: 'numeric' }) + '</h2>'; html += '<div is="emby-itemscontainer" class="vertical-list padded-left padded-right">' + listView.getListViewHtml({ items: currentItems, enableUserDataButtons: false, showParentTitle: true, image: false, showProgramTime: true, mediaInfo: false, parentTitleWithTitle: true }) + '</div></div>'; } page.querySelector('.programGuide').innerHTML = html; } function renderChannelGuide(page, apiClient, item) { if ('TvChannel' === item.Type) { page.querySelector('.programGuideSection').classList.remove('hide'); apiClient.getLiveTvPrograms({ ChannelIds: item.Id, UserId: apiClient.getCurrentUserId(), HasAired: false, SortBy: 'StartDate', EnableTotalRecordCount: false, EnableImages: false, ImageTypeLimit: 0, EnableUserData: false }).then(function (result) { renderProgramsForChannel(page, result); }); } } function renderSeriesSchedule(page, item) { var apiClient = connectionManager.getApiClient(item.ServerId); apiClient.getLiveTvPrograms({ UserId: apiClient.getCurrentUserId(), HasAired: false, SortBy: 'StartDate', EnableTotalRecordCount: false, EnableImages: false, ImageTypeLimit: 0, Limit: 50, EnableUserData: false, LibrarySeriesId: item.Id }).then(function (result) { if (result.Items.length) { page.querySelector('#seriesScheduleSection').classList.remove('hide'); } else { page.querySelector('#seriesScheduleSection').classList.add('hide'); } page.querySelector('#seriesScheduleList').innerHTML = listView.getListViewHtml({ items: result.Items, enableUserDataButtons: false, showParentTitle: false, image: false, showProgramDateTime: true, mediaInfo: false, showTitle: true, moreButton: false, action: 'programdialog' }); loading.hide(); }); } function inferContext(item) { if ('Movie' === item.Type || 'BoxSet' === item.Type) { return 'movies'; } if ('Series' === item.Type || 'Season' === item.Type || 'Episode' === item.Type) { return 'tvshows'; } if ('MusicArtist' === item.Type || 'MusicAlbum' === item.Type || 'Audio' === item.Type || 'AudioBook' === item.Type) { return 'music'; } if ('Program' === item.Type) { return 'livetv'; } return null; } function filterItemsByCollectionItemType(items, typeInfo) { return items.filter(function (item) { if (typeInfo.mediaType) { return item.MediaType == typeInfo.mediaType; } return item.Type == typeInfo.type; }); } function canPlaySomeItemInCollection(items) { var i = 0; for (var length = items.length; i < length; i++) { if (playbackManager.canPlay(items[i])) { return true; } } return false; } function renderCollectionItems(page, parentItem, types, items) { page.querySelector('.collectionItems').classList.remove('hide'); page.querySelector('.collectionItems').innerHTML = ''; for (const type of types) { var typeItems = filterItemsByCollectionItemType(items, type); if (typeItems.length) { renderCollectionItemType(page, parentItem, type, typeItems); } } var otherType = { name: globalize.translate('HeaderOtherItems') }; var otherTypeItems = items.filter(function (curr) { return !types.filter(function (t) { return filterItemsByCollectionItemType([curr], t).length > 0; }).length; }); if (otherTypeItems.length) { renderCollectionItemType(page, parentItem, otherType, otherTypeItems); } if (!items.length) { renderCollectionItemType(page, parentItem, { name: globalize.translate('HeaderItems') }, items); } var containers = page.querySelectorAll('.collectionItemsContainer'); var notifyRefreshNeeded = 
function () { renderChildren(page, parentItem); }; for (const container of containers) { container.notifyRefreshNeeded = notifyRefreshNeeded; } // if nothing in the collection can be played hide play and shuffle buttons if (!canPlaySomeItemInCollection(items)) { hideAll(page, 'btnPlay', false); hideAll(page, 'btnShuffle', false); } // HACK: Call autoFocuser again because btnPlay may be hidden, but focused by reloadFromItem // FIXME: Sometimes focus does not move until all (?) sections are loaded require(['autoFocuser'], function (autoFocuser) { autoFocuser.autoFocus(page); }); } function renderCollectionItemType(page, parentItem, type, items) { var html = ''; html += '<div class="verticalSection">'; html += '<div class="sectionTitleContainer sectionTitleContainer-cards padded-left">'; html += '<h2 class="sectionTitle sectionTitle-cards">'; html += '<span>' + type.name + '</span>'; html += '</h2>'; html += '<button class="btnAddToCollection sectionTitleButton" type="button" is="paper-icon-button-light" style="margin-left:1em;"><span class="material-icons add"></span></button>'; html += '</div>'; html += '<div is="emby-itemscontainer" class="itemsContainer collectionItemsContainer vertical-wrap padded-left padded-right">'; var shape = 'MusicAlbum' == type.type ? getSquareShape(false) : getPortraitShape(false); html += cardBuilder.getCardsHtml({ items: items, shape: shape, showTitle: true, showYear: 'Video' === type.mediaType || 'Series' === type.type, centerText: true, lazy: true, showDetailsMenu: true, overlayMoreButton: true, showAddToCollection: false, showRemoveFromCollection: true, collectionId: parentItem.Id }); html += '</div>'; html += '</div>'; var collectionItems = page.querySelector('.collectionItems'); collectionItems.insertAdjacentHTML('beforeend', html); imageLoader.lazyChildren(collectionItems); collectionItems.querySelector('.btnAddToCollection').addEventListener('click', function () { require(['alert'], function (alert) { alert({ text: globalize.translate('AddItemToCollectionHelp'), html: globalize.translate('AddItemToCollectionHelp') + '<br/><br/><a is="emby-linkbutton" class="button-link" target="_blank" href="https://web.archive.org/web/20181216120305/https://github.com/MediaBrowser/Wiki/wiki/Collections">' + globalize.translate('ButtonLearnMore') + '</a>' }); }); }); } function renderMusicVideos(page, item, user) { connectionManager.getApiClient(item.ServerId).getItems(user.Id, { SortBy: 'SortName', SortOrder: 'Ascending', IncludeItemTypes: 'MusicVideo', Recursive: true, Fields: 'PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount', AlbumIds: item.Id }).then(function (result) { if (result.Items.length) { page.querySelector('#musicVideosCollapsible').classList.remove('hide'); var musicVideosContent = page.querySelector('.musicVideosContent'); musicVideosContent.innerHTML = getVideosHtml(result.Items, user); imageLoader.lazyChildren(musicVideosContent); } else { page.querySelector('#musicVideosCollapsible').classList.add('hide'); } }); } function renderAdditionalParts(page, item, user) { connectionManager.getApiClient(item.ServerId).getAdditionalVideoParts(user.Id, item.Id).then(function (result) { if (result.Items.length) { page.querySelector('#additionalPartsCollapsible').classList.remove('hide'); var additionalPartsContent = page.querySelector('#additionalPartsContent'); additionalPartsContent.innerHTML = getVideosHtml(result.Items, user); imageLoader.lazyChildren(additionalPartsContent); } else { 
page.querySelector('#additionalPartsCollapsible').classList.add('hide'); } }); } function renderScenes(page, item) { var chapters = item.Chapters || []; if (chapters.length && !chapters[0].ImageTag && (chapters = []), chapters.length) { page.querySelector('#scenesCollapsible').classList.remove('hide'); var scenesContent = page.querySelector('#scenesContent'); require(['chaptercardbuilder'], function (chaptercardbuilder) { chaptercardbuilder.buildChapterCards(item, chapters, { itemsContainer: scenesContent, backdropShape: 'overflowBackdrop', squareShape: 'overflowSquare', imageBlurhashes: item.ImageBlurHashes }); }); } else { page.querySelector('#scenesCollapsible').classList.add('hide'); } } function getVideosHtml(items, user, limit, moreButtonClass) { var html = cardBuilder.getCardsHtml({ items: items, shape: 'auto', showTitle: true, action: 'play', overlayText: false, centerText: true, showRuntime: true }); if (limit && items.length > limit) { html += '<p style="margin: 0;padding-left:5px;"><button is="emby-button" type="button" class="raised more ' + moreButtonClass + '">' + globalize.translate('ButtonMore') + '</button></p>'; } return html; } function renderSpecials(page, item, user, limit) { connectionManager.getApiClient(item.ServerId).getSpecialFeatures(user.Id, item.Id).then(function (specials) { var specialsContent = page.querySelector('#specialsContent'); specialsContent.innerHTML = getVideosHtml(specials, user, limit, 'moreSpecials'); imageLoader.lazyChildren(specialsContent); }); } function renderCast(page, item) { var people = (item.People || []).filter(function (p) { return p.Type === 'Actor'; }); if (!people.length) { return void page.querySelector('#castCollapsible').classList.add('hide'); } page.querySelector('#castCollapsible').classList.remove('hide'); var castContent = page.querySelector('#castContent'); require(['peoplecardbuilder'], function (peoplecardbuilder) { peoplecardbuilder.buildPeopleCards(people, { itemsContainer: castContent, coverImage: true, serverId: item.ServerId, shape: 'overflowPortrait', imageBlurhashes: item.ImageBlurHashes }); }); } function itemDetailPage() { var self = this; self.setInitialCollapsibleState = setInitialCollapsibleState; self.renderDetails = renderDetails; self.renderCast = renderCast; } function bindAll(view, selector, eventName, fn) { var elems = view.querySelectorAll(selector); for (const elem of elems) { elem.addEventListener(eventName, fn); } } function onTrackSelectionsSubmit(e) { e.preventDefault(); return false; } window.ItemDetailPage = new itemDetailPage(); return function (view, params) { function reload(instance, page, params) { loading.show(); var apiClient = params.serverId ? 
connectionManager.getApiClient(params.serverId) : ApiClient; Promise.all([getPromise(apiClient, params), apiClient.getCurrentUser()]).then(([item, user]) => { currentItem = item; reloadFromItem(instance, page, params, item, user); }).catch((error) => { console.error('failed to get item or current user: ', error); }); } function splitVersions(instance, page, apiClient, params) { require(['confirm'], function (confirm) { confirm('Are you sure you wish to split the media sources into separate items?', 'Split Media Apart').then(function () { loading.show(); apiClient.ajax({ type: 'DELETE', url: apiClient.getUrl('Videos/' + params.id + '/AlternateSources') }).then(function () { loading.hide(); reload(instance, page, params); }); }); }); } function getPlayOptions(startPosition) { var audioStreamIndex = view.querySelector('.selectAudio').value || null; return { startPositionTicks: startPosition, mediaSourceId: view.querySelector('.selectSource').value, audioStreamIndex: audioStreamIndex, subtitleStreamIndex: view.querySelector('.selectSubtitles').value }; } function playItem(item, startPosition) { var playOptions = getPlayOptions(startPosition); playOptions.items = [item]; playbackManager.play(playOptions); } function playTrailer() { playbackManager.playTrailers(currentItem); } function playCurrentItem(button, mode) { var item = currentItem; if ('Program' === item.Type) { var apiClient = connectionManager.getApiClient(item.ServerId); return void apiClient.getLiveTvChannel(item.ChannelId, apiClient.getCurrentUserId()).then(function (channel) { playbackManager.play({ items: [channel] }); }); } playItem(item, item.UserData && mode === 'resume' ? item.UserData.PlaybackPositionTicks : 0); } function onPlayClick() { playCurrentItem(this, this.getAttribute('data-mode')); } function onInstantMixClick() { playbackManager.instantMix(currentItem); } function onShuffleClick() { playbackManager.shuffle(currentItem); } function onCancelSeriesTimerClick() { require(['recordingHelper'], function (recordingHelper) { recordingHelper.cancelSeriesTimerWithConfirmation(currentItem.Id, currentItem.ServerId).then(function () { Dashboard.navigate('livetv.html'); }); }); } function onCancelTimerClick() { require(['recordingHelper'], function (recordingHelper) { recordingHelper.cancelTimer(connectionManager.getApiClient(currentItem.ServerId), currentItem.TimerId).then(function () { reload(self, view, params); }); }); } function onPlayTrailerClick() { playTrailer(); } function onDownloadClick() { require(['fileDownloader'], function (fileDownloader) { var downloadHref = apiClient.getItemDownloadUrl(currentItem.Id); fileDownloader.download([{ url: downloadHref, itemId: currentItem.Id, serverId: currentItem.serverId }]); }); } function onMoreCommandsClick() { var button = this; apiClient.getCurrentUser().then(function (user) { itemContextMenu.show(getContextMenuOptions(currentItem, user, button)).then(function (result) { if (result.deleted) { appRouter.goHome(); } else if (result.updated) { reload(self, view, params); } }); }); } function onPlayerChange() { renderTrackSelections(view, self, currentItem); setTrailerButtonVisibility(view, currentItem); } function editImages() { return new Promise(function (resolve, reject) { require(['imageEditor'], function (imageEditor) { imageEditor.show({ itemId: currentItem.Id, serverId: currentItem.ServerId }).then(resolve, reject); }); }); } function onWebSocketMessage(e, data) { var msg = data; if ('UserDataChanged' === msg.MessageType && currentItem && msg.Data.UserId == 
apiClient.getCurrentUserId()) { var key = currentItem.UserData.Key; var userData = msg.Data.UserDataList.filter(function (u) { return u.Key == key; })[0]; if (userData) { currentItem.UserData = userData; reloadPlayButtons(view, currentItem); refreshImage(view, currentItem); } } } var currentItem; var self = this; var apiClient = params.serverId ? connectionManager.getApiClient(params.serverId) : ApiClient; view.querySelectorAll('.btnPlay'); bindAll(view, '.btnPlay', 'click', onPlayClick); bindAll(view, '.btnResume', 'click', onPlayClick); bindAll(view, '.btnInstantMix', 'click', onInstantMixClick); bindAll(view, '.btnShuffle', 'click', onShuffleClick); bindAll(view, '.btnPlayTrailer', 'click', onPlayTrailerClick); bindAll(view, '.btnCancelSeriesTimer', 'click', onCancelSeriesTimerClick); bindAll(view, '.btnCancelTimer', 'click', onCancelTimerClick); bindAll(view, '.btnDownload', 'click', onDownloadClick); view.querySelector('.trackSelections').addEventListener('submit', onTrackSelectionsSubmit); view.querySelector('.btnSplitVersions').addEventListener('click', function () { splitVersions(self, view, apiClient, params); }); bindAll(view, '.btnMoreCommands', 'click', onMoreCommandsClick); view.querySelector('.selectSource').addEventListener('change', function () { renderVideoSelections(view, self._currentPlaybackMediaSources); renderAudioSelections(view, self._currentPlaybackMediaSources); renderSubtitleSelections(view, self._currentPlaybackMediaSources); }); view.addEventListener('click', function (e) { if (dom.parentWithClass(e.target, 'moreScenes')) { renderScenes(view, currentItem); } else if (dom.parentWithClass(e.target, 'morePeople')) { renderCast(view, currentItem); } else if (dom.parentWithClass(e.target, 'moreSpecials')) { apiClient.getCurrentUser().then(function (user) { renderSpecials(view, currentItem, user); }); } }); view.querySelector('.detailImageContainer').addEventListener('click', function (e) { if (dom.parentWithClass(e.target, 'itemDetailGalleryLink')) { editImages().then(function () { reload(self, view, params); }); } }); view.addEventListener('viewshow', function (e) { var page = this; libraryMenu.setTransparentMenu(true); if (e.detail.isRestored) { if (currentItem) { Emby.Page.setTitle(''); renderTrackSelections(page, self, currentItem, true); } } else { reload(self, page, params); } events.on(apiClient, 'message', onWebSocketMessage); events.on(playbackManager, 'playerchange', onPlayerChange); }); view.addEventListener('viewbeforehide', function () { events.off(apiClient, 'message', onWebSocketMessage); events.off(playbackManager, 'playerchange', onPlayerChange); libraryMenu.setTransparentMenu(false); }); view.addEventListener('viewdestroy', function () { currentItem = null; self._currentPlaybackMediaSources = null; self.currentRecordingFields = null; }); }; });
1
16,589
How about that `scaleFactor`?
jellyfin-jellyfin-web
js
@@ -273,6 +273,19 @@ class NonDivArithmeticOpAnalyzer } } + /** + * @param int|float $result + */ + private static function getNumericalType($result): Type\Union + { + if ($result <= PHP_INT_MAX && $result >= PHP_INT_MIN) { + /** @var int $result */ + return Type::getInt(false, $result); + } + + return Type::getFloat($result); + } + /** * @param string[] &$invalid_left_messages * @param string[] &$invalid_right_messages
1
<?php namespace Psalm\Internal\Analyzer\Statements\Expression\BinaryOp; use PhpParser; use Psalm\CodeLocation; use Psalm\Config; use Psalm\Context; use Psalm\Internal\Analyzer\Statements\Expression\Assignment\ArrayAssignmentAnalyzer; use Psalm\Internal\Analyzer\StatementsAnalyzer; use Psalm\Internal\Type\TypeCombiner; use Psalm\Issue\FalseOperand; use Psalm\Issue\InvalidOperand; use Psalm\Issue\MixedOperand; use Psalm\Issue\NullOperand; use Psalm\Issue\PossiblyFalseOperand; use Psalm\Issue\PossiblyInvalidOperand; use Psalm\Issue\PossiblyNullOperand; use Psalm\Issue\StringIncrement; use Psalm\IssueBuffer; use Psalm\StatementsSource; use Psalm\Type; use Psalm\Type\Atomic\TArray; use Psalm\Type\Atomic\TFalse; use Psalm\Type\Atomic\TFloat; use Psalm\Type\Atomic\TInt; use Psalm\Type\Atomic\TKeyedArray; use Psalm\Type\Atomic\TList; use Psalm\Type\Atomic\TLiteralInt; use Psalm\Type\Atomic\TMixed; use Psalm\Type\Atomic\TNamedObject; use Psalm\Type\Atomic\TNull; use Psalm\Type\Atomic\TNumeric; use Psalm\Type\Atomic\TPositiveInt; use Psalm\Type\Atomic\TTemplateParam; use function array_diff_key; use function array_values; use function is_int; use function preg_match; use function strtolower; use const PHP_INT_MAX; /** * @internal */ class NonDivArithmeticOpAnalyzer { public static function analyze( ?StatementsSource $statements_source, \Psalm\Internal\Provider\NodeDataProvider $nodes, PhpParser\Node\Expr $left, PhpParser\Node\Expr $right, PhpParser\Node $parent, ?Type\Union &$result_type = null, ?Context $context = null ) : void { $codebase = $statements_source ? $statements_source->getCodebase() : null; $left_type = $nodes->getType($left); $right_type = $nodes->getType($right); $config = Config::getInstance(); if ($left_type && $left_type->isEmpty()) { $left_type = $right_type; } elseif ($right_type && $right_type->isEmpty()) { $right_type = $left_type; } if ($left_type && $right_type) { if ($left_type->isNull()) { if ($statements_source && IssueBuffer::accepts( new NullOperand( 'Left operand cannot be null', new CodeLocation($statements_source, $left) ), $statements_source->getSuppressedIssues() )) { // fall through } $result_type = Type::getMixed(); return; } if ($left_type->isNullable() && !$left_type->ignore_nullable_issues) { if ($statements_source && IssueBuffer::accepts( new PossiblyNullOperand( 'Left operand cannot be nullable, got ' . $left_type, new CodeLocation($statements_source, $left) ), $statements_source->getSuppressedIssues() )) { // fall through } } if ($right_type->isNull()) { if ($statements_source && IssueBuffer::accepts( new NullOperand( 'Right operand cannot be null', new CodeLocation($statements_source, $right) ), $statements_source->getSuppressedIssues() )) { // fall through } $result_type = Type::getMixed(); return; } if ($right_type->isNullable() && !$right_type->ignore_nullable_issues) { if ($statements_source && IssueBuffer::accepts( new PossiblyNullOperand( 'Right operand cannot be nullable, got ' . $right_type, new CodeLocation($statements_source, $right) ), $statements_source->getSuppressedIssues() )) { // fall through } } if ($left_type->isFalse()) { if ($statements_source && IssueBuffer::accepts( new FalseOperand( 'Left operand cannot be false', new CodeLocation($statements_source, $left) ), $statements_source->getSuppressedIssues() )) { // fall through } return; } if ($left_type->isFalsable() && !$left_type->ignore_falsable_issues) { if ($statements_source && IssueBuffer::accepts( new PossiblyFalseOperand( 'Left operand cannot be falsable, got ' . 
$left_type, new CodeLocation($statements_source, $left) ), $statements_source->getSuppressedIssues() )) { // fall through } } if ($right_type->isFalse()) { if ($statements_source && IssueBuffer::accepts( new FalseOperand( 'Right operand cannot be false', new CodeLocation($statements_source, $right) ), $statements_source->getSuppressedIssues() )) { // fall through } return; } if ($right_type->isFalsable() && !$right_type->ignore_falsable_issues) { if ($statements_source && IssueBuffer::accepts( new PossiblyFalseOperand( 'Right operand cannot be falsable, got ' . $right_type, new CodeLocation($statements_source, $right) ), $statements_source->getSuppressedIssues() )) { // fall through } } $invalid_left_messages = []; $invalid_right_messages = []; $has_valid_left_operand = false; $has_valid_right_operand = false; $has_string_increment = false; foreach ($left_type->getAtomicTypes() as $left_type_part) { foreach ($right_type->getAtomicTypes() as $right_type_part) { $candidate_result_type = self::analyzeNonDivOperands( $statements_source, $codebase, $config, $context, $left, $right, $parent, $left_type_part, $right_type_part, $invalid_left_messages, $invalid_right_messages, $has_valid_left_operand, $has_valid_right_operand, $has_string_increment, $result_type ); if ($candidate_result_type) { $result_type = $candidate_result_type; return; } } } if ($invalid_left_messages && $statements_source) { $first_left_message = $invalid_left_messages[0]; if ($has_valid_left_operand) { if (IssueBuffer::accepts( new PossiblyInvalidOperand( $first_left_message, new CodeLocation($statements_source, $left) ), $statements_source->getSuppressedIssues() )) { // fall through } } else { if (IssueBuffer::accepts( new InvalidOperand( $first_left_message, new CodeLocation($statements_source, $left) ), $statements_source->getSuppressedIssues() )) { // fall through } } } if ($invalid_right_messages && $statements_source) { $first_right_message = $invalid_right_messages[0]; if ($has_valid_right_operand) { if (IssueBuffer::accepts( new PossiblyInvalidOperand( $first_right_message, new CodeLocation($statements_source, $right) ), $statements_source->getSuppressedIssues() )) { // fall through } } else { if (IssueBuffer::accepts( new InvalidOperand( $first_right_message, new CodeLocation($statements_source, $right) ), $statements_source->getSuppressedIssues() )) { // fall through } } } if ($has_string_increment && $statements_source) { if (IssueBuffer::accepts( new StringIncrement( 'Possibly unintended string increment', new CodeLocation($statements_source, $left) ), $statements_source->getSuppressedIssues() )) { // fall through } } } } /** * @param string[] &$invalid_left_messages * @param string[] &$invalid_right_messages */ private static function analyzeNonDivOperands( ?StatementsSource $statements_source, ?\Psalm\Codebase $codebase, Config $config, ?Context $context, PhpParser\Node\Expr $left, PhpParser\Node\Expr $right, PhpParser\Node $parent, Type\Atomic $left_type_part, Type\Atomic $right_type_part, array &$invalid_left_messages, array &$invalid_right_messages, bool &$has_valid_left_operand, bool &$has_valid_right_operand, bool &$has_string_increment, Type\Union &$result_type = null ): ?Type\Union { if ($left_type_part instanceof TLiteralInt && $right_type_part instanceof TLiteralInt && ($left instanceof PhpParser\Node\Scalar || $left instanceof PhpParser\Node\Expr\ConstFetch || $left instanceof PhpParser\Node\Expr\ClassConstFetch || $left instanceof PhpParser\Node\Expr\BinaryOp) && ($right instanceof 
PhpParser\Node\Scalar || $right instanceof PhpParser\Node\Expr\ConstFetch || $right instanceof PhpParser\Node\Expr\ClassConstFetch || $right instanceof PhpParser\Node\Expr\BinaryOp) ) { // time for some arithmetic! $calculated_type = null; if ($parent instanceof PhpParser\Node\Expr\BinaryOp\Plus) { $calculated_type = Type::getInt(false, $left_type_part->value + $right_type_part->value); } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\Minus) { $calculated_type = Type::getInt(false, $left_type_part->value - $right_type_part->value); } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\Mod) { $calculated_type = Type::getInt(false, $left_type_part->value % $right_type_part->value); } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\Mul) { $result = $left_type_part->value * $right_type_part->value; if ($result <= PHP_INT_MAX) { $calculated_type = Type::getInt(false, $result); } else { $calculated_type = Type::getFloat($result); } } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\Pow) { $result = $left_type_part->value ** $right_type_part->value; if ($result <= PHP_INT_MAX) { $calculated_type = Type::getInt(false, $result); } else { $calculated_type = Type::getFloat($result); } } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\BitwiseOr) { $calculated_type = Type::getInt(false, $left_type_part->value | $right_type_part->value); } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\BitwiseAnd) { $calculated_type = Type::getInt(false, $left_type_part->value & $right_type_part->value); } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\BitwiseXor) { $calculated_type = Type::getInt(false, $left_type_part->value ^ $right_type_part->value); } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\ShiftLeft) { $calculated_type = Type::getInt(false, $left_type_part->value << $right_type_part->value); } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\ShiftRight) { $calculated_type = Type::getInt(false, $left_type_part->value >> $right_type_part->value); } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\Div) { if ($right_type_part->value === 0) { $calculated_type = Type::getEmpty(); } else { $value = $left_type_part->value / $right_type_part->value; if (is_int($value)) { $calculated_type = Type::getInt(false, $value); } else { $calculated_type = Type::getFloat($value); } } } if ($calculated_type) { if ($result_type) { $result_type = Type::combineUnionTypes( $calculated_type, $result_type ); } else { $result_type = $calculated_type; } $has_valid_left_operand = true; $has_valid_right_operand = true; return null; } } if ($left_type_part instanceof TNull || $right_type_part instanceof TNull) { // null case is handled above return null; } if ($left_type_part instanceof TFalse || $right_type_part instanceof TFalse) { // null case is handled above return null; } if ($left_type_part instanceof Type\Atomic\TString && $right_type_part instanceof TInt && $parent instanceof PhpParser\Node\Expr\PostInc ) { $has_string_increment = true; if (!$result_type) { $result_type = Type::getNonEmptyString(); } else { $result_type = Type::combineUnionTypes(Type::getNonEmptyString(), $result_type); } $has_valid_left_operand = true; $has_valid_right_operand = true; return null; } if ($left_type_part instanceof TTemplateParam && $right_type_part instanceof TTemplateParam ) { $combined_type = Type::combineUnionTypes( $left_type_part->as, $right_type_part->as ); $combined_atomic_types = array_values($combined_type->getAtomicTypes()); if (\count($combined_atomic_types) 
<= 2) { $left_type_part = $combined_atomic_types[0]; $right_type_part = $combined_atomic_types[1] ?? $combined_atomic_types[0]; } } if ($left_type_part instanceof TMixed || $right_type_part instanceof TMixed || $left_type_part instanceof TTemplateParam || $right_type_part instanceof TTemplateParam ) { if ($statements_source && $codebase && $context) { if (!$context->collect_initializations && !$context->collect_mutations && $statements_source->getFilePath() === $statements_source->getRootFilePath() && (!(($source = $statements_source->getSource()) instanceof \Psalm\Internal\Analyzer\FunctionLikeAnalyzer) || !$source->getSource() instanceof \Psalm\Internal\Analyzer\TraitAnalyzer) ) { $codebase->analyzer->incrementMixedCount($statements_source->getFilePath()); } } if ($left_type_part instanceof TMixed || $left_type_part instanceof TTemplateParam) { if ($statements_source && IssueBuffer::accepts( new MixedOperand( 'Left operand cannot be mixed', new CodeLocation($statements_source, $left) ), $statements_source->getSuppressedIssues() )) { // fall through } } else { if ($statements_source && IssueBuffer::accepts( new MixedOperand( 'Right operand cannot be mixed', new CodeLocation($statements_source, $right) ), $statements_source->getSuppressedIssues() )) { // fall through } } if ($left_type_part instanceof TMixed && $left_type_part->from_loop_isset && $parent instanceof PhpParser\Node\Expr\AssignOp\Plus && !$right_type_part instanceof TMixed ) { $result_type_member = new Type\Union([$right_type_part]); if (!$result_type) { $result_type = $result_type_member; } else { $result_type = Type::combineUnionTypes($result_type_member, $result_type); } return null; } $from_loop_isset = (!($left_type_part instanceof TMixed) || $left_type_part->from_loop_isset) && (!($right_type_part instanceof TMixed) || $right_type_part->from_loop_isset); $result_type = Type::getMixed($from_loop_isset); return $result_type; } if ($statements_source && $codebase && $context) { if (!$context->collect_initializations && !$context->collect_mutations && $statements_source->getFilePath() === $statements_source->getRootFilePath() && (!(($parent_source = $statements_source->getSource()) instanceof \Psalm\Internal\Analyzer\FunctionLikeAnalyzer) || !$parent_source->getSource() instanceof \Psalm\Internal\Analyzer\TraitAnalyzer) ) { $codebase->analyzer->incrementNonMixedCount($statements_source->getFilePath()); } } if ($left_type_part instanceof TArray || $right_type_part instanceof TArray || $left_type_part instanceof TKeyedArray || $right_type_part instanceof TKeyedArray || $left_type_part instanceof TList || $right_type_part instanceof TList ) { if ((!$right_type_part instanceof TArray && !$right_type_part instanceof TKeyedArray && !$right_type_part instanceof TList) || (!$left_type_part instanceof TArray && !$left_type_part instanceof TKeyedArray && !$left_type_part instanceof TList) ) { if (!$left_type_part instanceof TArray && !$left_type_part instanceof TKeyedArray && !$left_type_part instanceof TList ) { $invalid_left_messages[] = 'Cannot add an array to a non-array ' . $left_type_part; } else { $invalid_right_messages[] = 'Cannot add an array to a non-array ' . 
$right_type_part; } if ($left_type_part instanceof TArray || $left_type_part instanceof TKeyedArray || $left_type_part instanceof TList ) { $has_valid_left_operand = true; } elseif ($right_type_part instanceof TArray || $right_type_part instanceof TKeyedArray || $right_type_part instanceof TList ) { $has_valid_right_operand = true; } $result_type = Type::getArray(); return null; } $has_valid_right_operand = true; $has_valid_left_operand = true; if ($left_type_part instanceof TKeyedArray && $right_type_part instanceof TKeyedArray ) { $definitely_existing_mixed_right_properties = array_diff_key( $right_type_part->properties, $left_type_part->properties ); $properties = $left_type_part->properties; foreach ($right_type_part->properties as $key => $type) { if (!isset($properties[$key])) { $properties[$key] = $type; } elseif ($properties[$key]->possibly_undefined) { $properties[$key] = Type::combineUnionTypes( $properties[$key], $type, $codebase ); $properties[$key]->possibly_undefined = $type->possibly_undefined; } } if (!$left_type_part->sealed) { foreach ($definitely_existing_mixed_right_properties as $key => $type) { $properties[$key] = Type::combineUnionTypes(Type::getMixed(), $type); } } $result_type_member = new Type\Union([new TKeyedArray($properties)]); } else { $result_type_member = TypeCombiner::combine( [$left_type_part, $right_type_part], $codebase, true ); } if (!$result_type) { $result_type = $result_type_member; } else { $result_type = Type::combineUnionTypes($result_type_member, $result_type, $codebase, true); } if ($left instanceof PhpParser\Node\Expr\ArrayDimFetch && $context && $statements_source instanceof StatementsAnalyzer ) { ArrayAssignmentAnalyzer::updateArrayType( $statements_source, $left, $right, $result_type, $context ); } return null; } if (($left_type_part instanceof TNamedObject && strtolower($left_type_part->value) === 'gmp') || ($right_type_part instanceof TNamedObject && strtolower($right_type_part->value) === 'gmp') ) { if ((($left_type_part instanceof TNamedObject && strtolower($left_type_part->value) === 'gmp') && (($right_type_part instanceof TNamedObject && strtolower($right_type_part->value) === 'gmp') || ($right_type_part->isNumericType() || $right_type_part instanceof TMixed))) || (($right_type_part instanceof TNamedObject && strtolower($right_type_part->value) === 'gmp') && (($left_type_part instanceof TNamedObject && strtolower($left_type_part->value) === 'gmp') || ($left_type_part->isNumericType() || $left_type_part instanceof TMixed))) ) { if (!$result_type) { $result_type = new Type\Union([new TNamedObject('GMP')]); } else { $result_type = Type::combineUnionTypes( new Type\Union([new TNamedObject('GMP')]), $result_type ); } } else { if ($statements_source && IssueBuffer::accepts( new InvalidOperand( 'Cannot add GMP to non-numeric type', new CodeLocation($statements_source, $parent) ), $statements_source->getSuppressedIssues() )) { // fall through } } return null; } if ($left_type_part instanceof Type\Atomic\TLiteralString) { if (preg_match('/^\-?\d+$/', $left_type_part->value)) { $left_type_part = new Type\Atomic\TLiteralInt((int) $left_type_part->value); } elseif (preg_match('/^\-?\d?\.\d+$/', $left_type_part->value)) { $left_type_part = new Type\Atomic\TLiteralFloat((float) $left_type_part->value); } } if ($right_type_part instanceof Type\Atomic\TLiteralString) { if (preg_match('/^\-?\d+$/', $right_type_part->value)) { $right_type_part = new Type\Atomic\TLiteralInt((int) $right_type_part->value); } elseif (preg_match('/^\-?\d?\.\d+$/', 
$right_type_part->value)) { $right_type_part = new Type\Atomic\TLiteralFloat((float) $right_type_part->value); } } if ($left_type_part->isNumericType() || $right_type_part->isNumericType()) { if (($left_type_part instanceof TNumeric || $right_type_part instanceof TNumeric) && ($left_type_part->isNumericType() && $right_type_part->isNumericType()) ) { if ($parent instanceof PhpParser\Node\Expr\BinaryOp\Mod) { $result_type = Type::getInt(); } elseif (!$result_type) { $result_type = Type::getNumeric(); } else { $result_type = Type::combineUnionTypes(Type::getNumeric(), $result_type); } $has_valid_right_operand = true; $has_valid_left_operand = true; return null; } if ($left_type_part instanceof TInt && $right_type_part instanceof TInt) { if ($parent instanceof PhpParser\Node\Expr\BinaryOp\Div) { $result_type = new Type\Union([new Type\Atomic\TInt(), new Type\Atomic\TFloat()]); } else { $left_is_positive = $left_type_part instanceof TPositiveInt || ($left_type_part instanceof TLiteralInt && $left_type_part->value > 0); $right_is_positive = $right_type_part instanceof TPositiveInt || ($right_type_part instanceof TLiteralInt && $right_type_part->value > 0); if ($parent instanceof PhpParser\Node\Expr\BinaryOp\Minus) { $always_positive = false; } elseif ($left_is_positive && $right_is_positive) { if ($parent instanceof PhpParser\Node\Expr\BinaryOp\BitwiseXor || $parent instanceof PhpParser\Node\Expr\BinaryOp\BitwiseAnd || $parent instanceof PhpParser\Node\Expr\BinaryOp\ShiftLeft || $parent instanceof PhpParser\Node\Expr\BinaryOp\ShiftRight ) { $always_positive = false; } else { $always_positive = true; } } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\Plus && ($left_type_part instanceof TLiteralInt && $left_type_part->value === 0) && $right_is_positive ) { $always_positive = true; } elseif ($parent instanceof PhpParser\Node\Expr\BinaryOp\Plus && ($right_type_part instanceof TLiteralInt && $right_type_part->value === 0) && $left_is_positive ) { $always_positive = true; } else { $always_positive = false; } if ($parent instanceof PhpParser\Node\Expr\BinaryOp\Mod) { if ($always_positive) { if ($right_type_part instanceof TLiteralInt && $right_type_part->value === 1) { $result_type = Type::getInt(true, 0); } else { $result_type = new Type\Union([ new Type\Atomic\TPositiveInt(), new TLiteralInt(0) ]); } } else { $result_type = Type::getInt(); } } elseif (!$result_type) { $result_type = $always_positive ? Type::getPositiveInt(true) : Type::getInt(true); } else { $result_type = Type::combineUnionTypes( $always_positive ? 
Type::getPositiveInt(true) : Type::getInt(true), $result_type ); } } $has_valid_right_operand = true; $has_valid_left_operand = true; return null; } if ($left_type_part instanceof TFloat && $right_type_part instanceof TFloat) { if ($parent instanceof PhpParser\Node\Expr\BinaryOp\Mod) { $result_type = Type::getInt(); } elseif (!$result_type) { $result_type = Type::getFloat(); } else { $result_type = Type::combineUnionTypes(Type::getFloat(), $result_type); } $has_valid_right_operand = true; $has_valid_left_operand = true; return null; } if (($left_type_part instanceof TFloat && $right_type_part instanceof TInt) || ($left_type_part instanceof TInt && $right_type_part instanceof TFloat) ) { if ($config->strict_binary_operands) { if ($statements_source && IssueBuffer::accepts( new InvalidOperand( 'Cannot add ints to floats', new CodeLocation($statements_source, $parent) ), $statements_source->getSuppressedIssues() )) { // fall through } } if ($parent instanceof PhpParser\Node\Expr\BinaryOp\Mod) { $result_type = Type::getInt(); } elseif (!$result_type) { $result_type = Type::getFloat(); } else { $result_type = Type::combineUnionTypes(Type::getFloat(), $result_type); } $has_valid_right_operand = true; $has_valid_left_operand = true; return null; } if ($left_type_part->isNumericType() && $right_type_part->isNumericType()) { if ($config->strict_binary_operands) { if ($statements_source && IssueBuffer::accepts( new InvalidOperand( 'Cannot add numeric types together, please cast explicitly', new CodeLocation($statements_source, $parent) ), $statements_source->getSuppressedIssues() )) { // fall through } } if ($parent instanceof PhpParser\Node\Expr\BinaryOp\Mod) { $result_type = Type::getInt(); } else { $result_type = new Type\Union([new Type\Atomic\TInt, new Type\Atomic\TFloat]); } $has_valid_right_operand = true; $has_valid_left_operand = true; return null; } if (!$left_type_part->isNumericType()) { $invalid_left_messages[] = 'Cannot perform a numeric operation with a non-numeric type ' . $left_type_part; $has_valid_right_operand = true; } else { $invalid_right_messages[] = 'Cannot perform a numeric operation with a non-numeric type ' . $right_type_part; $has_valid_left_operand = true; } } else { $invalid_left_messages[] = 'Cannot perform a numeric operation with non-numeric types ' . $left_type_part . ' and ' . $right_type_part; } return null; } }
1
10,540
`assert(is_int($result));` would be preferable I think, even if we know it can't ever be false
vimeo-psalm
php
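The review comment above suggests replacing the `/** @var int $result */` annotation in the proposed `getNumericalType()` helper with a runtime `assert()`. The following is a minimal sketch of that variant, not code from the pull request; it reuses the `Type::getInt()` / `Type::getFloat()` factories and the `PHP_INT_MAX` / `PHP_INT_MIN` bounds shown in the diff, and it is meant to be read in the context of `NonDivArithmeticOpAnalyzer`:

```php
/**
 * Sketch of the helper with the reviewer's assert() in place of the @var
 * suppression. The bounds check only proves the value fits in the int range;
 * the assert additionally documents the expectation that callers only pass
 * floats produced by integer overflow, so it should never fire at the
 * integer-arithmetic call sites targeted by the diff.
 *
 * @param int|float $result
 */
private static function getNumericalType($result): Type\Union
{
    if ($result <= PHP_INT_MAX && $result >= PHP_INT_MIN) {
        assert(is_int($result));

        return Type::getInt(false, $result);
    }

    return Type::getFloat($result);
}
```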
@@ -162,14 +162,14 @@ class Cart /** * @return \Shopsys\FrameworkBundle\Model\Order\Item\QuantifiedProduct[] */ - public function getQuantifiedProductsIndexedByItemId() + public function getQuantifiedProducts() { - $quantifiedProductsByItemId = []; + $quantifiedProducts = []; foreach ($this->items as $item) { - $quantifiedProductsByItemId[$item->getId()] = new QuantifiedProduct($item->getProduct(), $item->getQuantity()); + $quantifiedProducts[] = new QuantifiedProduct($item->getProduct(), $item->getQuantity()); } - return $quantifiedProductsByItemId; + return $quantifiedProducts; } /**
1
<?php namespace Shopsys\FrameworkBundle\Model\Cart; use DateTime; use Doctrine\Common\Collections\ArrayCollection; use Doctrine\ORM\Mapping as ORM; use Shopsys\FrameworkBundle\Model\Cart\Item\CartItem; use Shopsys\FrameworkBundle\Model\Cart\Item\CartItemFactoryInterface; use Shopsys\FrameworkBundle\Model\Customer\User; use Shopsys\FrameworkBundle\Model\Order\Item\QuantifiedProduct; use Shopsys\FrameworkBundle\Model\Product\Pricing\ProductPriceCalculationForUser; use Shopsys\FrameworkBundle\Model\Product\Product; /** * @ORM\Table(name="carts") * @ORM\Entity */ class Cart { /** * @var int * * @ORM\Column(type="integer") * @ORM\Id * @ORM\GeneratedValue(strategy="IDENTITY") */ protected $id; /** * @var string * * @ORM\Column(type="string", length=127) */ protected $cartIdentifier; /** * @var \Shopsys\FrameworkBundle\Model\Customer\User|null * * @ORM\ManyToOne(targetEntity="Shopsys\FrameworkBundle\Model\Customer\User") * @ORM\JoinColumn(name="user_id", referencedColumnName="id", nullable = true, onDelete="CASCADE") */ protected $user; /** * @var \Shopsys\FrameworkBundle\Model\Cart\Item\CartItem[] * * @ORM\OneToMany( * targetEntity="Shopsys\FrameworkBundle\Model\Cart\Item\CartItem", * mappedBy="cart", * cascade={"remove"}, * orphanRemoval=true * ) * @ORM\OrderBy({"id" = "DESC"}) */ protected $items; /** * @var \DateTime * * @ORM\Column(type="datetime") */ protected $modifiedAt; /** * @param string $cartIdentifier * @param \Shopsys\FrameworkBundle\Model\Customer\User|null $user */ public function __construct(string $cartIdentifier, User $user = null) { $this->cartIdentifier = $cartIdentifier; $this->user = $user; $this->items = new ArrayCollection(); $this->modifiedAt = new DateTime(); } /** * @param \Shopsys\FrameworkBundle\Model\Cart\Item\CartItem $item */ public function addItem(CartItem $item) { if (!$this->items->contains($item)) { $this->items->add($item); $this->setModifiedNow(); } } /** * @param int $itemId */ public function removeItemById($itemId) { foreach ($this->items as $key => $item) { if ($item->getId() === $itemId) { $this->items->removeElement($item); $this->setModifiedNow(); return; } } $message = 'Cart item with ID = ' . $itemId . ' is not in cart for remove.'; throw new \Shopsys\FrameworkBundle\Model\Cart\Exception\InvalidCartItemException($message); } public function clean() { $this->items->clear(); } /** * @return \Shopsys\FrameworkBundle\Model\Cart\Item\CartItem[] */ public function getItems() { return $this->items->toArray(); } /** * @return int */ public function getItemsCount() { return $this->items->count(); } /** * @return bool */ public function isEmpty() { return $this->getItemsCount() === 0; } /** * @param array $quantitiesByItemId */ public function changeQuantities(array $quantitiesByItemId) { foreach ($this->items as $item) { if (array_key_exists($item->getId(), $quantitiesByItemId)) { $item->changeQuantity($quantitiesByItemId[$item->getId()]); } } $this->setModifiedNow(); } /** * @param int $itemId * @return \Shopsys\FrameworkBundle\Model\Cart\Item\CartItem */ public function getItemById($itemId) { foreach ($this->items as $item) { if ($item->getId() === $itemId) { return $item; } } $message = 'CartItem with id = ' . $itemId . 
' not found in cart.'; throw new \Shopsys\FrameworkBundle\Model\Cart\Exception\InvalidCartItemException($message); } /** * @return \Shopsys\FrameworkBundle\Model\Order\Item\QuantifiedProduct[] */ public function getQuantifiedProductsIndexedByItemId() { $quantifiedProductsByItemId = []; foreach ($this->items as $item) { $quantifiedProductsByItemId[$item->getId()] = new QuantifiedProduct($item->getProduct(), $item->getQuantity()); } return $quantifiedProductsByItemId; } /** * @param \Shopsys\FrameworkBundle\Model\Cart\Cart $cartToMerge * @param \Shopsys\FrameworkBundle\Model\Cart\Item\CartItemFactoryInterface $cartItemFactory */ public function mergeWithCart(self $cartToMerge, CartItemFactoryInterface $cartItemFactory) { foreach ($cartToMerge->getItems() as $itemToMerge) { $similarItem = $this->findSimilarItemByItem($itemToMerge); if ($similarItem instanceof CartItem) { $similarItem->changeQuantity($similarItem->getQuantity() + $itemToMerge->getQuantity()); } else { $newCartItem = $cartItemFactory->create( $this, $itemToMerge->getProduct(), $itemToMerge->getQuantity(), $itemToMerge->getWatchedPrice() ); $this->addItem($newCartItem); } } $this->setModifiedNow(); } /** * @param \Shopsys\FrameworkBundle\Model\Cart\Item\CartItem $item * @return \Shopsys\FrameworkBundle\Model\Cart\Item\CartItem|null */ protected function findSimilarItemByItem(CartItem $item) { foreach ($this->items as $similarItem) { if ($similarItem->isSimilarItemAs($item)) { return $similarItem; } } return null; } /** * @param \Shopsys\FrameworkBundle\Model\Product\Product $product * @param int $quantity * @param \Shopsys\FrameworkBundle\Model\Product\Pricing\ProductPriceCalculationForUser $productPriceCalculation * @param \Shopsys\FrameworkBundle\Model\Cart\Item\CartItemFactoryInterface $cartItemFactory * @return \Shopsys\FrameworkBundle\Model\Cart\AddProductResult */ public function addProduct( Product $product, $quantity, ProductPriceCalculationForUser $productPriceCalculation, CartItemFactoryInterface $cartItemFactory ) { if (!is_int($quantity) || $quantity <= 0) { throw new \Shopsys\FrameworkBundle\Model\Cart\Exception\InvalidQuantityException($quantity); } foreach ($this->items as $item) { if ($item->getProduct() === $product) { $item->changeQuantity($item->getQuantity() + $quantity); $item->changeAddedAt(new DateTime()); return new AddProductResult($item, false, $quantity); } } $productPrice = $productPriceCalculation->calculatePriceForCurrentUser($product); $newCartItem = $cartItemFactory->create($this, $product, $quantity, $productPrice->getPriceWithVat()->toValue()); $this->addItem($newCartItem); $this->setModifiedNow(); return new AddProductResult($newCartItem, true, $quantity); } /** * @return string */ public function getCartIdentifier() { return $this->cartIdentifier; } protected function setModifiedNow() { $this->modifiedAt = new DateTime(); } /** * @param \DateTime $modifiedAt */ public function setModifiedAt(DateTime $modifiedAt) { $this->modifiedAt = $modifiedAt; } }
1
14,983
I'm unfortunately unable to review whether you've changed everything that used to use cartIds
shopsys-shopsys
php
@@ -3,14 +3,15 @@ package de.danoeh.antennapod.dialog; import android.app.Dialog; import android.content.Context; import android.content.SharedPreferences; +import android.util.Log; + import androidx.annotation.Nullable; import androidx.annotation.VisibleForTesting; -import android.util.Log; +import androidx.appcompat.app.AlertDialog; import java.lang.ref.WeakReference; import java.util.concurrent.TimeUnit; -import androidx.appcompat.app.AlertDialog; import de.danoeh.antennapod.R; import de.danoeh.antennapod.core.util.IntentUtils;
1
package de.danoeh.antennapod.dialog; import android.app.Dialog; import android.content.Context; import android.content.SharedPreferences; import androidx.annotation.Nullable; import androidx.annotation.VisibleForTesting; import android.util.Log; import java.lang.ref.WeakReference; import java.util.concurrent.TimeUnit; import androidx.appcompat.app.AlertDialog; import de.danoeh.antennapod.R; import de.danoeh.antennapod.core.util.IntentUtils; public class RatingDialog { private RatingDialog(){} private static final String TAG = RatingDialog.class.getSimpleName(); private static final int AFTER_DAYS = 7; private static WeakReference<Context> mContext; private static SharedPreferences mPreferences; private static Dialog mDialog; private static final String PREFS_NAME = "RatingPrefs"; private static final String KEY_RATED = "KEY_WAS_RATED"; private static final String KEY_FIRST_START_DATE = "KEY_FIRST_HIT_DATE"; public static void init(Context context) { mContext = new WeakReference<>(context); mPreferences = context.getSharedPreferences(PREFS_NAME, Context.MODE_PRIVATE); long firstDate = mPreferences.getLong(KEY_FIRST_START_DATE, 0); if (firstDate == 0) { resetStartDate(); } } public static void check() { if (mDialog != null && mDialog.isShowing()) { return; } if (shouldShow()) { try { mDialog = createDialog(); if (mDialog != null) { mDialog.show(); } } catch (Exception e) { Log.e(TAG, Log.getStackTraceString(e)); } } } private static void rateNow() { Context context = mContext.get(); if (context == null) { return; } IntentUtils.openInBrowser(context, "https://play.google.com/store/apps/details?id=de.danoeh.antennapod"); saveRated(); } private static boolean rated() { return mPreferences.getBoolean(KEY_RATED, false); } @VisibleForTesting public static void saveRated() { mPreferences .edit() .putBoolean(KEY_RATED, true) .apply(); } private static void resetStartDate() { mPreferences .edit() .putLong(KEY_FIRST_START_DATE, System.currentTimeMillis()) .apply(); } private static boolean shouldShow() { if (rated()) { return false; } long now = System.currentTimeMillis(); long firstDate = mPreferences.getLong(KEY_FIRST_START_DATE, now); long diff = now - firstDate; long diffDays = TimeUnit.DAYS.convert(diff, TimeUnit.MILLISECONDS); return diffDays >= AFTER_DAYS; } @Nullable private static AlertDialog createDialog() { Context context = mContext.get(); if (context == null) { return null; } return new AlertDialog.Builder(context) .setTitle(R.string.rating_title) .setMessage(R.string.rating_message) .setPositiveButton(R.string.rating_now_label, (dialog, which) -> rateNow()) .setNegativeButton(R.string.rating_never_label, (dialog, which) -> saveRated()) .setNeutralButton(R.string.rating_later_label, (dialog, which) -> resetStartDate()) .setOnCancelListener(dialog1 -> resetStartDate()) .create(); } }
1
18,208
Please don't touch unrelated files to keep the git history clean
AntennaPod-AntennaPod
java
@@ -0,0 +1,19 @@ +module AbTests + class LandingHeadlineTest < Base + def setup + variation(ab_test(test_name, "orig", "v1")) + end + + def finish + finished(test_name) + end + + private + + def variation(key) + # The :name value isn't needed in all variations. Passing it when it is + # not needed allows for simpler code and will do no harm. + I18n.t("headlines.landing.#{key}", name: I18n.t('shared.subscription.name')) + end + end +end
1
1
8,662
Per our style guide, I don't think we indent the `private` keyword. Would you mind fixing that?
thoughtbot-upcase
rb
@@ -54,7 +54,7 @@ def gen_gaussian_target(heatmap, center, radius, k=1): masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian_kernel[radius - top:radius + bottom, radius - left:radius + right] - out_heatmap = torch.zeros_like(heatmap) + out_heatmap = heatmap torch.max( masked_heatmap, masked_gaussian * k,
1
from math import sqrt import torch def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'): """Generate 2D gaussian kernel. Args: radius (int): Radius of gaussian kernel. sigma (int): Sigma of gaussian function. Default: 1. dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32. device (str): Device of gaussian tensor. Default: 'cpu'. Returns: h (Tensor): Gaussian kernel with a ``(2 * radius + 1) * (2 * radius + 1)`` shape. """ x = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(1, -1) y = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(-1, 1) h = (-(x * x + y * y) / (2 * sigma * sigma)).exp() h[h < torch.finfo(h.dtype).eps * h.max()] = 0 return h def gen_gaussian_target(heatmap, center, radius, k=1): """Generate 2D gaussian heatmap. Args: heatmap (Tensor): Input heatmap, the gaussian kernel will cover on it and maintain the max value. center (list[int]): Coord of gaussian kernel's center. radius (int): Radius of gaussian kernel. k (int): Coefficient of gaussian kernel. Default: 1. Returns: out_heatmap (Tensor): Updated heatmap covered by gaussian kernel. """ diameter = 2 * radius + 1 gaussian_kernel = gaussian2D( radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device) x, y = center height, width = heatmap.shape[:2] left, right = min(x, radius), min(width - x, radius + 1) top, bottom = min(y, radius), min(height - y, radius + 1) masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian_kernel[radius - top:radius + bottom, radius - left:radius + right] out_heatmap = torch.zeros_like(heatmap) torch.max( masked_heatmap, masked_gaussian * k, out=out_heatmap[y - top:y + bottom, x - left:x + right]) return out_heatmap def gaussian_radius(det_size, min_overlap): r"""Generate 2D gaussian radius. This function is modified from the `official github repo <https://github.com/princeton-vl/CornerNet-Lite/blob/master/core/sample/ utils.py#L65>`_. Given ``min_overlap``, radius could computed by a quadratic equation according to Vieta's formulas. There are 3 cases for computing gaussian radius, details are following: - Explanation of figure: ``lt`` and ``br`` indicates the left-top and bottom-right corner of ground truth box. ``x`` indicates the generated corner at the limited position when ``radius=r``. - Case1: one corner is inside the gt box and the other is outside. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x----------+--+ | | | | | | | | height | | overlap | | | | | | | | | | v +--+---------br--+ - | | | +----------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\ {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case2: both two corners are inside the gt box. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x-------+ | | | | | | |overlap| | height | | | | | +-------x--+ | | | v +----------+-br - To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\ {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case3: both two corners are outside the gt box. .. 
code:: text |< width >| x--+----------------+ | | | +-lt-------------+ | - | | | | ^ | | | | | | overlap | | height | | | | | | | | v | +------------br--+ - | | | +----------------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\ {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\ {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a} Args: det_size (list[int]): Shape of object. min_overlap (float): Min IoU with ground truth for boxes generated by keypoints inside the gaussian kernel. Returns: radius (int): Radius of gaussian kernel. """ height, width = det_size a1 = 1 b1 = (height + width) c1 = width * height * (1 - min_overlap) / (1 + min_overlap) sq1 = sqrt(b1**2 - 4 * a1 * c1) r1 = (b1 - sq1) / (2 * a1) a2 = 4 b2 = 2 * (height + width) c2 = (1 - min_overlap) * width * height sq2 = sqrt(b2**2 - 4 * a2 * c2) r2 = (b2 - sq2) / (2 * a2) a3 = 4 * min_overlap b3 = -2 * min_overlap * (height + width) c3 = (min_overlap - 1) * width * height sq3 = sqrt(b3**2 - 4 * a3 * c3) r3 = (b3 + sq3) / (2 * a3) return min(r1, r2, r3)
1
20,990
Will this change the input `heatmap`? Is this behavior expected or not?
open-mmlab-mmdetection
py
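The review question above ("Will this change the input `heatmap`?") can be answered with a quick experiment. The sketch below is not taken from the mmdetection test suite; the shapes, the 3x3 stand-in kernel, and the slice bounds are invented for illustration, and it only assumes a standard PyTorch install. It shows that once `out_heatmap` simply aliases `heatmap`, the `out=` write in `torch.max` lands in the caller's tensor, whereas the `torch.zeros_like` variant leaves the input untouched.

```python
# Minimal sketch, assuming PyTorch. Shapes, values, and slice bounds are
# invented for illustration; they are not from the mmdetection code or tests.
import torch

heatmap = torch.zeros(6, 6)
gaussian = torch.full((3, 3), 0.5)   # stand-in for masked_gaussian * k

# Variant from the diff: out_heatmap aliases the input tensor, so writing
# through the out= view mutates the caller's heatmap.
out_heatmap = heatmap
torch.max(heatmap[2:5, 2:5], gaussian, out=out_heatmap[2:5, 2:5])
print(heatmap.max().item())          # 0.5 -> the input heatmap was modified

# Original variant: the result goes into a fresh tensor and the input stays
# untouched (though the returned map then only carries the masked region).
heatmap = torch.zeros(6, 6)
out_heatmap = torch.zeros_like(heatmap)
torch.max(heatmap[2:5, 2:5], gaussian, out=out_heatmap[2:5, 2:5])
print(heatmap.max().item())          # 0.0 -> the input heatmap is unchanged
```

Whether that in-place mutation is acceptable is exactly what the reviewer is asking; if it is intended, stating it in the docstring of `gen_gaussian_target` would make the behavior explicit for callers.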
@@ -411,7 +411,7 @@ func init() { runtime.RegisterPackageValue("strings", "lastIndex", generateDualArgStringFunctionReturnInt("lastIndex", []string{stringArgV, substr}, strings.LastIndex)) runtime.RegisterPackageValue("strings", "lastIndexAny", - generateDualArgStringFunctionReturnInt("lastIndexAny", []string{stringArgV, substr}, strings.LastIndexAny)) + generateDualArgStringFunctionReturnInt("lastIndexAny", []string{stringArgV, chars}, strings.LastIndexAny)) runtime.RegisterPackageValue("strings", "isDigit", generateUnicodeIsFunction("isDigit", unicode.IsDigit)) runtime.RegisterPackageValue("strings", "isLetter",
1
package strings import ( "context" "fmt" "strings" "unicode" "unicode/utf8" "github.com/influxdata/flux/interpreter" "github.com/influxdata/flux/runtime" "github.com/influxdata/flux/semantic" "github.com/influxdata/flux/values" ) var SpecialFns map[string]values.Function const ( stringArgV = "v" stringArgT = "t" stringArgU = "u" cutset = "cutset" prefix = "prefix" suffix = "suffix" substr = "substr" chars = "chars" integer = "i" start = "start" end = "end" ) func generateSingleArgStringFunction(name string, stringFn func(string) string) values.Function { return values.NewFunction( name, runtime.MustLookupBuiltinType("strings", name), func(ctx context.Context, args values.Object) (values.Value, error) { var str string v, ok := args.Get(stringArgV) if !ok { return nil, fmt.Errorf("missing argument %q", stringArgV) } if !v.IsNull() && v.Type().Nature() == semantic.String { str = v.Str() str = stringFn(str) return values.NewString(str), nil } return nil, fmt.Errorf("cannot convert argument of type %v value %v to upper case", v.Type().Nature(), v) }, false, ) } func generateDualArgStringFunction(name string, argNames []string, stringFn func(string, string) string) values.Function { if len(argNames) != 2 { panic("unexpected number of argument names") } return values.NewFunction( name, runtime.MustLookupBuiltinType("strings", name), func(ctx context.Context, args values.Object) (values.Value, error) { var argVals = make([]values.Value, 2) for i, name := range argNames { val, ok := args.Get(name) if !ok { return nil, fmt.Errorf("missing argument %q", name) } if val.IsNull() || val.Type().Nature() != semantic.String { return nil, fmt.Errorf("expected argument %q to be of type %v, got type %v value %v", name, semantic.String, val.Type().Nature(), val) } argVals[i] = val } return values.NewString(stringFn(argVals[0].Str(), argVals[1].Str())), nil }, false, ) } func generateDualArgStringFunctionReturnBool(name string, argNames []string, stringFn func(string, string) bool) values.Function { if len(argNames) != 2 { panic("unexpected number of argument names") } return values.NewFunction( name, runtime.MustLookupBuiltinType("strings", name), func(ctx context.Context, args values.Object) (values.Value, error) { var argVals = make([]values.Value, 2) for i, name := range argNames { val, ok := args.Get(name) if !ok { return nil, fmt.Errorf("missing argument %q", name) } if val.IsNull() || val.Type().Nature() != semantic.String { return nil, fmt.Errorf("expected argument %q to be of type %v, got type %v value %v", name, semantic.String, val.Type().Nature(), val) } argVals[i] = val } return values.NewBool(bool(stringFn(argVals[0].Str(), argVals[1].Str()))), nil }, false, ) } func generateDualArgStringFunctionReturnInt(name string, argNames []string, stringFn func(string, string) int) values.Function { if len(argNames) != 2 { panic("unexpected number of argument names") } return values.NewFunction( name, runtime.MustLookupBuiltinType("strings", name), func(ctx context.Context, args values.Object) (values.Value, error) { var argVals = make([]values.Value, 2) for i, name := range argNames { val, ok := args.Get(name) if !ok { return nil, fmt.Errorf("missing argument %q", name) } if val.IsNull() || val.Type().Nature() != semantic.String { return nil, fmt.Errorf("expected argument %q to be of type %v, got type %v value %v", name, semantic.String, val.Type().Nature(), val) } argVals[i] = val } return values.NewInt(int64(stringFn(argVals[0].Str(), argVals[1].Str()))), nil }, false, ) } func generateSplit(name string, 
argNames []string, fn func(string, string) []string) values.Function { return values.NewFunction( name, runtime.MustLookupBuiltinType("strings", name), func(ctx context.Context, args values.Object) (values.Value, error) { var argVals = make([]values.Value, 2) for i, name := range argNames { val, ok := args.Get(name) if !ok { return nil, fmt.Errorf("missing argument %q", name) } if val.IsNull() || val.Type().Nature() != semantic.String { return nil, fmt.Errorf("expected argument %q to be of type %v, got type %v value %v", name, semantic.String, val.Type().Nature(), val) } argVals[i] = val } result := fn(argVals[0].Str(), argVals[1].Str()) var resultValue []values.Value for _, v := range result { resultValue = append(resultValue, values.NewString(v)) } return values.NewArrayWithBacking(semantic.NewArrayType(semantic.BasicString), resultValue), nil }, false, ) } func generateSplitN(name string, argNames []string, fn func(string, string, int) []string) values.Function { return values.NewFunction( name, runtime.MustLookupBuiltinType("strings", name), func(ctx context.Context, args values.Object) (values.Value, error) { var argVals = make([]values.Value, 3) var argTypes = []semantic.Nature{semantic.String, semantic.String, semantic.Int} for i, name := range argNames { val, ok := args.Get(name) if !ok { return nil, fmt.Errorf("missing argument %q", name) } if val.IsNull() || val.Type().Nature() != argTypes[i] { return nil, fmt.Errorf("expected argument %q to be of type %v, got type %v value %v", name, argTypes[i], val.Type().Nature(), val) } argVals[i] = val } result := fn(argVals[0].Str(), argVals[1].Str(), int(argVals[2].Int())) var resultValue []values.Value for _, v := range result { resultValue = append(resultValue, values.NewString(v)) } return values.NewArrayWithBacking(semantic.NewArrayType(semantic.BasicString), resultValue), nil }, false, ) } func generateRepeat(name string, argNames []string, fn func(string, int) string) values.Function { return values.NewFunction( name, runtime.MustLookupBuiltinType("strings", name), func(ctx context.Context, args values.Object) (values.Value, error) { var argVals = make([]values.Value, 2) var argType = []semantic.Nature{semantic.String, semantic.Int} for i, name := range argNames { val, ok := args.Get(name) if !ok { return nil, fmt.Errorf("missing argument %q", name) } if val.IsNull() || val.Type().Nature() != argType[i] { return nil, fmt.Errorf("expected argument %q to be of type %v, got type %v value %v", name, argType[i], val.Type().Nature(), val) } argVals[i] = val } return values.NewString(fn(argVals[0].Str(), int(argVals[1].Int()))), nil }, false, ) } func generateReplace(name string, argNames []string, fn func(string, string, string, int) string) values.Function { return values.NewFunction( name, runtime.MustLookupBuiltinType("strings", name), func(ctx context.Context, args values.Object) (values.Value, error) { var argVals = make([]values.Value, 4) var argType = []semantic.Nature{semantic.String, semantic.String, semantic.String, semantic.Int} for i, name := range argNames { val, ok := args.Get(name) if !ok { return nil, fmt.Errorf("missing argument %q", name) } if val.IsNull() || val.Type().Nature() != argType[i] { return nil, fmt.Errorf("expected argument %q to be of type %v, got type %v value %v", name, argType[i], val.Type().Nature(), val) } argVals[i] = val } return values.NewString(fn(argVals[0].Str(), argVals[1].Str(), argVals[2].Str(), int(argVals[3].Int()))), nil }, false, ) } func generateReplaceAll(name string, argNames []string, 
fn func(string, string, string) string) values.Function { return values.NewFunction( name, runtime.MustLookupBuiltinType("strings", name), func(ctx context.Context, args values.Object) (values.Value, error) { var argVals = make([]values.Value, 3) for i, name := range argNames { val, ok := args.Get(name) if !ok { return nil, fmt.Errorf("missing argument %q", name) } if val.IsNull() || val.Type().Nature() != semantic.String { return nil, fmt.Errorf("expected argument %q to be of type %v, got type %v value %v", name, semantic.String, val.Type().Nature(), val) } argVals[i] = val } return values.NewString(fn(argVals[0].Str(), argVals[1].Str(), argVals[2].Str())), nil }, false, ) } func generateUnicodeIsFunction(name string, Fn func(rune) bool) values.Function { return values.NewFunction( name, runtime.MustLookupBuiltinType("strings", name), func(ctx context.Context, args values.Object) (values.Value, error) { var str string v, ok := args.Get(stringArgV) if !ok { return nil, fmt.Errorf("missing argument %q", stringArgV) } if !v.IsNull() && v.Type().Nature() == semantic.String { str = v.Str() b := []byte(str) if len(b) != 1 { return nil, fmt.Errorf("%q is not a valid argument: argument length is not equal to 1", stringArgV) } val := b[0] r := rune(val) boolValue := Fn(r) return values.NewBool(boolValue), nil } return nil, fmt.Errorf("procedure cannot be executed") }, false, ) } var strlen = values.NewFunction( "strlen", runtime.MustLookupBuiltinType("strings", "strlen"), func(ctx context.Context, args values.Object) (values.Value, error) { v, ok := args.Get(stringArgV) if !ok { return nil, fmt.Errorf("missing argument %q", stringArgV) } if !v.IsNull() && v.Type().Nature() == semantic.String { return values.NewInt(int64(utf8.RuneCountInString(v.Str()))), nil } return nil, fmt.Errorf("procedure cannot be executed") }, false, ) var substring = values.NewFunction( "substring", runtime.MustLookupBuiltinType("strings", "substring"), func(ctx context.Context, args values.Object) (values.Value, error) { return interpreter.DoFunctionCallContext(func(ctx context.Context, args interpreter.Arguments) (values.Value, error) { v, err := args.GetRequiredString(stringArgV) if err != nil { return nil, err } a, err := args.GetRequiredInt(start) if err != nil { return nil, err } b, err := args.GetRequiredInt(end) if err != nil { return nil, err } s := []rune(v) if a < 0 { a = 0 } if b > int64(len(v)) { b = int64(len(v)) } return values.NewString(string(s[a:b])), nil }, ctx, args) }, false, ) func init() { runtime.RegisterPackageValue("strings", "strlen", strlen) runtime.RegisterPackageValue("strings", "substring", substring) runtime.RegisterPackageValue("strings", "trim", generateDualArgStringFunction("trim", []string{stringArgV, cutset}, strings.Trim)) runtime.RegisterPackageValue("strings", "trimSpace", generateSingleArgStringFunction("trimSpace", strings.TrimSpace)) runtime.RegisterPackageValue("strings", "trimPrefix", generateDualArgStringFunction("trimSuffix", []string{stringArgV, prefix}, strings.TrimPrefix)) runtime.RegisterPackageValue("strings", "trimSuffix", generateDualArgStringFunction("trimSuffix", []string{stringArgV, suffix}, strings.TrimSuffix)) runtime.RegisterPackageValue("strings", "title", generateSingleArgStringFunction("title", strings.Title)) runtime.RegisterPackageValue("strings", "toUpper", generateSingleArgStringFunction("toUpper", strings.ToUpper)) runtime.RegisterPackageValue("strings", "toLower", generateSingleArgStringFunction("toLower", strings.ToLower)) 
runtime.RegisterPackageValue("strings", "trimRight", generateDualArgStringFunction("trimRight", []string{stringArgV, cutset}, strings.TrimRight)) runtime.RegisterPackageValue("strings", "trimLeft", generateDualArgStringFunction("trimLeft", []string{stringArgV, cutset}, strings.TrimLeft)) runtime.RegisterPackageValue("strings", "toTitle", generateSingleArgStringFunction("toTitle", strings.ToTitle)) runtime.RegisterPackageValue("strings", "hasPrefix", generateDualArgStringFunctionReturnBool("hasPrefix", []string{stringArgV, prefix}, strings.HasPrefix)) runtime.RegisterPackageValue("strings", "hasSuffix", generateDualArgStringFunctionReturnBool("hasSuffix", []string{stringArgV, suffix}, strings.HasSuffix)) runtime.RegisterPackageValue("strings", "containsStr", generateDualArgStringFunctionReturnBool("containsStr", []string{stringArgV, substr}, strings.Contains)) runtime.RegisterPackageValue("strings", "containsAny", generateDualArgStringFunctionReturnBool("containsAny", []string{stringArgV, chars}, strings.ContainsAny)) runtime.RegisterPackageValue("strings", "equalFold", generateDualArgStringFunctionReturnBool("equalFold", []string{stringArgV, stringArgT}, strings.EqualFold)) runtime.RegisterPackageValue("strings", "compare", generateDualArgStringFunctionReturnInt("compare", []string{stringArgV, stringArgT}, strings.Compare)) runtime.RegisterPackageValue("strings", "countStr", generateDualArgStringFunctionReturnInt("countStr", []string{stringArgV, substr}, strings.Count)) runtime.RegisterPackageValue("strings", "index", generateDualArgStringFunctionReturnInt("index", []string{stringArgV, substr}, strings.Index)) runtime.RegisterPackageValue("strings", "indexAny", generateDualArgStringFunctionReturnInt("indexAny", []string{stringArgV, chars}, strings.IndexAny)) runtime.RegisterPackageValue("strings", "lastIndex", generateDualArgStringFunctionReturnInt("lastIndex", []string{stringArgV, substr}, strings.LastIndex)) runtime.RegisterPackageValue("strings", "lastIndexAny", generateDualArgStringFunctionReturnInt("lastIndexAny", []string{stringArgV, substr}, strings.LastIndexAny)) runtime.RegisterPackageValue("strings", "isDigit", generateUnicodeIsFunction("isDigit", unicode.IsDigit)) runtime.RegisterPackageValue("strings", "isLetter", generateUnicodeIsFunction("isLetter", unicode.IsLetter)) runtime.RegisterPackageValue("strings", "isLower", generateUnicodeIsFunction("isLower", unicode.IsLower)) runtime.RegisterPackageValue("strings", "isUpper", generateUnicodeIsFunction("isUpper", unicode.IsUpper)) runtime.RegisterPackageValue("strings", "repeat", generateRepeat("repeat", []string{stringArgV, integer}, strings.Repeat)) runtime.RegisterPackageValue("strings", "replace", generateReplace("replace", []string{stringArgV, stringArgT, stringArgU, integer}, strings.Replace)) runtime.RegisterPackageValue("strings", "replaceAll", generateReplaceAll("replaceAll", []string{stringArgV, stringArgT, stringArgU}, replaceAll)) runtime.RegisterPackageValue("strings", "split", generateSplit("split", []string{stringArgV, stringArgT}, strings.Split)) runtime.RegisterPackageValue("strings", "splitAfter", generateSplit("splitAfter", []string{stringArgV, stringArgT}, strings.SplitAfter)) runtime.RegisterPackageValue("strings", "splitN", generateSplitN("splitN", []string{stringArgV, stringArgT, integer}, strings.SplitN)) runtime.RegisterPackageValue("strings", "splitAfterN", generateSplitN("splitAfterN", []string{stringArgV, stringArgT, integer}, strings.SplitAfterN)) SpecialFns = map[string]values.Function{ "joinStr": 
values.NewFunction( "joinStr", runtime.MustLookupBuiltinType("strings", "joinStr"), func(ctx context.Context, args values.Object) (values.Value, error) { var argVals = make([]values.Value, 2) val, ok := args.Get("arr") if !ok { return nil, fmt.Errorf("missing argument %q", "arr") } arr := val.Array() // XXX: remove when array/stream are different types <https://github.com/influxdata/flux/issues/4343> if _, ok := arr.(values.TableObject); ok { return nil, fmt.Errorf("%q cannot be a table stream; expected an array", "arr") } if arr.Len() >= 0 { et, _ := arr.Type().ElemType() if et.Nature() != semantic.String { return nil, fmt.Errorf("expected elements of argument %q to be of type %v, got type %v", "arr", semantic.String, arr.Get(0).Type().Nature()) } } argVals[0] = val val, ok = args.Get("v") if !ok { return nil, fmt.Errorf("missing argument %q", "v") } if val.IsNull() || val.Type().Nature() != semantic.String { return nil, fmt.Errorf("expected argument %q to be of type %v, got type %v value %v", "v", semantic.String, val.Type().Nature(), val) } argVals[1] = val stringArray := argVals[0].Array() var newStringArray []string // n.b. should already have been vetted as non-TableObject // above, making the Len() call safe. for i := 0; i < stringArray.Len(); i++ { newStringArray = append(newStringArray, stringArray.Get(i).Str()) } return values.NewString(strings.Join(newStringArray, argVals[1].Str())), nil }, false, ), } runtime.RegisterPackageValue("strings", "joinStr", SpecialFns["joinStr"]) }
1
17,667
If I'm reading this right, this changes the parameter name so it'd constitute a breaking change :cold_sweat: Are we missing a test that should have been failing up until now?
influxdata-flux
go
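The breaking-change concern above is about the user-visible argument name: `generateDualArgStringFunctionReturnInt` looks arguments up by the names it is registered with, so switching `lastIndexAny` from `substr` to `chars` changes what callers of the Flux function have to write. Since the examples added in this edit stick to Python, here is a language-neutral analogy rather than Flux or the flux Go runtime; the function and parameter names below are hypothetical stand-ins, not anything from the repository.

```python
# Analogy only -- plain Python, not the Flux runtime. Renaming a keyword
# parameter breaks every caller that passes it by name, which is the kind of
# regression a call-site test would have caught.
def last_index_any(v, substr):                 # argument published as "substr"
    return max(v.rfind(c) for c in substr)

print(last_index_any(v="go gopher", substr="go"))    # 4

def last_index_any_renamed(v, chars):          # argument renamed to "chars"
    return max(v.rfind(c) for c in chars)

try:
    last_index_any_renamed(v="go gopher", substr="go")   # old call site
except TypeError as err:
    print(err)   # "... got an unexpected keyword argument 'substr'"
```

A test that exercises the registered name end to end (for example, a Flux script calling `strings.lastIndexAny` with whichever parameter name the stdlib signature actually declares) would pin down which spelling is the supported one.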
@@ -1,8 +1,3 @@ -/* - Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - SPDX-License-Identifier: Apache-2.0 -*/ - package com.spring.sns; import org.springframework.beans.factory.annotation.Autowired;
1
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package com.spring.sns; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.web.bind.annotation.*; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @Controller public class SubController { @Autowired SnsService sns; @GetMapping("/") public String root() { return "index"; } @GetMapping("/subscribe") public String add() { return "sub"; } // Adds a new item to the database. @RequestMapping(value = "/addEmail", method = RequestMethod.POST) @ResponseBody String addItems(HttpServletRequest request, HttpServletResponse response) { String email = request.getParameter("email"); return sns.subEmail(email); } @RequestMapping(value = "/delSub", method = RequestMethod.POST) @ResponseBody String delSub(HttpServletRequest request, HttpServletResponse response) { String email = request.getParameter("email"); sns.unSubEmail(email); return email +" was successfully deleted!"; } @RequestMapping(value = "/addMessage", method = RequestMethod.POST) @ResponseBody String addMessage(HttpServletRequest request, HttpServletResponse response) { String body = request.getParameter("body"); sns.pubTopic(body); return "Message sent"; } @RequestMapping(value = "/getSubs", method = RequestMethod.GET) @ResponseBody String getSubs(HttpServletRequest request, HttpServletResponse response) { String mySub = sns.getAllSubscriptions(); return mySub; } }
1
20,176
Need copyright/license info.
awsdocs-aws-doc-sdk-examples
java
@@ -3,10 +3,8 @@ package admissioncontroller import ( "fmt" "net/http" - "strings" admissionv1beta1 "k8s.io/api/admission/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" rulesv1 "github.com/kubeedge/kubeedge/cloud/pkg/apis/rules/v1"
1
package admissioncontroller import ( "fmt" "net/http" "strings" admissionv1beta1 "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" rulesv1 "github.com/kubeedge/kubeedge/cloud/pkg/apis/rules/v1" ) func admitRuleEndpoint(review admissionv1beta1.AdmissionReview) *admissionv1beta1.AdmissionResponse { reviewResponse := admissionv1beta1.AdmissionResponse{} var msg string switch review.Request.Operation { case admissionv1beta1.Create: raw := review.Request.Object.Raw ruleEndpoint := rulesv1.RuleEndpoint{} deserializer := codecs.UniversalDeserializer() if _, _, err := deserializer.Decode(raw, nil, &ruleEndpoint); err != nil { klog.Errorf("validation failed with error: %v", err) msg = err.Error() break } err := validateRuleEndpoint(&ruleEndpoint) if err != nil { msg = err.Error() break } reviewResponse.Allowed = true klog.Info("admission validation passed!") case admissionv1beta1.Delete, admissionv1beta1.Connect: //no rule defined for above operations, greenlight for all of above. reviewResponse.Allowed = true klog.Info("admission validation passed!") default: msg = fmt.Sprintf("Unsupported webhook operation %v", review.Request.Operation) klog.Warning(msg) } if !reviewResponse.Allowed { reviewResponse.Result = &metav1.Status{Message: strings.TrimSpace(msg)} } return &reviewResponse } func validateRuleEndpoint(ruleEndpoint *rulesv1.RuleEndpoint) error { switch ruleEndpoint.Spec.RuleEndpointType { case rulesv1.RuleEndpointTypeServiceBus: _, exist := ruleEndpoint.Spec.Properties["service_port"] if !exist { return fmt.Errorf("\"service_port\" property missed in property when ruleEndpoint is \"servicebus\"") } } return nil } func serveRuleEndpoint(w http.ResponseWriter, r *http.Request) { serve(w, r, admitRuleEndpoint) }
1
22,872
Is this `info log` necessary? If it is not useful, can you consider deleting it?
kubeedge-kubeedge
go
@@ -103,7 +103,7 @@ func TestConfigLoadEncryptedFailures(t *testing.T) { err = config.Data().Load() require.Error(t, err) - // This file contains invalid base64 characters. + // This file's header starts with RCLONE_ENCRYPT_V1 instead of V0. assert.NoError(t, config.SetConfigPath("./testdata/enc-too-new.conf")) err = config.Data().Load() require.Error(t, err)
1
// These are in an external package because we need to import configfile // // Internal tests are in crypt_internal_test.go package config_test import ( "context" "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestConfigLoadEncrypted(t *testing.T) { var err error oldConfigPath := config.GetConfigPath() assert.NoError(t, config.SetConfigPath("./testdata/encrypted.conf")) defer func() { assert.NoError(t, config.SetConfigPath(oldConfigPath)) config.ClearConfigPassword() }() // Set correct password err = config.SetConfigPassword("asdf") require.NoError(t, err) err = config.Data().Load() require.NoError(t, err) sections := config.Data().GetSectionList() var expect = []string{"nounc", "unc"} assert.Equal(t, expect, sections) keys := config.Data().GetKeyList("nounc") expect = []string{"type", "nounc"} assert.Equal(t, expect, keys) } func TestConfigLoadEncryptedWithValidPassCommand(t *testing.T) { ctx := context.Background() ci := fs.GetConfig(ctx) oldConfigPath := config.GetConfigPath() oldConfig := *ci assert.NoError(t, config.SetConfigPath("./testdata/encrypted.conf")) // using ci.PasswordCommand, correct password ci.PasswordCommand = fs.SpaceSepList{"echo", "asdf"} defer func() { assert.NoError(t, config.SetConfigPath(oldConfigPath)) config.ClearConfigPassword() *ci = oldConfig ci.PasswordCommand = nil }() config.ClearConfigPassword() err := config.Data().Load() require.NoError(t, err) sections := config.Data().GetSectionList() var expect = []string{"nounc", "unc"} assert.Equal(t, expect, sections) keys := config.Data().GetKeyList("nounc") expect = []string{"type", "nounc"} assert.Equal(t, expect, keys) } func TestConfigLoadEncryptedWithInvalidPassCommand(t *testing.T) { ctx := context.Background() ci := fs.GetConfig(ctx) oldConfigPath := config.GetConfigPath() oldConfig := *ci assert.NoError(t, config.SetConfigPath("./testdata/encrypted.conf")) // using ci.PasswordCommand, incorrect password ci.PasswordCommand = fs.SpaceSepList{"echo", "asdf-blurfl"} defer func() { assert.NoError(t, config.SetConfigPath(oldConfigPath)) config.ClearConfigPassword() *ci = oldConfig ci.PasswordCommand = nil }() config.ClearConfigPassword() err := config.Data().Load() require.Error(t, err) assert.Contains(t, err.Error(), "using --password-command derived password") } func TestConfigLoadEncryptedFailures(t *testing.T) { var err error // This file should be too short to be decoded. oldConfigPath := config.GetConfigPath() assert.NoError(t, config.SetConfigPath("./testdata/enc-short.conf")) defer func() { assert.NoError(t, config.SetConfigPath(oldConfigPath)) }() err = config.Data().Load() require.Error(t, err) // This file contains invalid base64 characters. assert.NoError(t, config.SetConfigPath("./testdata/enc-invalid.conf")) err = config.Data().Load() require.Error(t, err) // This file contains invalid base64 characters. assert.NoError(t, config.SetConfigPath("./testdata/enc-too-new.conf")) err = config.Data().Load() require.Error(t, err) // This file does not exist. assert.NoError(t, config.SetConfigPath("./testdata/filenotfound.conf")) err = config.Data().Load() assert.Equal(t, config.ErrorConfigFileNotFound, err) }
1
14,666
Unrelated change, but seems to be the correct purpose of the test.
rclone-rclone
go
@@ -142,6 +142,7 @@ module.exports = class DragDrop extends Plugin { const restrictions = this.uppy.opts.restrictions return ( <input + id={'input-' + this.id} class="uppy-DragDrop-input" type="file" tabindex={-1}
1
const { Plugin } = require('@uppy/core') const Translator = require('@uppy/utils/lib/Translator') const toArray = require('@uppy/utils/lib/toArray') const isDragDropSupported = require('@uppy/utils/lib/isDragDropSupported') const getDroppedFiles = require('@uppy/utils/lib/getDroppedFiles') const { h } = require('preact') /** * Drag & Drop plugin * */ module.exports = class DragDrop extends Plugin { static VERSION = require('../package.json').version constructor (uppy, opts) { super(uppy, opts) this.type = 'acquirer' this.id = this.opts.id || 'DragDrop' this.title = 'Drag & Drop' this.defaultLocale = { strings: { dropHereOr: 'Drop files here or %{browse}', browse: 'browse' } } // Default options const defaultOpts = { target: null, inputName: 'files[]', width: '100%', height: '100%', note: null } // Merge default options with the ones set by user this.opts = { ...defaultOpts, ...opts } // Check for browser dragDrop support this.isDragDropSupported = isDragDropSupported() this.removeDragOverClassTimeout = null this.i18nInit() // Bind `this` to class methods this.onInputChange = this.onInputChange.bind(this) this.handleDragOver = this.handleDragOver.bind(this) this.handleDragLeave = this.handleDragLeave.bind(this) this.handleDrop = this.handleDrop.bind(this) this.addFiles = this.addFiles.bind(this) this.render = this.render.bind(this) } setOptions (newOpts) { super.setOptions(newOpts) this.i18nInit() } i18nInit () { this.translator = new Translator([this.defaultLocale, this.uppy.locale, this.opts.locale]) this.i18n = this.translator.translate.bind(this.translator) this.i18nArray = this.translator.translateArray.bind(this.translator) this.setPluginState() // so that UI re-renders and we see the updated locale } addFiles (files) { const descriptors = files.map((file) => ({ source: this.id, name: file.name, type: file.type, data: file, meta: { // path of the file relative to the ancestor directory the user selected. // e.g. 'docs/Old Prague/airbnb.pdf' relativePath: file.relativePath || null } })) try { this.uppy.addFiles(descriptors) } catch (err) { this.uppy.log(err) } } onInputChange (event) { this.uppy.log('[DragDrop] Files selected through input') const files = toArray(event.target.files) this.addFiles(files) // We clear the input after a file is selected, because otherwise // change event is not fired in Chrome and Safari when a file // with the same name is selected. // ___Why not use value="" on <input/> instead? // Because if we use that method of clearing the input, // Chrome will not trigger change if we drop the same file twice (Issue #768). event.target.value = null } handleDrop (event, dropCategory) { event.preventDefault() event.stopPropagation() clearTimeout(this.removeDragOverClassTimeout) // 2. Remove dragover class this.setPluginState({ isDraggingOver: false }) // 3. Add all dropped files this.uppy.log('[DragDrop] Files were dropped') const logDropError = (error) => { this.uppy.log(error, 'error') } getDroppedFiles(event.dataTransfer, { logDropError }) .then((files) => this.addFiles(files)) } handleDragOver (event) { event.preventDefault() event.stopPropagation() // 1. 
Add a small (+) icon on drop // (and prevent browsers from interpreting this as files being _moved_ into the browser, https://github.com/transloadit/uppy/issues/1978) event.dataTransfer.dropEffect = 'copy' clearTimeout(this.removeDragOverClassTimeout) this.setPluginState({ isDraggingOver: true }) } handleDragLeave (event) { event.preventDefault() event.stopPropagation() clearTimeout(this.removeDragOverClassTimeout) // Timeout against flickering, this solution is taken from drag-drop library. Solution with 'pointer-events: none' didn't work across browsers. this.removeDragOverClassTimeout = setTimeout(() => { this.setPluginState({ isDraggingOver: false }) }, 50) } renderHiddenFileInput () { const restrictions = this.uppy.opts.restrictions return ( <input class="uppy-DragDrop-input" type="file" tabindex={-1} focusable="false" ref={(ref) => { this.fileInputRef = ref }} name={this.opts.inputName} multiple={restrictions.maxNumberOfFiles !== 1} accept={restrictions.allowedFileTypes} onchange={this.onInputChange} /> ) } renderArrowSvg () { return ( <svg aria-hidden="true" focusable="false" class="UppyIcon uppy-DragDrop-arrow" width="16" height="16" viewBox="0 0 16 16"> <path d="M11 10V0H5v10H2l6 6 6-6h-3zm0 0" fill-rule="evenodd" /> </svg> ) } renderLabel () { return ( <div class="uppy-DragDrop-label"> {this.i18nArray('dropHereOr', { browse: <span class="uppy-DragDrop-browse">{this.i18n('browse')}</span> })} </div> ) } renderNote () { return ( <span class="uppy-DragDrop-note">{this.opts.note}</span> ) } render (state) { const dragDropClass = ` uppy-Root uppy-u-reset uppy-DragDrop-container ${this.isDragDropSupported ? 'uppy-DragDrop--is-dragdrop-supported' : ''} ${this.getPluginState().isDraggingOver ? 'uppy-DragDrop--isDraggingOver' : ''} ` const dragDropStyle = { width: this.opts.width, height: this.opts.height } return ( <button type="button" class={dragDropClass} style={dragDropStyle} onClick={() => this.fileInputRef.click()} onDragOver={this.handleDragOver} onDragLeave={this.handleDragLeave} onDrop={this.handleDrop} > {this.renderHiddenFileInput()} <div class="uppy-DragDrop-inner"> {this.renderArrowSvg()} {this.renderLabel()} {this.renderNote()} </div> </button> ) } install () { this.setPluginState({ isDraggingOver: false }) const target = this.opts.target if (target) { this.mount(target, this) } } uninstall () { this.unmount() } }
1
13,081
Should it be `'uppy-input-`?
transloadit-uppy
js
@@ -191,7 +191,7 @@ static inline double mtrace(int n,double A[8][8],double B[8][8]) { void PairMGPT::make_triplet(bond_data *ij_bond,bond_data *ik_bond, triplet_data *triptr) { - if (1) { + if (true) { const trmul_fun tr_mul = linalg.tr_mul; tr_mul(&(ij_bond->H.m[1][0]), &(ik_bond->H.m[1][0]) ,&(triptr->H1H2.m[1][0]) ); tr_mul(&(ij_bond->Hx.m[1][0]),&(ik_bond->H.m[1][0]) ,&(triptr->H1xH2.m[1][0]));
1
// clang-format off /* ---------------------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator https://www.lammps.org/, Sandia National Laboratories Steve Plimpton, [email protected] Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. This software is distributed under the GNU General Public License. See the README file in the top-level LAMMPS directory. ------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- Contributing authors: Tomas Oppelstrup, LLNL ([email protected]) and John Moriarty, LLNL ([email protected]) Fast MGPT algorithm developed by Tomas Oppelstrup (2015) based on the matrix MGPT v4.4 FORTRAN routine of John Moriarty (2006) as converted to C++ for LAMMPS application by Jamie Marian and Alexander Stukowski (2011). See LLNL copyright notice at bottom of this file. ------------------------------------------------------------------------- */ #include "pair_mgpt.h" #include "atom.h" #include "comm.h" #include "error.h" #include "force.h" #include "memory.h" #include "neigh_list.h" #include "neigh_request.h" #include "neighbor.h" #include <cassert> #include <cmath> #include <cstring> using namespace LAMMPS_NS; //#define TIMING_ON #ifdef TIMING_ON #include <sys/time.h> #include <time.h> //#include "rdtsc.h" #ifdef __bgq__ #include <hwi/include/bqc/A2_inlines.h> #endif static double gettime(int x = 0) { if (1) { /* struct timeval tv; gettimeofday(&tv,nullptr); return tv.tv_sec + 1e-6 * tv.tv_usec; */ /* const double x = 1.0 / CLOCKS_PER_SEC; return clock() * x; */ //const double invfreq = 1.0 / 2394.108e6; /* const double invfreq = 1.0 / 700e6; unsigned long long int x = rdtsc(); return x*invfreq; */ const double invfreq = 1.0 / 1.6e9; unsigned long long int x = GetTimeBase(); return x*invfreq; } else return 0.0; } #else static double gettime(int /*x*/ = 0) { return 0.0; } #endif /* ---------------------------------------------------------------------- */ PairMGPT::PairMGPT(LAMMPS *lmp) : Pair(lmp) { single_enable = 0; one_coeff = 1; ghostneigh = 1; } PairMGPT::~PairMGPT() { if (allocated) { memory->destroy(setflag); memory->destroy(cutsq); memory->destroy(cutghost); } } /* ---------------------------------------------------------------------- */ static double t_make_b2 = 0.0,n_make_b2 = 0.0; template<typename intype,typename outtype,int ni,int nj> void fmatconv(intype *array) { outtype *cast = (outtype *) array; for (int i = 0; i<ni; i++) for (int j = 0; j<nj; j++) cast[i*nj+j] = array[i*nj+j]; } void PairMGPT::make_bond(const double xx[][3],int i,int j,bond_data *bptr) { double rrij[3],rij; int p; double t0,t1; /* Check that alignment requirements for SIMD code are fulfilled */ assert( (((unsigned long long int) (bptr->H.m )) & 31) == 0 ); assert( (((unsigned long long int) (bptr->Hx.m)) & 31) == 0 ); assert( (((unsigned long long int) (bptr->Hy.m)) & 31) == 0 ); assert( (((unsigned long long int) (bptr->Hz.m)) & 31) == 0 ); rij = 0.0; for (p = 0; p<3; p++) { rrij[p] = xx[i][p] - xx[j][p]; rij = rij + rrij[p]*rrij[p]; } /* Zero all matrix elements */ for (i = 0; i<8; i++) for (j = 0; j<8; j++) { bptr->H.m[i][j] = 0.0; bptr->Hx.m[i][j] = 0.0; bptr->Hy.m[i][j] = 0.0; bptr->Hz.m[i][j] = 0.0; bptr->Hz.m[j][i] = 0.0; } if (rij <= rcrit*rcrit) { t0 = gettime(); if (lang == 3) { 
hamltn_5_raw(rrij[0],rrij[1],rrij[2], bptr->H.m ,bptr->Hx.m, bptr->Hy.m,bptr->Hz.m,&bptr->fl_deriv_sum); } else { hamltn_7_raw(rrij[0],rrij[1],rrij[2], bptr->H.m ,bptr->Hx.m, bptr->Hy.m,bptr->Hz.m,&bptr->fl_deriv_sum); } t1 = gettime(); t_make_b2 += t1-t0; n_make_b2++; } else { bptr->fl_deriv_sum = 0.0; } if (linalg.single) { fmatconv<double,float,7,8>(&(bptr->H.m[1][0])); fmatconv<double,float,7,8>(&(bptr->Hx.m[1][0])); fmatconv<double,float,7,8>(&(bptr->Hy.m[1][0])); fmatconv<double,float,7,8>(&(bptr->Hz.m[1][0])); } } static double t_trace = 0.0,n_trace = 0.0; /* static inline double mtrace(int n,double A[8][8],double B[8][8]) { double t0,t1; double s; t0 = gettime(); if (n == 5) s = mtrace_5(A,B); else if (n == 7) s = mtrace_7(A,B); else { s = 0.0; for (int i = 1; i<=n; i++) for (int j = 1; j<=n; j++) s = s + A[i][j]*B[i][j]; } t1 = gettime(); t_trace += t1-t0; n_trace++; return s; } */ void PairMGPT::make_triplet(bond_data *ij_bond,bond_data *ik_bond, triplet_data *triptr) { if (1) { const trmul_fun tr_mul = linalg.tr_mul; tr_mul(&(ij_bond->H.m[1][0]), &(ik_bond->H.m[1][0]) ,&(triptr->H1H2.m[1][0]) ); tr_mul(&(ij_bond->Hx.m[1][0]),&(ik_bond->H.m[1][0]) ,&(triptr->H1xH2.m[1][0])); tr_mul(&(ij_bond->Hy.m[1][0]),&(ik_bond->H.m[1][0]) ,&(triptr->H1yH2.m[1][0])); tr_mul(&(ij_bond->Hz.m[1][0]),&(ik_bond->H.m[1][0]) ,&(triptr->H1zH2.m[1][0])); tr_mul(&(ij_bond->H.m[1][0]) ,&(ik_bond->Hx.m[1][0]),&(triptr->H1H2x.m[1][0])); tr_mul(&(ij_bond->H.m[1][0]) ,&(ik_bond->Hy.m[1][0]),&(triptr->H1H2y.m[1][0])); tr_mul(&(ij_bond->H.m[1][0]) ,&(ik_bond->Hz.m[1][0]),&(triptr->H1H2z.m[1][0])); } else { transprod(ij_bond->H, ik_bond->H ,triptr->H1H2 ); transprod(ij_bond->Hx,ik_bond->H ,triptr->H1xH2); transprod(ij_bond->Hy,ik_bond->H ,triptr->H1yH2); transprod(ij_bond->Hz,ik_bond->H ,triptr->H1zH2); transprod(ij_bond->H ,ik_bond->Hx,triptr->H1H2x); transprod(ij_bond->H ,ik_bond->Hy,triptr->H1H2y); transprod(ij_bond->H ,ik_bond->Hz,triptr->H1H2z); } } static double t_make_t = 0.0,t_make_b = 0.0,n_make = 0.0; PairMGPT::triplet_data *PairMGPT::get_triplet(const double xx[][3],int i,int j,int k, Hash<bond_data,Doublet> *bhash, triplet_data *twork, double *dvir_ij_p,double *dvir_ik_p) { const int recompute = 0; static bond_data bij_work,bik_work; double t0,t1; bond_data *bij = nullptr,*bik = nullptr; triplet_data *tptr = nullptr; t0 = gettime(); if (recompute == 0) { bij = bhash->Lookup(Doublet(i,j)); bik = bhash->Lookup(Doublet(i,k)); } if (bij == nullptr) { if (recompute == 0) bij = bhash->Insert(Doublet(i,j)); else bij = &bij_work; if (i < j) make_bond(xx,i,j,bij); else make_bond(xx,j,i,bij); } if (bik == nullptr) { if (recompute == 0) bik = bhash->Insert(Doublet(i,k)); else bik = &bik_work; if (i < k) make_bond(xx,i,k,bik); else make_bond(xx,k,i,bik); } t1 = gettime(); t_make_b += t1-t0; t0 = gettime(); if (bij != nullptr && bij != nullptr) { tptr = twork; make_triplet(bij,bik,tptr); *dvir_ij_p = bij->fl_deriv_sum; *dvir_ik_p = bik->fl_deriv_sum; } else { *dvir_ij_p = 0.0; *dvir_ik_p = 0.0; } t1 = gettime(); t_make_t += t1-t0; n_make++; return tptr; } double PairMGPT::numderiv3t(double xx[][3],int i,int j,int k,int p) { static bond_data Bij,Bjk,Bki; const double delta = 1e-5; const double xsave = xx[i][p]; double e1,e2; const double vc = splinepot.vc; xx[i][p] = xsave + delta; make_bond(xx,i,j,&Bij); make_bond(xx,j,k,&Bjk); make_bond(xx,k,i,&Bki); e1 = trace(prodmat(Bij.H,Bjk.H),Bki.H) * (vc/anorm3); xx[i][p] = xsave - delta; make_bond(xx,i,j,&Bij); if (0) { /* This bond doesn't change when i is perturbed */ 
make_bond(xx,j,k,&Bjk); } make_bond(xx,k,i,&Bki); e2 = trace(prodmat(Bij.H,Bjk.H),Bki.H) * (vc/anorm3); xx[i][p] = xsave; return (e1 - e2)/(2.0*delta); } double PairMGPT::numderiv3v(double xx[][3],int i,int j,int k,int p,int ipert) { static bond_data Bij,Bik; const double delta = 1e-5; const double xsave = xx[ipert][p]; double e1,e2; const double vd = splinepot.vd; xx[ipert][p] = xsave + delta; make_bond(xx,i,j,&Bij); make_bond(xx,i,k,&Bik); e1 = trace(prodmat(Bij.H,Bij.H),prodmat(Bik.H,Bik.H)) * (vd/anorm4); xx[ipert][p] = xsave - delta; make_bond(xx,i,j,&Bij); make_bond(xx,i,k,&Bik); e2 = trace(prodmat(Bij.H,Bij.H),prodmat(Bik.H,Bik.H)) * (vd/anorm4); xx[ipert][p] = xsave; return (e1 - e2)/(2.0*delta); } double PairMGPT::numderiv4(double xx[][3],int i,int j,int k,int m,int p) { static bond_data Bij,Bjk,Bkm,Bmi; const double delta = 1e-5; const double xsave = xx[i][p]; double e1,e2; const double ve = splinepot.ve; xx[i][p] = xsave + delta; make_bond(xx,i,j,&Bij); make_bond(xx,j,k,&Bjk); make_bond(xx,k,m,&Bkm); make_bond(xx,m,i,&Bmi); e1 = trace(prodmat(Bij.H,Bjk.H),prodmat(Bkm.H,Bmi.H)) * (ve/anorm4); xx[i][p] = xsave - delta; make_bond(xx,i,j,&Bij); if (0) { /* Only the i coordinates changed... */ make_bond(xx,j,k,&Bjk); make_bond(xx,k,m,&Bkm); } make_bond(xx,m,i,&Bmi); e2 = trace(prodmat(Bij.H,Bjk.H),prodmat(Bkm.H,Bmi.H)) * (ve/anorm4); xx[i][p] = xsave; return (e1 - e2)/(2.0*delta); } static double dtol = 1e-6; void PairMGPT::force_debug_3t(double xx[][3], int i0,int j0,int k0, int i ,int j ,int k , double dfix,double dfiy,double dfiz, double dfjx,double dfjy,double dfjz, double dfkx,double dfky,double dfkz) { double dfi[3],dfj[3],dfk[3]; dfi[0] = dfix; dfi[1] = dfiy; dfi[2] = dfiz; dfj[0] = dfjx; dfj[1] = dfjy; dfj[2] = dfjz; dfk[0] = dfkx; dfk[1] = dfky; dfk[2] = dfkz; for (int p = 0; p<3; p++) { /* Compute numerical derivatives by displacing atoms i,j,k */ double ndfi,ndfj,ndfk; ndfi = -numderiv3t(xx,i,j,k,p); ndfj = -numderiv3t(xx,j,k,i,p); ndfk = -numderiv3t(xx,k,i,j,p); if ((fabs(dfi[p] - ndfi) > dtol && fabs(dfi[p] - ndfi) > dtol*fabs(ndfi)) || (fabs(dfj[p] - ndfj) > dtol && fabs(dfj[p] - ndfj) > dtol*fabs(ndfj)) || (fabs(dfk[p] - ndfk) > dtol && fabs(dfk[p] - ndfk) > dtol*fabs(ndfk))) { printf("Force error in T12 & T23 & T31 :: i,j,k = %d,%d,%d\n",i0,j0,k0); printf(" dE/d%c[i] = %20.10e %20.10e\n", 'x'+p,ndfi, dfi[p]); printf(" dE/d%c[j] = %20.10e %20.10e\n", 'x'+p,ndfj, dfj[p]); printf(" dE/d%c[k] = %20.10e %20.10e\n", 'x'+p,ndfk, dfk[p]); printf("\n"); } } } void PairMGPT::force_debug_3v(double xx[][3], int i0,int j0,int k0, int i ,int j ,int k , double dfix,double dfiy,double dfiz, double dfjx,double dfjy,double dfjz, double dfkx,double dfky,double dfkz) { double dfi[3],dfj[3],dfk[3]; dfi[0] = dfix; dfi[1] = dfiy; dfi[2] = dfiz; dfj[0] = dfjx; dfj[1] = dfjy; dfj[2] = dfjz; dfk[0] = dfkx; dfk[1] = dfky; dfk[2] = dfkz; for (int p = 0; p<3; p++) { /* Compute numerical derivatives by displacing atoms i,j,k */ double ndfi,ndfj,ndfk; ndfi = -numderiv3v(xx,i,j,k,p,i0); ndfj = -numderiv3v(xx,i,j,k,p,j0); ndfk = -numderiv3v(xx,i,j,k,p,k0); if ((fabs(dfi[p] - ndfi) > dtol && fabs(dfi[p] - ndfi) > dtol*fabs(ndfi)) || (fabs(dfj[p] - ndfj) > dtol && fabs(dfj[p] - ndfj) > dtol*fabs(ndfj)) || (fabs(dfk[p] - ndfk) > dtol && fabs(dfk[p] - ndfk) > dtol*fabs(ndfk))) { printf("Force error in T12 :: i,j,k = %d,%d,%d\n",i0,j0,k0); printf(" dE/d%c[i] = %20.10e %20.10e\n", 'x'+p,ndfi, dfi[p]); printf(" dE/d%c[j] = %20.10e %20.10e\n", 'x'+p,ndfj, dfj[p]); printf(" dE/d%c[k] = %20.10e %20.10e\n", 
'x'+p,ndfk, dfk[p]); printf("\n"); } } } void PairMGPT::force_debug_4(double xx[][3], int i0,int j0,int k0,int m0, int i ,int j ,int k ,int m , double dfix,double dfiy,double dfiz, double dfjx,double dfjy,double dfjz, double dfkx,double dfky,double dfkz, double dfmx,double dfmy,double dfmz) { double dfi[3],dfj[3],dfk[3],dfm[3]; dfi[0] = dfix; dfi[1] = dfiy; dfi[2] = dfiz; dfj[0] = dfjx; dfj[1] = dfjy; dfj[2] = dfjz; dfk[0] = dfkx; dfk[1] = dfky; dfk[2] = dfkz; dfm[0] = dfmx; dfm[1] = dfmy; dfm[2] = dfmz; const int ii0[] = {i0,j0,k0,m0},ii[] = {i,j,k,m,i,j,k}; for (int p = 0; p<3; p++) { /* Compute numerical derivatives by displacing atoms i,j,k,m */ double ndfi,ndfj,ndfk,ndfm; if (1) { double ndf[] = {0.0,0.0,0.0,0.0}; for (int s = 0; s<4; s++) for (int t = 0; t<4; t++) if (ii[s] == ii0[t]) ndf[t] = -numderiv4(xx,ii[s],ii[s+1],ii[s+2],ii[s+3],p); ndfi = ndf[0]; ndfj = ndf[1]; ndfk = ndf[2]; ndfm = ndf[3]; } else { ndfi = -numderiv4(xx,i,j,k,m,p); ndfj = -numderiv4(xx,j,k,m,i,p); ndfk = -numderiv4(xx,k,m,i,j,p); ndfm = -numderiv4(xx,m,i,j,k,p); } if ((fabs(dfi[p] - ndfi) > dtol && fabs(dfi[p] - ndfi) > dtol*fabs(ndfi)) || (fabs(dfj[p] - ndfj) > dtol && fabs(dfj[p] - ndfj) > dtol*fabs(ndfj)) || (fabs(dfk[p] - ndfk) > dtol && fabs(dfk[p] - ndfk) > dtol*fabs(ndfk)) || (fabs(dfm[p] - ndfm) > dtol && fabs(dfm[p] - ndfm) > dtol*fabs(ndfm))) { printf("Force error in T31 & T64 :: i,j,k,m = %d,%d,%d,%d\n",i0,j0,k0,m0); printf(" dE/d%c[i] = %20.10e %20.10e\n", 'x'+p,ndfi, dfi[p]); printf(" dE/d%c[j] = %20.10e %20.10e\n", 'x'+p,ndfj, dfj[p]); printf(" dE/d%c[k] = %20.10e %20.10e\n", 'x'+p,ndfk, dfk[p]); printf(" dE/d%c[m] = %20.10e %20.10e\n", 'x'+p,ndfm, dfm[p]); printf("\n"); } } } /* #define trd_update_4(T12,T45,coord) \ do { \ trd1 = transtrace(T12->H1##coord##H2,T45->H1H2 ); \ trd2 = transtrace(T12->H1H2##coord,T45->H1H2 ); \ trd3 = transtrace(T12->H1H2 ,T45->H1##coord##H2); \ trd4 = transtrace(T12->H1H2 ,T45->H1H2##coord ); \ } while (0) */ #define trd_update_4(T12,T45) \ do { \ tr_trace3(&(T45->H1H2.m[1][0]), \ &(T12->H1xH2.m[1][0]),&utr1x.d, \ &(T12->H1yH2.m[1][0]),&utr1y.d, \ &(T12->H1zH2.m[1][0]),&utr1z.d); \ tr_trace3(&(T45->H1H2.m[1][0]), \ &(T12->H1H2x.m[1][0]),&utr2x.d, \ &(T12->H1H2y.m[1][0]),&utr2y.d, \ &(T12->H1H2z.m[1][0]),&utr2z.d); \ tr_trace3(&(T12->H1H2.m[1][0]), \ &(T45->H1xH2.m[1][0]),&utr3x.d, \ &(T45->H1yH2.m[1][0]),&utr3y.d, \ &(T45->H1zH2.m[1][0]),&utr3z.d); \ tr_trace3(&(T12->H1H2.m[1][0]), \ &(T45->H1H2x.m[1][0]),&utr4x.d, \ &(T45->H1H2y.m[1][0]),&utr4y.d, \ &(T45->H1H2z.m[1][0]),&utr4z.d); \ if (linalg.single) { \ trd1x = utr1x.f; trd2x = utr2x.f; trd3x = utr3x.f; trd4x = utr4x.f; \ trd1y = utr1y.f; trd2y = utr2y.f; trd3y = utr3y.f; trd4y = utr4y.f; \ trd1z = utr1z.f; trd2z = utr2z.f; trd3z = utr3z.f; trd4z = utr4z.f; \ } else { \ trd1x = utr1x.d; trd2x = utr2x.d; trd3x = utr3x.d; trd4x = utr4x.d; \ trd1y = utr1y.d; trd2y = utr2y.d; trd3y = utr3y.d; trd4y = utr4y.d; \ trd1z = utr1z.d; trd2z = utr2z.d; trd3z = utr3z.d; trd4z = utr4z.d; \ } \ } while (0) #define dfix_update_4a(coord) \ do { \ dfi##coord = ( (-sij)*trd1##coord + (-sim)*trd3##coord ) * (ve / anorm4); \ dfj##coord = ( ( sij)*trd1##coord + (-sjk)*trd2##coord ) * (ve / anorm4); \ dfk##coord = ( ( sjk)*trd2##coord + (-skm)*trd4##coord ) * (ve / anorm4); \ dfm##coord = ( ( sim)*trd3##coord + ( skm)*trd4##coord ) * (ve / anorm4); \ } while (0) #define dfix_update_4b(coord) \ do { \ dfi##coord = ( ( ski)*trd1##coord + (-sim)*trd3##coord ) * (ve / anorm4); \ dfj##coord = ( (-sjk)*trd2##coord + (-sjm)*trd4##coord ) * 
(ve / anorm4); \ dfk##coord = ( (-ski)*trd1##coord + ( sjk)*trd2##coord ) * (ve / anorm4); \ dfm##coord = ( ( sim)*trd3##coord + ( sjm)*trd4##coord ) * (ve / anorm4); \ } while (0); #define dfix_update_4c(coord) \ do { \ dfi##coord = ( (-sij)*trd1##coord + ( ski)*trd2##coord ) * (ve / anorm4); \ dfj##coord = ( ( sij)*trd1##coord + (-sjm)*trd3##coord ) * (ve / anorm4); \ dfk##coord = ( (-ski)*trd2##coord + (-skm)*trd4##coord ) * (ve / anorm4); \ dfm##coord = ( ( sjm)*trd3##coord + ( skm)*trd4##coord ) * (ve / anorm4); \ } while (0); #define accumulate_forces_2(w) \ do { \ fix = fix + dfix*(w); \ fiy = fiy + dfiy*(w); \ fiz = fiz + dfiz*(w); \ \ fjx = fjx + dfjx*(w); \ fjy = fjy + dfjy*(w); \ fjz = fjz + dfjz*(w); \ } while (0) #define accumulate_forces_3(w) \ do { \ accumulate_forces_2(w); \ fkx = fkx + dfkx*(w); \ fky = fky + dfky*(w); \ fkz = fkz + dfkz*(w); \ } while (0) #define accumulate_forces_4(w) \ do { \ accumulate_forces_3(w); \ fmx = fmx + dfmx*(w); \ fmy = fmy + dfmy*(w); \ fmz = fmz + dfmz*(w); \ } while (0) #define restrict __restrict__ #ifdef __bg__ #define const #endif static int ntr_calls = 0; static trtrace3_fun tr_internal; static void tr_count(const double * restrict A, const double * restrict B1,double * restrict t1, const double * restrict B2,double * restrict t2, const double * restrict B3,double * restrict t3) { tr_internal(A,B1,t1,B2,t2,B3,t3); ntr_calls++; } #ifdef __bg__ #undef const #endif #undef restrict int PairMGPT::Matrix::sz; void PairMGPT::compute_x(const int *nnei,const int * const *nlist, double *e_s,double *e_p,double *e_t,double *e_q, int evflag,int newton_pair) { Hash<bond_data,Doublet> bond_hash(100000); int i,j,k,m,ix,jx,kx,mx,itag,jtag,p; double e_single,e_pair,e_triplet,e_triplet_c,e_quad; double volvir2; double nbc = 0.0,tbl = 0.0,tbm = 0.0; const int lmax_local = lmax; //if(evflag) printf("##### ev flag is set... wasting cycles...\n"); *e_s = -99.0; *e_p = -99.0; *e_t = -99.0; *e_q = -99.0; double t0,t1; t0 = gettime(1); e_single = e_pair = e_triplet = e_triplet_c = e_quad = 0.0; volvir2 = 0.0; t_make_t = t_make_b = t_make_b2 = t_trace = 0.0; n_make = n_make_b2 = n_trace = 0.0; double tx0,tx1,tsort = 0.0,tpair = 0.0,tlookup = 0.0; double ttriplet = 0.0,tquad = 0.0,tmem = 0.0; double ntsort = 0.0,ntpair = 0.0,ntlookup = 0.0; double nttriplet = 0.0,ntquad = 0.0,ntmem = 0.0,ntquaditer = 0.0; double mcount = 0.0,mcount2 = 0.0, qcount = 0.0; double fix,fjx,fkx,fmx,dfix,dfjx,dfkx,dfmx; double fiy,fjy,fky,fmy,dfiy,dfjy,dfky,dfmy; double fiz,fjz,fkz,fmz,dfiz,dfjz,dfkz,dfmz; double fsave[4][3] = { {0.0} } /* {{0.0}} is to get rid of uninitialized use warning */; //const int numerical_pair_forces = (nbody_flag/16)%2; const int pair_forces = (nbody_flag/2)%2,three_body_forces = (nbody_flag/4)%2,four_body_forces = (nbody_flag/8)%2; const int pair_energies = (nbody_flag/2)%2,three_body_energies = (nbody_flag/4)%2,four_body_energies = (nbody_flag/8)%2; const int single_energies = nbody_flag%2; const int triplet_debug = 0,quad_debug = 0; /* Energy and force scale factor for unit conversion. 
*/ const double e_scale = 0.5; #ifdef NEIGHMASK #define NIDX(x) (x) #else #define NIDX(x) ((x) & NEIGHMASK) #endif int nneitot,*first,*nlist_short; double w2,w3,w4; triplet_data T12work,T23work,T31work,T45work,T56work,T64work; triplet_data *T12,*T23,*T31,*T45,*T56,*T64; int c_ij,c_jk,c_ki,c_im,c_jm,c_km; int mi,mj,mk; double tr0,tr1,tr2,tr3; double v33,v43; double rcut2_pair = rmax*rmax,rcut2_bond = rcrit*rcrit,rij2; int ntot,nloc; double dvir_ij,dvir_jk,dvir_ki,dvir_im,dvir_jm,dvir_km; double vir3t = 0.0,vir3v = 0.0,vir4 = 0.0; double (*xx)[3],(*ff)[3],(*ss)[3]; #ifdef TIMING_ON tr_internal = linalg.tr_trace; ntr_calls = 0; const trtrace3_fun tr_trace3 = tr_count; #else const trtrace3_fun tr_trace3 = linalg.tr_trace; #endif union { double d; float f; } utr1x,utr2x,utr3x,utr4x,utr1y,utr2y,utr3y,utr4y,utr1z,utr2z,utr3z,utr4z; double trd1x,trd2x,trd3x,trd4x; double trd1y,trd2y,trd3y,trd4y; double trd1z,trd2z,trd3z,trd4z; tx0 = gettime(); double rhoinv; { double vtot = 1.0; double ntot = atom->natoms; for (i = 0; i<3; i++) vtot = vtot * (domain->boxhi[i] - domain->boxlo[i]); rhoinv = vtot / ntot; } /* Make sure triplet data work area is aligned and zeroed out. */ { assert(T12work.align_check() == 0); assert(T23work.align_check() == 0); assert(T31work.align_check() == 0); assert(T45work.align_check() == 0); assert(T56work.align_check() == 0); assert(T64work.align_check() == 0); T12work.zero(); T23work.zero(); T31work.zero(); T45work.zero(); T56work.zero(); T64work.zero(); } ntot = atom->nlocal + atom->nghost; nloc = atom->nlocal; //printf("[%3d] Allocating local array, size is %d atoms...\n",comm->me,j); xx = (double (*)[3]) memory->smalloc(sizeof(double [3]) * ntot,"mgpt: local position vector."); ff = (double (*)[3]) memory->smalloc(sizeof(double [3]) * ntot,"mgpt: local force vector."); //printf("[%3d] Initializing arrays...\n",comm->me); const int triclinic = domain->triclinic; double alpha[3] = {0.0,0.0,0.0}; if (triclinic) { double E[3][3],EX[3][3]; int cyc[] = {0,1,2,0,1}; ss = (double (*)[3]) memory->smalloc(sizeof(double [3]) * ntot, "mgpt: local reduced coordinate vector."); for (i = 0; i<3; i++) { for (j = 0; j<3; j++) E[i][j] = 0.0; E[i][i] = domain->subhi_lamda[i] - domain->sublo_lamda[i]; domain->lamda2x(E[i],EX[i]); } for (i = 0; i<3; i++) { int i1 = cyc[i+1],i2 = cyc[i+2]; double dot = 0.0,ns2 = 0.0; for (j = 0; j<3; j++) { int j1 = cyc[j+1],j2 = cyc[j+2]; double cj = EX[i1][j1]*EX[i2][j2] - EX[i1][j2]*EX[i2][j1]; ns2 = ns2 + cj*cj; dot = dot + EX[i][j]*cj; } alpha[i] = E[i][i] / (dot/sqrt(ns2)); if (comm->me == 0) { static int count = 0; if (count < 3) printf("@@@ alpha(%d) = %15.5e\n",i+1,alpha[i]); count++; } if (alpha[i] < 0.0) alpha[i] = -alpha[i]; } } else ss = xx; nneitot = 0; for (ix = 0; ix<ntot; ix++) { for (p = 0; p<3; p++) { xx[ix][p] = atom->x[ix][p]; ff[ix][p] = 0.0; } if (triclinic) domain->x2lamda(xx[ix],ss[ix]); nneitot = nneitot + nnei[ix]; } first = (int *) memory->smalloc(sizeof(int) * (ntot+1),"mgpt: first"); nlist_short = (int *) memory->smalloc(sizeof(int) * nneitot,"mgpt: nlist_short"); tx1 = gettime(); tmem += tx1-tx0; ntmem++; //printf("[%3d] Starting calculation...\n",comm->me); fix = fjx = fkx = fmx = 0.0; fiy = fjy = fky = fmy = 0.0; fiz = fjz = fkz = fmz = 0.0; int c_p = 0, c_t = 0, c_q = 0; if (0) if (domain->triclinic) { if (comm->me == 0) printf("Can not handle triclinic box yet\n"); error->all(__FILE__,__LINE__,"Can not handle triclinic cell with mgpt yet."); } /* for (i = 0; i<nloc; i++) { printf("Atom %3d:: %10.3f %10.3f %10.3f\n", 
i,xx[i][0],xx[i][1],xx[i][2]); } */ first[0] = 0; for (i = 0; i<ntot; i++) { fix = fiy = fiz = 0.0; first[i+1] = first[i]; const int c1 = c1_outside(ss[i],triclinic,alpha); tx0 = gettime(); for (jx = 0; jx<nnei[i]; jx++) { fjx = fjy = fjz = 0.0; j = NIDX( nlist[i][jx] ); rij2 = 0.0; for (p = 0; p<3; p++) { double t = xx[i][p] - xx[j][p]; rij2 = rij2 + t*t; } if (c1 == 0 && rij2 < rcut2_pair) { if (j < i) { w2 = get_weight(triclinic,ss[i],ss[j]); if (w2 > 0.0) { /* Compute pair energy/force */ double de_pair,df,rij = sqrt(rij2); splinepot.eval_pot(rij,&de_pair,&df); de_pair = de_pair * e_scale * w2; df = df / rij * w2; if (pair_energies == 0) de_pair = 0.0; e_pair = e_pair + de_pair; c_p++; if (pair_forces == 0) df = 0.0; if (volpres_flag && pair_energies) { double dvir; splinepot.eval_vir(rij,&dvir); volvir2 = volvir2 - dvir * w2; /* Per-atom virial contribution of volumetric energy term */ if (vflag_atom) for (int pp = 0; pp<3; pp++) { //virial[i] = virial[i] + rhoinv*e_scale*volvir2; vatom[i][pp] -= 0.5 * rhoinv*e_scale*dvir*w2; vatom[j][pp] -= 0.5 * rhoinv*e_scale*dvir*w2; } } double drijx = xx[j][0] - xx[i][0]; double drijy = xx[j][1] - xx[i][1]; double drijz = xx[j][2] - xx[i][2]; fix = fix + df*drijx; fjx = fjx - df*drijx; fiy = fiy + df*drijy; fjy = fjy - df*drijy; fiz = fiz + df*drijz; fjz = fjz - df*drijz; if (evflag) { //ev_tally(i,j,nloc,newton_pair,de_pair,0.0,df,-drijx,-drijy,-drijz); /* To fix stress-per-atom scaling, and sign */ ev_tally(i,j,nloc,newton_pair,de_pair,0.0,-df * e_scale,-drijx,-drijy,-drijz); } ff[j][0] += fjx * e_scale; ff[j][1] += fjy * e_scale; ff[j][2] += fjz * e_scale; } } } if (rij2 < rcut2_bond && c2_outside(ss[i],ss[j],triclinic,alpha) == 0) { /* Add j to short neighbor list for i. Insert j to keep list sorted. */ p = first[i+1]-1; while (p >= first[i] && nlist_short[p] > j) { nlist_short[p+1] = nlist_short[p]; p = p - 1; } nlist_short[p+1] = j; first[i+1] = first[i+1] + 1; if (first[i+1] > nneitot) { printf("nneitot = %d, short list full. i=%d\n", nneitot,i); error->one(__FILE__,__LINE__,"Shit! Short list full\n"); } } } ff[i][0] += fix * e_scale; ff[i][1] += fiy * e_scale; ff[i][2] += fiz * e_scale; tx1 = gettime(); tpair += tx1-tx0; ntpair += nnei[i]; } for (i = 0; i<ntot; i++) { fix = fiy = fiz = 0.0; /* Use short lists for triplets and quadruplets. For open (2-bonded) triplets, can only use k<j, but not k<i. For closed (3-bonded) triplets, we can assume k<j<i. Quadruplets: Always use k<j<i, and require m<i. If 5-bonded with im bond, ignore the quadruplet. If 6-bonded, require m<k. For 4-bonded quadruplets, we can still use k<j, but also assume max(m,j)<i */ if (three_body_energies || three_body_forces || four_body_energies || four_body_forces) for (jx = first[i]; jx<first[i+1]; jx++) { fjx = fjy = fjz = 0.0; j = nlist_short[jx]; for (kx = first[i]; kx<jx; kx++) { fkx = fky = fkz = 0.0; k = nlist_short[kx]; /* Search lists of j and k, and see if 1) j is in k-list (closed triplet) 2) j and k have a common neighbor (closed quadruplet) */ c_ij = c_ki = 1; const int sij = (i < j) ? 1 : -1; const int sjk = (j < k) ? 1 : -1; const int ski = (k < i) ? 
1 : -1; T12 = T23 = T31 = nullptr; mj = first[j]; /* Since i is in the j-list, and i > k and the list is sorted, the loop below terminates:-) */ while (mj < first[j+1] && nlist_short[mj] < k) mj = mj + 1; if (mj < first[j+1] && nlist_short[mj] == k) { /* Closed triplet */ c_jk = 1; if (j > i) continue; /* Require k<j<i for closed triplets */ } else { /* Open triplet */ c_jk = 0; } tx0 = gettime(); w3 = get_weight(triclinic,ss[i],ss[j],ss[k]); int triplet_defer; if (w3 > 0.0) { triplet_defer = 0; dvir_ij = dvir_jk = dvir_ki = 0.0; if (c_ij && c_jk) T12 = get_triplet(xx,j,i,k,&bond_hash,&T12work,&dvir_ij,&dvir_jk); if (c_ki && c_jk) T23 = get_triplet(xx,k,i,j,&bond_hash,&T23work,&dvir_ki,&dvir_jk); if (c_ij && c_ki) T31 = get_triplet(xx,i,j,k,&bond_hash,&T31work,&dvir_ij,&dvir_ki); if (evflag) { fsave[0][0] = fix; fsave[0][1] = fiy; fsave[0][2] = fiz; fsave[1][0] = fjx; fsave[1][1] = fjy; fsave[1][2] = fjz; fsave[2][0] = fkx; fsave[2][1] = fky; fsave[2][2] = fkz; fix = fiy = fiz = 0.0; fjx = fjy = fjz = 0.0; fkx = fky = fkz = 0.0; } tr0 = tr1 = tr2 = tr3 = 0.0; double xvir3t,xvir3v; xvir3t = xvir3v = 0.0; if (T12 && T23) { bond_data *bki = bond_hash.Lookup(Doublet(k,i)); if (three_body_energies && evflag) { tr0 = transtrace(T12->H1H2,bki->H); double dvir = ((dvir_ij + dvir_jk + bki->fl_deriv_sum)*splinepot.vc + splinepot.dvc)*tr0*w3/anorm3; vir3t = vir3t + dvir; xvir3t = xvir3t + dvir; } mcount2++; { const double vc = splinepot.vc; tr_trace3(&(bki->H.m[1][0]), &(T12->H1xH2.m[1][0]),&utr1x.d, &(T12->H1yH2.m[1][0]),&utr1y.d, &(T12->H1zH2.m[1][0]),&utr1z.d); tr_trace3(&(bki->H.m[1][0]), &(T12->H1H2x.m[1][0]),&utr2x.d, &(T12->H1H2y.m[1][0]),&utr2y.d, &(T12->H1H2z.m[1][0]),&utr2z.d); tr_trace3(&(T12->H1H2.m[1][0]), &(bki->Hx.m[1][0]),&utr3x.d, &(bki->Hy.m[1][0]),&utr3y.d, &(bki->Hz.m[1][0]),&utr3z.d); if (linalg.single) { trd1x = utr1x.f; trd2x = utr2x.f; trd3x = utr3x.f; trd1y = utr1y.f; trd2y = utr2y.f; trd3y = utr3y.f; trd1z = utr1z.f; trd2z = utr2z.f; trd3z = utr3z.f; } else { trd1x = utr1x.d; trd2x = utr2x.d; trd3x = utr3x.d; trd1y = utr1y.d; trd2y = utr2y.d; trd3y = utr3y.d; trd1z = utr1z.d; trd2z = utr2z.d; trd3z = utr3z.d; } dfix = ( (-sij)*trd1x + ( ski)*trd3x ) * (vc / anorm3); dfjx = ( ( sij)*trd1x + (-sjk)*trd2x ) * (vc / anorm3); dfkx = ( ( sjk)*trd2x + (-ski)*trd3x ) * (vc / anorm3); dfiy = ( (-sij)*trd1y + ( ski)*trd3y ) * (vc / anorm3); dfjy = ( ( sij)*trd1y + (-sjk)*trd2y ) * (vc / anorm3); dfky = ( ( sjk)*trd2y + (-ski)*trd3y ) * (vc / anorm3); dfiz = ( (-sij)*trd1z + ( ski)*trd3z ) * (vc / anorm3); dfjz = ( ( sij)*trd1z + (-sjk)*trd2z ) * (vc / anorm3); dfkz = ( ( sjk)*trd2z + (-ski)*trd3z ) * (vc / anorm3); } if (triplet_debug) force_debug_3t(xx,i,j,k, i,j,k, dfix,dfiy,dfiz, dfjx,dfjy,dfjz, dfkx,dfky,dfkz); if (three_body_forces) accumulate_forces_3(w3); } if (T12 != nullptr) { //printf("T12 i,j,k = %d,%d,%d\n",i,j,k); mcount++; if (three_body_energies && evflag) { tr1 = transtrace(T12->H1H2,T12->H1H2); double dvir = (2.0*(dvir_ij + dvir_jk)*splinepot.vd + splinepot.dvd)*tr1*w3/anorm4; vir3v = vir3v + dvir; xvir3v = xvir3v + dvir; } { const double vd = splinepot.vd; tr_trace3(&(T12->H1H2.m[1][0]), &(T12->H1xH2.m[1][0]),&utr1x.d, &(T12->H1yH2.m[1][0]),&utr1y.d, &(T12->H1zH2.m[1][0]),&utr1z.d); tr_trace3(&(T12->H1H2.m[1][0]), &(T12->H1H2x.m[1][0]),&utr2x.d, &(T12->H1H2y.m[1][0]),&utr2y.d, &(T12->H1H2z.m[1][0]),&utr2z.d); if (linalg.single) { trd1x = utr1x.f; trd2x = utr2x.f; trd1y = utr1y.f; trd2y = utr2y.f; trd1z = utr1z.f; trd2z = utr2z.f; } else { trd1x = utr1x.d; trd2x = 
utr2x.d; trd1y = utr1y.d; trd2y = utr2y.d; trd1z = utr1z.d; trd2z = utr2z.d; } dfix = 2.0*(-sij)*trd1x * (vd / anorm4); dfkx = 2.0*( sjk)*trd2x * (vd / anorm4); dfjx = -(dfix + dfkx); dfiy = 2.0*(-sij)*trd1y * (vd / anorm4); dfky = 2.0*( sjk)*trd2y * (vd / anorm4); dfjy = -(dfiy + dfky); dfiz = 2.0*(-sij)*trd1z * (vd / anorm4); dfkz = 2.0*( sjk)*trd2z * (vd / anorm4); dfjz = -(dfiz + dfkz); } if (triplet_debug) /* Compare forces to numerical derivatives */ force_debug_3v(xx,i,j,k, j,i,k, dfix,dfiy,dfiz, dfjx,dfjy,dfjz, dfkx,dfky,dfkz); if (three_body_forces) accumulate_forces_3(w3); } if (T23 != nullptr) { //printf("T23 i,j,k = %d,%d,%d\n",i,j,k); mcount++; if (three_body_energies && evflag) { tr2 = transtrace(T23->H1H2,T23->H1H2); double dvir = (2.0*(dvir_jk + dvir_ki)*splinepot.vd + splinepot.dvd)*tr2*w3/anorm4; vir3v = vir3v + dvir; xvir3v = xvir3v + dvir; } { const double vd = splinepot.vd; tr_trace3(&(T23->H1H2.m[1][0]), &(T23->H1xH2.m[1][0]),&utr1x.d, &(T23->H1yH2.m[1][0]),&utr1y.d, &(T23->H1zH2.m[1][0]),&utr1z.d); tr_trace3(&(T23->H1H2.m[1][0]), &(T23->H1H2x.m[1][0]),&utr2x.d, &(T23->H1H2y.m[1][0]),&utr2y.d, &(T23->H1H2z.m[1][0]),&utr2z.d); if (linalg.single) { trd1x = utr1x.f; trd2x = utr2x.f; trd1y = utr1y.f; trd2y = utr2y.f; trd1z = utr1z.f; trd2z = utr2z.f; } else { trd1x = utr1x.d; trd2x = utr2x.d; trd1y = utr1y.d; trd2y = utr2y.d; trd1z = utr1z.d; trd2z = utr2z.d; } dfix = 2.0*( ski)*trd1x * (vd / anorm4); dfjx = 2.0*(-sjk)*trd2x * (vd / anorm4); dfkx = -(dfix + dfjx); dfiy = 2.0*( ski)*trd1y * (vd / anorm4); dfjy = 2.0*(-sjk)*trd2y * (vd / anorm4); dfky = -(dfiy + dfjy); dfiz = 2.0*( ski)*trd1z * (vd / anorm4); dfjz = 2.0*(-sjk)*trd2z * (vd / anorm4); dfkz = -(dfiz + dfjz); } if (triplet_debug) /* Compare forces to numerical derivatives */ force_debug_3v(xx,i,j,k, k,i,j, dfix,dfiy,dfiz, dfjx,dfjy,dfjz, dfkx,dfky,dfkz); if (three_body_forces) accumulate_forces_3(w3); } if (T31 != nullptr) { //printf("T31 i,j,k = %d,%d,%d\n",i,j,k); mcount++; if (three_body_energies && evflag) { tr3 = transtrace(T31->H1H2,T31->H1H2); double dvir = (2.0*(dvir_ki + dvir_ij)*splinepot.vd + splinepot.dvd)*tr3*w3/anorm4; vir3v = vir3v + dvir; xvir3v = xvir3v + dvir; } { const double vd = splinepot.vd; tr_trace3(&(T31->H1H2.m[1][0]), &(T31->H1xH2.m[1][0]),&utr1x.d, &(T31->H1yH2.m[1][0]),&utr1y.d, &(T31->H1zH2.m[1][0]),&utr1z.d); tr_trace3(&(T31->H1H2.m[1][0]), &(T31->H1H2x.m[1][0]),&utr2x.d, &(T31->H1H2y.m[1][0]),&utr2y.d, &(T31->H1H2z.m[1][0]),&utr2z.d); if (linalg.single) { trd1x = utr1x.f; trd2x = utr2x.f; trd1y = utr1y.f; trd2y = utr2y.f; trd1z = utr1z.f; trd2z = utr2z.f; } else { trd1x = utr1x.d; trd2x = utr2x.d; trd1y = utr1y.d; trd2y = utr2y.d; trd1z = utr1z.d; trd2z = utr2z.d; } dfjx = 2.0*( sij)*trd1x * (vd / anorm4); dfkx = 2.0*(-ski)*trd2x * (vd / anorm4); dfix = -(dfjx + dfkx); dfjy = 2.0*( sij)*trd1y * (vd / anorm4); dfky = 2.0*(-ski)*trd2y * (vd / anorm4); dfiy = -(dfjy + dfky); dfjz = 2.0*( sij)*trd1z * (vd / anorm4); dfkz = 2.0*(-ski)*trd2z * (vd / anorm4); dfiz = -(dfjz + dfkz); } if (triplet_debug) /* Compare forces to numerical derivatives */ force_debug_3v(xx,i,j,k, i,j,k, dfix,dfiy,dfiz, dfjx,dfjy,dfjz, dfkx,dfky,dfkz); if (three_body_forces) accumulate_forces_3(w3); } v33 = tr0 / anorm3; v43 = (tr1 + tr2 + tr3) / anorm4; double de_triplet = (splinepot.vc*v33 + splinepot.vd*v43) * e_scale * w3; e_triplet = e_triplet + de_triplet; e_triplet_c = e_triplet_c + splinepot.vc*v33 * e_scale * w3; c_t++; //printf("xxxx %6d %6d %6d :: %20.10e\n",1,2,3,de_triplet); if (evflag) { double 
drji[3],drki[3]; double fj[3] = {fjx,fjy,fjz},fk[3] = {fkx,fky,fkz}; for (int p = 0; p<3; p++) { drji[p] = xx[j][p] - xx[i][p]; drki[p] = xx[k][p] - xx[i][p]; /* To fix stress-per-atom scaling. */ fj[p] *= e_scale; fk[p] *= e_scale; } ev_tally3(i,j,k,de_triplet,0.0,fj,fk,drji,drki); if (volpres_flag && vflag_atom) { //virial[i] = virial[i] - (vir3v + vir3t) * rhoinv*e_scale; double dvir = -(xvir3v + xvir3t) * rhoinv*e_scale * (1.0/3.0); for (int pp = 0; pp<3; pp++) { vatom[i][pp] += dvir; vatom[j][pp] += dvir; vatom[k][pp] += dvir; } } fix = fix+fsave[0][0]; fiy = fiy+fsave[0][1]; fiz = fiz+fsave[0][2]; fjx = fjx+fsave[1][0]; fjy = fjy+fsave[1][1]; fjz = fjz+fsave[1][2]; fkx = fkx+fsave[2][0]; fky = fky+fsave[2][1]; fkz = fkz+fsave[2][2]; } tx1 = gettime(); ttriplet += tx1 - tx0; nttriplet++; } else { triplet_defer = 1; } if (four_body_energies || four_body_forces) if (j < i) { /* Search for quadruplet */ tx0 = gettime(); mj = first[j]; mk = first[k]; /* i is in both the j-list and the k-list, and i > k, and lists are sorted, so the loop terminates. */ while (nlist_short[mj] < i && nlist_short[mk] < i) { if (mj >= first[j+1] || mk >= first[k+1]) { printf("Illegal quad...\n" " j=%d first[j]=%d first[j+1]=%d mj=%d\n" " k=%d first[k]=%d first[k+1]=%d mk=%d\n", j,first[j],first[j+1],mj, k,first[k],first[k+1],mk); error->one(__FILE__,__LINE__,"Shit, brkoen quad loop"); } if (nlist_short[mj] == nlist_short[mk]) { /* Closed quadruplet */ m = nlist_short[mj]; c_jm = c_km = 1; const int sim = (i < m) ? 1 : -1; const int sjm = (j < m) ? 1 : -1; const int skm = (k < m) ? 1 : -1; w4 = get_weight(triclinic,ss[i],ss[j],ss[k],ss[m]); if (w4 > 0.0) { /* Alrady know ij,jk,ki,jm,km bonds. Look for im bond. */ mi = first[i]; while (mi < first[i+1] && nlist_short[mi] < m) mi = mi + 1; if (mi < first[i+1] && nlist_short[mi] == m) c_im = 1; else c_im = 0; if (c_im == 0 || c_jk == 0 || (c_jk && c_im && m < k)) { if (triplet_defer) { dvir_ij = dvir_jk = dvir_ki = 0.0; if (c_ij && c_jk) T12 = get_triplet(xx,j,i,k,&bond_hash,&T12work,&dvir_ij,&dvir_jk); if (c_ki && c_jk) T23 = get_triplet(xx,k,i,j,&bond_hash,&T23work,&dvir_ki,&dvir_jk); if (c_ij && c_ki) T31 = get_triplet(xx,i,j,k,&bond_hash,&T31work,&dvir_ij,&dvir_ki); triplet_defer = 0; } fmx = fmy = fmz = 0.0; double xvir4 = 0.0; if (evflag) { fsave[0][0] = fix; fsave[0][1] = fiy; fsave[0][2] = fiz; fsave[1][0] = fjx; fsave[1][1] = fjy; fsave[1][2] = fjz; fsave[2][0] = fkx; fsave[2][1] = fky; fsave[2][2] = fkz; fsave[3][0] = fmx; fsave[3][1] = fmy; fsave[3][2] = fmz; fix = fiy = fiz = 0.0; fjx = fjy = fjz = 0.0; fkx = fky = fkz = 0.0; fmx = fmy = fmz = 0.0; } tr1 = tr2 = tr3 = 0.0; dvir_im = dvir_jm = dvir_km = 0.0; T45 = T56 = T64 = nullptr; if (T12 != nullptr && c_km && c_im) T45 = get_triplet(xx,m,i,k,&bond_hash,&T45work,&dvir_im,&dvir_km); if (T23 != nullptr && c_im && c_jm) T56 = get_triplet(xx,m,i,j,&bond_hash,&T56work,&dvir_im,&dvir_jm); if (T31 != nullptr && c_jm && c_km) T64 = get_triplet(xx,m,j,k,&bond_hash,&T64work,&dvir_jm,&dvir_km); if (T12 != nullptr && T45 != nullptr) { if (four_body_energies && evflag) { tr1 = transtrace(T12->H1H2,T45->H1H2); double dvir = ( (dvir_ij + dvir_jk + dvir_im + dvir_km)*splinepot.ve + splinepot.dve )*tr1*w4/anorm4; vir4 = vir4 + dvir; xvir4 = xvir4 + dvir; } qcount++; { const double ve = splinepot.ve; trd_update_4(T12,T45); dfix_update_4a(x); dfix_update_4a(y); dfix_update_4a(z); } if (quad_debug) /* Compare forces to numerical derivatives */ force_debug_4(xx,i,j,k,m, i,j,k,m, dfix,dfiy,dfiz , dfjx,dfjy,dfjz, 
dfkx,dfky,dfkz , dfmx,dfmy,dfmz); if (four_body_forces) accumulate_forces_4(w4); } if (T23 != nullptr && T56 != nullptr) { if (four_body_energies && evflag) { tr2 = transtrace(T23->H1H2,T56->H1H2); double dvir = ( (dvir_ki + dvir_jk + dvir_im + dvir_jm)*splinepot.ve + splinepot.dve )*tr2*w4/anorm4; vir4 = vir4 + dvir; xvir4 = xvir4 + dvir; } qcount++; { const double ve = splinepot.ve; trd_update_4(T23,T56); dfix_update_4b(x); dfix_update_4b(y); dfix_update_4b(z); } if (quad_debug) /* Compare forces to numerical derivatives */ force_debug_4(xx,i,j,k,m, i,m,j,k, dfix,dfiy,dfiz , dfjx,dfjy,dfjz, dfkx,dfky,dfkz , dfmx,dfmy,dfmz); if (four_body_forces) accumulate_forces_4(w4); } if (T31 != nullptr && T64 != nullptr) { if (four_body_energies && evflag) { tr3 = transtrace(T31->H1H2,T64->H1H2); double dvir = ( (dvir_ki + dvir_ij + dvir_jm + dvir_km)*splinepot.ve + splinepot.dve )*tr3*w4/anorm4; vir4 = vir4 + dvir; xvir4 = xvir4 + dvir; } qcount++; { const double ve = splinepot.ve; /* X */ trd_update_4(T31,T64); dfix_update_4c(x); dfix_update_4c(y); dfix_update_4c(z); } if (quad_debug) /* Compare forces to numerical derivatives */ force_debug_4(xx,i,j,k,m, i,j,m,k, dfix,dfiy,dfiz , dfjx,dfjy,dfjz, dfkx,dfky,dfkz , dfmx,dfmy,dfmz); if (four_body_forces) accumulate_forces_4(w4); } double de_quad = splinepot.ve*(tr1 + tr2 + tr3)/anorm4 * e_scale * w4; e_quad = e_quad + de_quad; if ((T12 && T45) || (T23 && T56) || (T31 && T64)) { c_q++; } if (evflag) { double drim[3],drjm[3],drkm[3]; double fi[3] = {fix,fiy,fiz}; double fj[3] = {fjx,fjy,fjz}; double fk[3] = {fkx,fky,fkz}; for (int p = 0; p<3; p++) { drim[p] = xx[i][p] - xx[m][p]; drjm[p] = xx[j][p] - xx[m][p]; drkm[p] = xx[k][p] - xx[m][p]; fi[p] *= e_scale; fj[p] *= e_scale; fk[p] *= e_scale; } ev_tally4(i,j,k,m,de_quad,fi,fj,fk,drim,drjm,drkm); if (volpres_flag && vflag_atom) { //virial[i] = virial[i] - vir4 * rhoinv*e_scale; double dvir = -xvir4 * rhoinv*e_scale * (1.0/4.0); for (int pp = 0; pp<3; pp++) { vatom[i][pp] += dvir; vatom[j][pp] += dvir; vatom[k][pp] += dvir; vatom[m][pp] += dvir; } } fix = fix+fsave[0][0]; fiy = fiy+fsave[0][1]; fiz = fiz+fsave[0][2]; fjx = fjx+fsave[1][0]; fjy = fjy+fsave[1][1]; fjz = fjz+fsave[1][2]; fkx = fkx+fsave[2][0]; fky = fky+fsave[2][1]; fkz = fkz+fsave[2][2]; fmx = fmx+fsave[3][0]; fmy = fmy+fsave[3][1]; fmz = fmz+fsave[3][2]; } ff[m][0] += fmx * e_scale; ff[m][1] += fmy * e_scale; ff[m][2] += fmz * e_scale; } } mj = mj + 1; mk = mk + 1; } else if (nlist_short[mj] < nlist_short[mk]) { mj = mj + 1; } else { mk = mk + 1; } } tx1 = gettime(); tquad += tx1 - tx0; ntquad++; ntquaditer++; } ff[k][0] += fkx * e_scale; ff[k][1] += fky * e_scale; ff[k][2] += fkz * e_scale; } #undef transtrace ff[j][0] += fjx * e_scale; ff[j][1] += fjy * e_scale; ff[j][2] += fjz * e_scale; } ff[i][0] += fix * e_scale; ff[i][1] += fiy * e_scale; ff[i][2] += fiz * e_scale; if (single_energies == 1 && i < nloc) { const double evol0 = splinepot.evol0; if (eflag_global) { e_single = e_single + evol0 * e_scale; eng_vdwl = eng_vdwl + evol0 * e_scale; } if (eflag_atom) eatom[i] = eatom[i] + evol0 * e_scale; if (volpres_flag && vflag_atom) { for (int pp = 0; pp<3; pp++) vatom[i][pp] = vatom[i][pp] - rhoinv*splinepot.devol0*e_scale; } } } tx0 = gettime(); for (i = 0; i<ntot; i++) for (p = 0; p<3; p++) atom->f[i][p] = atom->f[i][p] + ff[i][p]; memory->sfree(nlist_short); memory->sfree(first); if (ss != xx) memory->sfree(ss); memory->sfree(ff); memory->sfree(xx); tx1 = gettime(); tmem += tx1-tx0; ntmem++; t1 = gettime(1); //printf("compute_x: c_p = 
%d c_t = %d c_q = %d\n",c_p,c_t,c_q); #ifdef TIMING_ON if (comm->me == 0) { double tsum = (tmem+tsort+tpair+tlookup+ttriplet+tquad); double nsum = (ntmem+ntsort+ntpair+ntlookup+nttriplet+ntquad); //double adj = ((t1-t0)-tsum)/nsum; /* Use adj = 6ns for RDTSC, and 58ns for gettimeofday, on monkfish.llnl.gov, 2.4GHz Intel Use adj = 35.945ns for RDTSC on uBGL (assumed rate set to 700MHz) */ double adj = 35.945e-9; double memadj = tmem - adj*ntmem , sortadj = tsort - adj*ntsort , pairadj = tpair - adj*ntpair , lookupadj = tlookup - adj*ntlookup , tripletadj = ttriplet - adj*nttriplet, quadadj = tquad - adj*ntquad , make_b_adj = t_make_b - adj*n_make, make_t_adj = t_make_t - adj*n_make, make_b2_adj = t_make_b2 - adj*n_make_b2, trace_adj = t_trace - adj*n_trace; printf("mgpt engy = %10.3fms\n",(t1-t0)*1e3); printf(" mem = %10.3fms n = %8.0f adj = %10.3fms one = %10.3fns\n", tmem*1e3,ntmem,memadj*1e3,memadj/ntmem*1e9); printf(" sort = %10.3fms n = %8.0f adj = %10.3fms one = %10.3fns\n", tsort*1e3,ntsort,sortadj*1e3,sortadj/ntsort*1e9); printf(" pair = %10.3fms n = %8.0f adj = %10.3fms one = %10.3fns\n", tpair*1e3,ntpair,pairadj*1e3,pairadj/ntpair*1e9); printf(" lookup = %10.3fms n = %8.0f adj = %10.3fms one = %10.3fns\n", tlookup*1e3,ntlookup,lookupadj*1e3,lookupadj/ntlookup*1e9); printf(" triplet = %10.3fms n = %8.0f adj = %10.3fms one = %10.3fns\n", ttriplet*1e3,nttriplet,tripletadj*1e3,tripletadj/nttriplet*1e9); printf(" quad = %10.3fms n = %8.0f adj = %10.3fms one = %10.3fns\n", tquad*1e3,ntquaditer,quadadj*1e3,quadadj/ntquaditer*1e9); printf(" sum = %10.3fms adj = %10.3fms\n", tsum*1e3,(tsum - adj*nsum)*1e3); printf("\n make_b = %10.3fms n = %8.0f adj = %10.3fms one = %10.3fns\n", t_make_b*1e3,n_make,make_b_adj*1e3,make_b_adj/n_make*1e9); printf(" make_b2 = %10.3fms n = %8.0f adj = %10.3fms one = %10.3fns\n", t_make_b2*1e3,n_make_b2,make_b2_adj*1e3,make_b2_adj/n_make_b2*1e9); printf(" make_t = %10.3fms n = %8.0f adj = %10.3fms one = %10.3fns\n\n", t_make_t*1e3,n_make,make_t_adj*1e3,make_t_adj/n_make*1e9); printf(" trace = %10.3fms n = %8.0f adj = %10.3fms one = %10.3fns\n\n", t_trace*1e3,n_trace,trace_adj*1e3,trace_adj/n_trace*1e9); printf("mcount (transpose + trace for triplet) = %.0f , %.0f qcount = %.0f lmax = %d\n", mcount,mcount2,qcount,lmax); printf("nbc=%.0f tbl=%.3fms tbm=%.3fms one tbl=%.3fns one tbm=%.3fns\n", nbc,(tbl-adj*nbc)*1e3,(tbm-adj*nbc)*1e3,(tbl/nbc-adj)*1e9, (tbm/nbc-adj)*1e9); printf("\n\nForces:\n"); printf("fix = %.3f fiy=%.3f fiz=%.3f\n",fix,fiy,fiz); printf("fjx = %.3f fjy=%.3f fjz=%.3f\n",fjx,fjy,fjz); printf("fkx = %.3f fky=%.3f fkz=%.3f\n",fkx,fky,fkz); printf("\n"); printf("Bonds : nsearch=%d maxlen=%d avg.len=%.3f\n", bond_hash.NSearch(),bond_hash.MaxLength(), bond_hash.NStep()/(double) bond_hash.NSearch()); printf("compute_x: c_p = %d c_t = %d c_q = %d\n",c_p,c_t,c_q); printf("@@ Total number of trace3 calls is %d, total number of make_triplet is %.1f\n", ntr_calls,n_make); { Hash<bond_data,Doublet>::Iterator iter = bond_hash.begin(); int nitem = 0,nhit = 0; while (iter != bond_hash.end()) { nitem++; nhit += iter.link()->hits; iter.next(); } printf("bond_hash hits: nitems=%d nhits=%d hits/item = %.3f\n", nitem,nhit,nhit/(double) nitem); } } #endif if (volpres_flag) { /* Include contributions to the pressure due to derivatines of the energy with respect to the potential input volume. 
*/ /* The following lines have moved to beginning of functions, since they are used in calculating per-atom virial contributions */ /* double vtot = 1.0; double ntot = atom->natoms; for (i = 0; i<3; i++) vtot = vtot * (domain->boxhi[i] - domain->boxlo[i]); double rhoinv = vtot / ntot; */ if (single_energies) // Virial correction for self energy for (i = 0; i<3; i++) { //virial[i] = virial[i] + nloc*pot_input_vol*pvol0*e_scale; virial[i] = virial[i] - nloc*rhoinv*splinepot.devol0*e_scale; } if (pair_energies) // Virial correction for pair energy for (i = 0; i<3; i++) virial[i] = virial[i] + rhoinv*e_scale*volvir2; if (three_body_energies) // Virial correction for three body enegries for (i = 0; i<3; i++) { //virial[i] = virial[i] - pot_input_vol*(e_triplet_c*pc + (e_triplet-e_triplet_c)*pd); virial[i] = virial[i] - (vir3v + vir3t) * rhoinv*e_scale; } if (four_body_energies) // Virial correction for four body enegries for (i = 0; i<3; i++) { //virial[i] = virial[i] - pot_input_vol*e_quad*pe; virial[i] = virial[i] - vir4 * rhoinv*e_scale; } } *e_s = e_single; *e_p = e_pair; *e_t = e_triplet; *e_q = e_quad; } void PairMGPT::compute(int eflag, int vflag) { ev_init(eflag, vflag); int newton_pair = force->newton_pair; double e_s,e_p,e_t,e_q; //printf("newton_pair = %d, newton = %d, tag_enable = %d\n",force->newton_pair,force->newton,atom->tag_enable); if (newton_pair == 0) { printf("This is a problem. MGPT requires newton_pair flag to be on. Exiting...\n"); exit(1); } if (atom->tag_enable == 0) { printf("This is a problem. MGPT requires tag_enable flag to be on. Exiting...\n"); exit(1); } compute_x(listfull->numneigh,listfull->firstneigh,&e_s,&e_p,&e_t,&e_q,evflag,newton_pair); if (0) { // Stupid force calculation / verification int ii,nmax=-1; for (ii = 0; ii<listfull->inum + listfull->gnum; ii++) { int i = listfull->ilist[ii]; if (i > nmax) nmax = i; } nmax++; double *ffwork = new double[3*nmax]; double *ffloc = new double[3*listfull->inum]; double *ffloc2 = new double[3*listfull->inum]; double **ffptr = new double *[nmax]; for (ii = 0; ii<listfull->inum + listfull->gnum; ii++) ffptr[ii] = &ffwork[3*ii]; printf("Computing boundary forces\n"); for (ii = 0; ii<listfull->inum; ii++) { ffloc2[3*ii] = 0.0; ffloc2[3*ii+1] = 0.0; ffloc2[3*ii+2] = 0.0; int i = listfull->ilist[ii]; for (int jj = 0; jj<listfull->inum+listfull->gnum; jj++) { int j = listfull->ilist[jj]; if (atom->tag[i] == atom->tag[j]) for (int p = 0; p<3; p++) ffloc2[3*ii+p] += atom->f[j][p]; } } printf("Starting main displacement force calculation\n"); for (ii = 0; ii<listfull->inum; ii++) { int i = listfull->ilist[ii]; double **atom_f_save = atom->f; atom->f = ffptr; for (int p = 0; p<3; p++) { double xsave = atom->x[i][p]; const double delta = 1e-3; atom->x[i][p] = xsave + delta; for (int jj = 0; jj<3*nmax; jj++) ffwork[jj] = 0.0; compute_x(listfull->numneigh, listfull->firstneigh, &e_s,&e_p,&e_t,&e_q,evflag,newton_pair); double e1 = e_s + e_p + e_t + e_q; atom->x[i][p] = xsave - delta; for (int jj = 0; jj<3*nmax; jj++) ffwork[jj] = 0.0; compute_x(listfull->numneigh, listfull->firstneigh, &e_s,&e_p,&e_t,&e_q,evflag,newton_pair); double e2 = e_s + e_p + e_t + e_q; ffloc[3*ii+p] = -(e1-e2)/(2*delta); atom->x[i][p] = xsave; } atom->f = atom_f_save; printf("Force on i=%4d:\n",i); printf(" Position %20.10e %20.10e %20.10e\n", atom->x[i][0],atom->x[i][1],atom->x[i][2]); printf(" Exact %20.10e %20.10e %20.10e\n", atom->f[i][0],atom->f[i][1],atom->f[i][2]); printf(" Numerical %20.10e %20.10e %20.10e\n", 
ffloc[3*ii+0],ffloc[3*ii+1],ffloc[3*ii+2]); printf(" Boundary %20.10e %20.10e %20.10e\n", ffloc2[3*ii+0],ffloc2[3*ii+1],ffloc2[3*ii+2]); } delete[] ffloc2; delete[] ffloc; delete[] ffptr; delete[] ffwork; } if (0) { printf("\nForces MGPT:\n"); const int iimax = (listfull->inum < 10) ? listfull->inum : 10; for (int ii = 0; ii<iimax; ii++) { int i = listfull->ilist[ii]; printf("%4d = %20.10e %20.10e %20.10e\n", i,atom->f[i][0],atom->f[i][1],atom->f[i][2]); } printf("\n\n"); } if (vflag_fdotr) { //printf("##### Using virial_compute!!!\n"); virial_fdotr_compute(); } } void PairMGPT::allocate() { allocated = 1; int n = atom->ntypes; memory->create(setflag,n+1,n+1,"pair:setflag"); for (int i = 0; i <= n; i++) for (int j = 0; j <= n; j++) setflag[i][j] = 0; memory->create(cutsq,n+1,n+1,"pair:cutsq"); memory->create(cutghost,n+1,n+1,"pair:cutsq"); } /* ---------------------------------------------------------------------- global settings ------------------------------------------------------------------------- */ void PairMGPT::settings(int narg, char **/*arg*/) { if (narg != 0) error->all(__FILE__,__LINE__,"Illegal pair_style command"); } /* ---------------------------------------------------------------------- set coeffs for one or more type pairs ------------------------------------------------------------------------- */ void PairMGPT::coeff(int narg, char **arg) { int single_precision = 0; if (narg < 5) error->all(__FILE__,__LINE__, "Not enough arguments for mgpt (MGPT) pair coefficients."); if (!allocated) allocate(); // Make sure I,J args are * * if (strcmp(arg[0],"*") != 0 || strcmp(arg[1],"*") != 0) error->all(__FILE__,__LINE__,"Incorrect args for pair coefficients"); double vol; if (sscanf(arg[4], "%lg", &vol) != 1 || vol <= 0.0) error->all(__FILE__,__LINE__,"Invalid volume in mgpt (MGPT) pair coefficients."); volpres_flag = 1; single_precision = 0; /* Parse arguments */ { int volpres_tag = 0,precision_tag = 0,nbody_tag = 0; int iarg = 5; while (iarg < narg) { if (strcmp(arg[iarg],"volpress") == 0) { /* Volumetric pressure flag */ if (iarg+2 > narg) error->all(FLERR,"Incorrect args for pair coefficients"); if (strcmp(arg[iarg+1],"yes") == 0) volpres_flag = 1; else if (strcmp(arg[iarg+1],"no") == 0) volpres_flag = 0; else { char line[1024]; sprintf(line,"(In %s:%d) Invalid value for volumetric pressure argument.\n" "It should be \"volpress yes\" or \"volpress no\".\n" "The value is \"%s\".\n",__FILE__,__LINE__,arg[iarg+1]); error->all(__FILE__,__LINE__,line); } volpres_tag = 1; iarg += 2; if (comm->me == 0) printf("* volpress: volpres_flag = %d [%s %s]\n",volpres_flag,arg[iarg-2],arg[iarg-1]); } else if (strcmp(arg[iarg],"nbody") == 0) { if (iarg+2 > narg) error->all(FLERR,"Incorrect args for pair coefficients"); if (strspn(arg[iarg+1],"1234") == strlen(arg[iarg+1])) { nbody_flag = 0; for (int i = 0; i<4; i++) if (strchr(arg[iarg+1],'1'+i) != nullptr) { nbody_flag = nbody_flag + (1<<i); if (comm->me == 0) printf("Explicitly adding %d-tuple forces.\n",i+1); } } else { char line[1024]; sprintf(line,"(In %s:%d) Invalid value for nbody flag.\n" "It should be e.g. \"nbody=1234\" (for single, pair, triple, and quad forces/energiers)\n" "For e.g. 
only pair and triple forces/energies, use \"nbody=23\".\n" "The default is \"nbody=1234\".\n" "The current value is \"%s\".\n",__FILE__,__LINE__,arg[iarg+1]); error->all(__FILE__,__LINE__,line); } nbody_tag = 1; iarg += 2; } else if (strcmp(arg[iarg],"precision") == 0) { if (iarg+2 > narg) error->all(FLERR,"Incorrect args for pair coefficients"); if (strcmp(arg[iarg+1],"single") == 0) single_precision = 1; else if (strcmp(arg[iarg+1],"double") == 0) single_precision = 0; else { char line[1024]; sprintf(line,"(In %s:%d) Invalid value for precision argument.\n" "It should be \"precision single\" or \"precision double\".\n" "The value is \"%s\".\n",__FILE__,__LINE__,arg[iarg+1]); error->all(__FILE__,__LINE__,line); } precision_tag = 1; iarg += 2; if (comm->me == 0) printf("* precision: single_flag = %d [%s %s]\n",single_precision,arg[iarg-2],arg[iarg-1]); } else { char line[1024]; sprintf(line,"(In %s:%d) Invalid argument. Allowed arguments are:\n" " volpress {yes|no} , default = yes\n" " precision {single|double} , default = double\n" " nbody {[1234,]*} , default = whichever terms potential require\n" "The invalid argument is \"%s\".\n",__FILE__,__LINE__,arg[iarg]); error->all(__FILE__,__LINE__,line); } } if (comm->me == 0) printf("Volumetric pressure is %s.\n",volpres_flag ? "on" : "off"); if (comm->me == 0) { FILE *parmin_fp = utils::open_potential(arg[2],lmp,nullptr); FILE *potin_fp = utils::open_potential(arg[3],lmp,nullptr); if (parmin_fp == nullptr || potin_fp == nullptr) { char str[128]; sprintf(str,"Cannot open MGPT potential files %s %s",arg[2],arg[3]); error->one(FLERR,str); } fclose(parmin_fp); fclose(potin_fp); splinepot.readpot(arg[2],arg[3],vol); printf("evol0 = %.10e\n",splinepot.evol0); /* Set up default and requested nbody forces to include */ { int nbody_default = (1<<0) + (1<<1) + (1<<2) + (1<<3); if (splinepot.vd == 0.0 && splinepot.dvd == 0.0) nbody_default -= (1<<2); // No 3-body contributions if (splinepot.ve == 0.0 && splinepot.dve == 0.0) nbody_default -= (1<<3); // No 4-body contributions if (nbody_tag == 0) nbody_flag = nbody_default; if (nbody_flag != nbody_default) { printf("Warning: nbody=%d (suggested=%d) set to disregard multibody-forces in potential.\n", nbody_flag,nbody_default); } } } } MPI_Bcast(&nbody_flag,sizeof(nbody_flag),MPI_BYTE,0,world); /* Broadcast structure to all processes. In receiving processes, pointes will be screwed up. We allocate memory, and then broadcast contents of arrays. */ MPI_Bcast(&splinepot,sizeof(splinepot),MPI_BYTE,0,world); if (comm->me != 0) { splinepot.vpair_spline = new double[splinepot.nr-1][4]; splinepot.dvpair_spline = new double[splinepot.nr-1][4]; } MPI_Bcast(splinepot.vpair_spline,4*(splinepot.nr-1),MPI_DOUBLE,0,world); MPI_Bcast(splinepot.dvpair_spline,4*(splinepot.nr-1),MPI_DOUBLE,0,world); anorm3 = splinepot.anorm3; anorm4 = splinepot.anorm4; lmax = splinepot.lmax; lang = splinepot.lang; //ipot = splinepot.ipot; for (int i = 0; i<(int) (sizeof(ddl)/sizeof(double)); i++) ddl[i] = splinepot.ddl[i]; for (int i = 0; i<lmax; i++) { for (int j = 0; j<lmax; j++) del0.m[i+1][j+1] = 0.0; del0.m[i+1][i+1] = 1.0; } /* Set matrix param, cutoff, LAMMPS param */ Matrix::sz = lmax; rcrit = splinepot.rcrit; rmax = splinepot.rmax; cutoff = rmax; if (rcrit > rmax) cutoff = rcrit; // Set LAMMPS pair interaction flags. for (int i = 1; i <= atom->ntypes; i++) { for (int j = 1; j <= atom->ntypes; j++) { setflag[i][j] = 1; cutsq[i][j] = cutoff; cutghost[i][j] = cutoff; } } // Set atomic mass. 
for (int i = 1; i <= atom->ntypes; i++) atom->set_mass(FLERR,i, splinepot.mass); // Initialize linear algebra routines. linalg = mgpt_linalg(lmax,single_precision); if (comm->me == 0) printf("%s",linalg.msg); } /* ---------------------------------------------------------------------- init specific to this pair style ------------------------------------------------------------------------- */ void PairMGPT::init_style() { if (force->newton_pair == 0) error->all(__FILE__,__LINE__,"Pair style mgpt requires newton pair on."); // Need full neighbor list. int irequest_full = neighbor->request(this); neighbor->requests[irequest_full]->id = 1; neighbor->requests[irequest_full]->half = 0; neighbor->requests[irequest_full]->full = 1; neighbor->requests[irequest_full]->ghost = 1; // Also need half neighbor list. int irequest_half = neighbor->request(this); neighbor->requests[irequest_half]->id = 2; } /* ---------------------------------------------------------------------- neighbor callback to inform pair style of neighbor list to use half or full ------------------------------------------------------------------------- */ void PairMGPT::init_list(int id, NeighList *ptr) { if (id == 1) listfull = ptr; else if (id == 2) listhalf = ptr; } /* ---------------------------------------------------------------------- init for one type pair i,j and corresponding j,i ------------------------------------------------------------------------- */ double PairMGPT::init_one(int /*i*/, int /*j*/) { return cutoff; } /************************************************************************ **** REIMPLEMENTATION OF FL AND HAMLTN WITH ANALYTICAL DERIVATIVES **** ************************************************************************/ /* Reimplementation of bond length potential, including derivatives with respect to x,y, and z. 
*/ void PairMGPT::fl_deriv_new(double r,double ri,double xhat,double yhat,double zhat, double &fl_0,double &fl_x,double &fl_y,double &fl_z, double &fl_rp,double &fl_p1,double &fl_r0,double &fl_al) { const double rp = splinepot.rp,p1 = splinepot.p1,r0 = splinepot.r00,al = splinepot.al; const int mode = splinepot.mode; const double pn = splinepot.pn; double t,tx,ty,tz,t_rp_ti,t_p1_ti; double s; /* // Original code double term; double pn=1.0; if (mode <= 4) term = pow(rp/r, p1); else term = exp(-p1*(pow(r/rp, pn) - 1.0)/pn); */ double rpi = 1.0/rp; if (mode <= 4) { t = pow(rp*ri,p1); s = -p1 * t * ri; t_rp_ti = p1*rpi; t_p1_ti = log(rp*ri); } else { if (pn == 1.0) { double p1_rpi = -p1*rpi; t = exp(p1 + r*p1_rpi); s = p1_rpi * t; t_rp_ti = -r*p1_rpi*rpi; t_p1_ti = 1.0 - r*rpi; } else { double pni = 1.0/pn; double rprpn = pow(r*rpi,pn); t = exp(-p1*pni*(rprpn - 1.0)); s = -p1*rprpn*ri * t; t_rp_ti = p1*rprpn*rpi; t_p1_ti = pni - pni*rprpn;// -pni*(rprpn - 1.0); } } tx = s * xhat; ty = s * yhat; tz = s * zhat; fl_rp = t_rp_ti; fl_p1 = t_p1_ti; if (r <= r0) { fl_0 = t; fl_x = tx; fl_y = ty; fl_z = tz; fl_r0 = 0.0; fl_al = 0.0; } else { double q,qx,qy,qz,exp_q,q_r0,q_al; double r0i,u; r0i = 1.0/r0; u = r*r0i - 1.0; q = al*u*u; s = 2*al*u*r0i; qx = s * xhat; qy = s * yhat; qz = s * zhat; q_r0 = -2.0*al*u*r*r0i*r0i; q_al = u*u; exp_q = exp(-q); if (mode <= 2) { fl_0 = exp_q * t; fl_x = exp_q*(tx - t*qx); fl_y = exp_q*(ty - t*qy); fl_z = exp_q*(tz - t*qz); fl_r0 = -q_r0; fl_al = -q_al; } else { fl_0 = exp_q * (1.0 + q) * t; fl_x = exp_q * (tx + q*(tx - t*qx)); fl_y = exp_q * (ty + q*(ty - t*qy)); fl_z = exp_q * (tz + q*(tz - t*qz)); fl_r0 = -q_r0 * q/(1.0 + q); fl_al = -q_al * q/(1.0 + q); } } } /* Macros to build elements of the bond matrix, and also its derivatives with repsect to x,y, and z. 
*/ #define MAKE_ELEMENT_5(i,j) \ do { \ const double dl0 = del0.m[i][j]; \ const double dl4 = gsl_##i * gsl_##j; \ const double dl4x = gsl_##i##x * gsl_##j + gsl_##i * gsl_##j##x; \ const double dl4y = gsl_##i##y * gsl_##j + gsl_##i * gsl_##j##y; \ const double dl4z = gsl_##i##z * gsl_##j + gsl_##i * gsl_##j##z; \ \ const double tmp = w4*dl4 + w2*dl2 + w0*dl0; \ const double tmpx = w4*dl4x + w2*dl2x; \ const double tmpy = w4*dl4y + w2*dl2y; \ const double tmpz = w4*dl4z + w2*dl2z; \ const double tmpsum = tmpx*x + tmpy*y + tmpz*z; \ M [j][i] = M[i][j] = fl *tmp; \ Mx[j][i] = Mx[i][j] = fl_x*tmp + fl_ri*(tmpx - x*tmpsum); \ My[j][i] = My[i][j] = fl_y*tmp + fl_ri*(tmpy - y*tmpsum); \ Mz[j][i] = Mz[i][j] = fl_z*tmp + fl_ri*(tmpz - z*tmpsum); \ } while (0) #define MAKE_ELEMENT_7(i,j) \ do { \ const double dl0 = del0.m[i][j]; \ const double dl6 = gsl_##i * gsl_##j; \ const double dl6x = gsl_##i##x * gsl_##j + gsl_##i * gsl_##j##x; \ const double dl6y = gsl_##i##y * gsl_##j + gsl_##i * gsl_##j##y; \ const double dl6z = gsl_##i##z * gsl_##j + gsl_##i * gsl_##j##z; \ \ const double tmp = w6*dl6 + w4*dl4 + w2*dl2 + w0*dl0; \ const double tmpx = w6*dl6x + w4*dl4x + w2*dl2x; \ const double tmpy = w6*dl6y + w4*dl4y + w2*dl2y; \ const double tmpz = w6*dl6z + w4*dl4z + w2*dl2z; \ const double tmpsum = tmpx*x + tmpy*y + tmpz*z; \ M [j][i] = M[i][j] = fl *tmp; \ Mx[j][i] = Mx[i][j] = fl_x*tmp + fl_ri*(tmpx - x*tmpsum); \ My[j][i] = My[i][j] = fl_y*tmp + fl_ri*(tmpy - y*tmpsum); \ Mz[j][i] = Mz[i][j] = fl_z*tmp + fl_ri*(tmpz - z*tmpsum); \ } while (0) /* End of bond matrix macros */ /* Construction of bond matrix, and its derivatives with respect to the coordinates */ void PairMGPT::hamltn_5_raw(const double xin,const double yin,const double zin, double M [8][8],double Mx[8][8], double My[8][8],double Mz[8][8], double *fl_deriv_sum_p) { const double r = sqrt(xin*xin + yin*yin + zin*zin),ri = 1.0/r; const double x = xin*ri,y = yin*ri,z = zin*ri; // d-d // call delndd(x,y,z) const double x2 = x*x,y2 = y*y,z2 = z*z; const double xy = x*y,xz = x*z,yz = y*z; const double sr3 = sqrt(3.0),sr3i = 1.0/sr3; const double frac_1_3 = 1.0/3.0,frac_2_3 = 2.0/3.0,frac_4_3 = 4.0/3.0; const double ddl_1 = ddl[1],ddl_2 = ddl[2],ddl_3 = ddl[3]; const double w4 = ddl_1 - frac_4_3*ddl_2 + frac_1_3*ddl_3; const double w2 = ddl_2 - ddl_3; const double w0 = ddl_2; //del4 double gsl_1 ,gsl_2 ,gsl_3 ,gsl_4 ,gsl_5; double gsl_1x,gsl_2x,gsl_3x,gsl_4x,gsl_5x; double gsl_1y,gsl_2y,gsl_3y,gsl_4y,gsl_5y; double gsl_1z,gsl_2z,gsl_3z,gsl_4z,gsl_5z; double dl2,dl2x,dl2y,dl2z; double fl,fl_x,fl_y,fl_z,fl_ri; double fl_rp,fl_p1,fl_r0,fl_al; gsl_1 = 0.5*(3.0*z2 - 1.0); gsl_1x = 0.0; gsl_1y = 0.0; gsl_1z = 3.0*z; gsl_2 = sr3*xz; gsl_2x = sr3*z; gsl_2y = 0.0; gsl_2z = sr3*x; gsl_3 = sr3*yz; gsl_3x = 0.0; gsl_3y = sr3*z; gsl_3z = sr3*y; gsl_4 = sr3*(x2 - y2)*0.5; gsl_4x = sr3*x; gsl_4y = -sr3*y; gsl_4z = 0.0; gsl_5 = sr3*xy; gsl_5x = sr3*y; gsl_5y = sr3*x; gsl_5z = 0.0; // Compute bond length potential fl_deriv_new(r,ri,x,y,z,fl,fl_x,fl_y,fl_z , fl_rp,fl_p1,fl_r0,fl_al); fl_ri = fl*ri; *fl_deriv_sum_p = fl_rp*splinepot.drp + fl_p1*splinepot.dp1 + fl_r0*splinepot.dr00 + fl_al*splinepot.dal; // del2 //del2.m[1][1] = z2 - 2.0/3.0; dl2 = z2 - frac_2_3; dl2x = 0.0; dl2y = 0.0; dl2z = 2*z; MAKE_ELEMENT_5(1,1); //del2.m[1][2] = xz/sr3; dl2 = xz*sr3i; dl2x = z*sr3i; dl2y = 0.0; dl2z = x*sr3i; MAKE_ELEMENT_5(1,2); //del2.m[1][3] = yz/sr3; dl2 = yz*sr3i; dl2x = 0.0; dl2y = z*sr3i; dl2z = y*sr3i; MAKE_ELEMENT_5(1,3); //del2.m[1][4] = -(x2 - y2)*sr3i; dl2 = 
-(x2 - y2)*sr3i; dl2x = -2.0*sr3i*x; dl2y = 2.0*sr3i*y; dl2z = 0.0; MAKE_ELEMENT_5(1,4); //del2.m[1][5] = -2.0*xy*sr3i; dl2 = -2.0*xy*sr3i; dl2x = -2.0*y*sr3i; dl2y = -2.0*x*sr3i; dl2z = 0.0; MAKE_ELEMENT_5(1,5); //del2.m[2][2] = -y2; dl2 = -y2; dl2x = 0.0; dl2y = -2.0*y; dl2z = 0.0; MAKE_ELEMENT_5(2,2); //del2.m[2][3] = xy; dl2 = xy; dl2x = y; dl2y = x; dl2z = 0.0; MAKE_ELEMENT_5(2,3); //del2.m[2][4] = xz; dl2 = xz; dl2x = z; dl2y = 0.0; dl2z = x; MAKE_ELEMENT_5(2,4); //del2.m[2][5] = yz; dl2 = yz; dl2x = 0.0; dl2y = z; dl2z = y; MAKE_ELEMENT_5(2,5); //del2.m[3][3] = -x2; dl2 = -x2; dl2x = -2.0*x; dl2y = 0.0; dl2z = 0.0; MAKE_ELEMENT_5(3,3); //del2.m[3][4] = -yz; dl2 = -yz; dl2x = 0.0; dl2y = -z; dl2z = -y; MAKE_ELEMENT_5(3,4); //del2.m[3][5] = xz; dl2 = xz; dl2x = z; dl2y = 0.0; dl2z = x; MAKE_ELEMENT_5(3,5); //del2.m[4][4] = -z2; dl2 = -z2; dl2x = 0.0; dl2y = 0.0; dl2z = -2.0*z; MAKE_ELEMENT_5(4,4); //del2.m[4][5] = 0.0; dl2 = 0.0; dl2x = 0.0; dl2y = 0.0; dl2z = 0.0; MAKE_ELEMENT_5(4,5); //del2.m[5][5] = -z2; dl2 = -z2; dl2x = 0.0; dl2y = 0.0; dl2z = -2.0*z; MAKE_ELEMENT_5(5,5); } void PairMGPT::hamltn_7_raw(const double xin,const double yin,const double zin, double M [8][8],double Mx[8][8], double My[8][8],double Mz[8][8], double *fl_deriv_sum_p) { const double r = sqrt(xin*xin + yin*yin + zin*zin),ri = 1.0/r; const double x = xin*ri,y = yin*ri,z = zin*ri; // d-d // call delndd(x,y,z) const double x2 = x*x,y2 = y*y,z2 = z*z; const double xy = x*y,xz = x*z,yz = y*z; const double x4 = x2*x2,y4 = y2*y2; //const double sr3 = sqrt(3.0);//,sr3i = 1.0/sr3; //const double frac_1_3 = 1.0/3.0,frac_2_3 = 2.0/3.0,frac_4_3 = 4.0/3.0; const double sr01 = sqrt(0.1); const double sr015 = sqrt(0.15); const double sr024 = sqrt(0.24); const double sr0375 = sqrt(0.375); const double sr06 = sqrt(0.6); const double sr0625 = sqrt(0.625); const double sr09 = sqrt(0.9); const double sr15 = sqrt(1.5); const double sr24 = sqrt(2.4); const double sr36 = sqrt(3.6); const double sr375 = sqrt(3.75); const double sr96 = sqrt(9.6); const double sr150 = sqrt(15.0); const double ddl_1 = ddl[1],ddl_2 = ddl[2],ddl_3 = ddl[3],ddl_4 = ddl[4]; const double w6 = ddl_1 - 1.5*ddl_2 + 0.6*ddl_3 - 0.1*ddl_4; const double w4 = 0.625*ddl_2 - ddl_3 + 0.375*ddl_4; const double w2 = 0.625*(ddl_2 - ddl_4); const double w0 = 0.625*ddl_2 + 0.375*ddl_4; //del6 double gsl_1 ,gsl_2 ,gsl_3 ,gsl_4 ,gsl_5 ,gsl_6, gsl_7; double gsl_1x,gsl_2x,gsl_3x,gsl_4x,gsl_5x,gsl_6x,gsl_7x; double gsl_1y,gsl_2y,gsl_3y,gsl_4y,gsl_5y,gsl_6y,gsl_7y; double gsl_1z,gsl_2z,gsl_3z,gsl_4z,gsl_5z,gsl_6z,gsl_7z; double dl2,dl2x,dl2y,dl2z; double dl4,dl4x,dl4y,dl4z; double t1; double fl,fl_x,fl_y,fl_z,fl_ri; double fl_rp,fl_p1,fl_r0,fl_al; //gslf[1] = 0.5*(5.0*n2 - 3.0)*n; gsl_1 = 0.5*(5.0*z2 - 3.0)*z; gsl_1x = 0.0; gsl_1y = 0.0; gsl_1z = 7.5*z2 - 1.5; //gslf[2] = sr0375*(5.0*n2 - 1.0)*l; gsl_2 = sr0375*(5.0*z2 - 1.0)*x; gsl_2x = sr0375*(5.0*z2 - 1.0); gsl_2y = 0.0; gsl_2z = sr0375*10.0*xz; //gslf[3] = sr0375*(5.0*n2 - 1.0)*m; gsl_3 = sr0375*(5.0*z2 - 1.0)*y; gsl_3x = 0.0; gsl_3y = sr0375*(5.0*z2 - 1.0); gsl_3z = sr0375*10.0*yz; //gslf[4] = sr375*(l2 - m2)*n; gsl_4 = sr375*(x2 - y2)*z; gsl_4x = 2.0*sr375*xz; gsl_4y = -2.0*sr375*yz; gsl_4z = sr375*(x2 - y2); //gslf[5] = sr150*lm*n; gsl_5 = sr150*xy*z; gsl_5x = sr150*yz; gsl_5y = sr150*xz; gsl_5z = sr150*xy; //gslf[6] = sr0625*(l2 - 3.0*m2)*l; gsl_6 = sr0625*(x2 - 3.0*y2)*x; gsl_6x = 3.0*sr0625*(x2 - y2); gsl_6y = -6.0*sr0625*xy; gsl_6z = 0.0; //gslf[7] = sr0625*(3.0*l2 - m2)*m; gsl_7 = sr0625*(3.0*x2 - y2)*y; gsl_7x = 
6.0*sr0625*xy; gsl_7y = 3.0*sr0625*(x2 - y2); gsl_7z = 0.0; // Compute bond length potential fl_deriv_new(r,ri,x,y,z,fl,fl_x,fl_y,fl_z , fl_rp,fl_p1,fl_r0,fl_al); fl_ri = fl*ri; *fl_deriv_sum_p = fl_rp*splinepot.drp + fl_p1*splinepot.dp1 + fl_r0*splinepot.dr00 + fl_al*splinepot.dal; // del2f //del2f.m[1][1] = 0.4*(3.0*n2 - 1.0); dl2 = 0.4*(3.0*z2 - 1.0); dl2x = 0.0; dl2y = 0.0; dl2z = 2.4*z; //del4f.m[1][1] = 0.60*(5.0*n2 - 4.0)*n2; dl4 = 0.60*(5.0*z2 - 4.0)*z2; dl4x = 0.0; dl4y = 0.0; dl4z = 0.60*(20.0*z2 - 8.0)*z; MAKE_ELEMENT_7(1,1); //del2f.m[1][2] = sr024*ln; dl2 = sr024*xz; dl2x = sr024*z; dl2y = 0.0; dl2z = sr024*x; //del4f.m[1][2] = sr024*(5.0*n2 - 2.0)*ln; dl4 = sr024*(5.0*z2 - 2.0)*xz; dl4x = sr024*(5.0*z2 - 2.0)*z; dl4y = 0.0; dl4z = sr024*(15.0*z2 - 2.0)*x; MAKE_ELEMENT_7(1,2); //del2f.m[1][3] = sr024*mn; dl2 = sr024*yz; dl2x = 0.0; dl2y = sr024*z; dl2z = sr024*y; //del4f.m[1][3] = sr024*(5.0*n2 - 2.0)*mn; dl4 = sr024*(5.0*z2 - 2.0)*yz; dl4x = 0.0; dl4y = sr024*(5.0*z2 - 2.0)*z; dl4z = sr024*(15.0*z2 - 2.0)*y; MAKE_ELEMENT_7(1,3); //del2f.m[1][4] = -sr06*(l2 - m2); dl2 = -sr06*(x2 - y2); dl2x = -2.0*sr06*x; dl2y = 2.0*sr06*y; dl2z = 0.0; //del4f.m[1][4] = -sr06*(l2 - m2)*n2; dl4 = -sr06*(x2 - y2)*z2; dl4x = -2.0*sr06*x*z2; dl4y = 2.0*sr06*y*z2; dl4z = -2.0*sr06*(x2 - y2)*z; MAKE_ELEMENT_7(1,4); //del2f.m[1][5] = -sr24*lm; dl2 = -sr24*xy; dl2x = -sr24*y; dl2y = -sr24*x; dl2z = 0.0; //del4f.m[1][5] = -sr24*lm*n2; dl4 = -sr24*xy*z2; dl4x = -sr24*y*z2; dl4y = -sr24*x*z2; dl4z = -2.0*sr24*xy*z; MAKE_ELEMENT_7(1,5); //del2f.m[1][6] = 0.0; dl2 = 0.0; dl2x = 0.0; dl2y = 0.0; dl2z = 0.0; //del4f.m[1][6] = sr36*(3.0*m2 - l2)*ln; dl4 = sr36*(3.0*y2 - x2)*xz; dl4x = 3.0*sr36*(y2 - x2)*z; dl4y = 6.0*sr36*y*xz; dl4z = sr36*(3.0*y2 - x2)*x; MAKE_ELEMENT_7(1,6); //del2f.m[1][7] = 0.0; dl2 = 0.0; dl2x = 0.0; dl2y = 0.0; dl2z = 0.0; //del4f.m[1][7] = -sr36*(3.0*l2 - m2)*mn; dl4 = -sr36*(3.0*x2 - y2)*yz; dl4x = -6.0*sr36*x*yz; dl4y = -3.0*sr36*(x2 - y2)*z; dl4z = -sr36*(3.0*x2 - y2)*y; MAKE_ELEMENT_7(1,7); //del2f.m[2][2] = 0.3*(1.0 - 4.0*m2 + n2); dl2 = 0.3 - 1.2*y2 + 0.3*z2; dl2x = 0.0; dl2y = -2.4*y; dl2z = 0.6*z; //del4f.m[2][2] = -0.4*l*l - 2.5*(m2 - 0.6*l2)*n2; dl4 = -0.4*x2 - 2.5*(y2 - 0.6*x2)*z2; dl4x = -0.8*x + 3.0*x*z2; dl4y = -5.0*y*z2; dl4z = -5.0*(y2 - 0.6*x2)*z; MAKE_ELEMENT_7(2,2); //del2f.m[2][3] = 1.2*lm; dl2 = 1.2*xy; dl2x = 1.2*y; dl2y = 1.2*x; dl2z = 0.0; //del4f.m[2][3] = 0.4*(10.0*n2 - 1.0)*lm; dl4 = (4.0*z2 - 0.4)*xy; dl4x = (4.0*z2 - 0.4)*y; dl4y = (4.0*z2 - 0.4)*x; dl4z = 8.0*z*xy; MAKE_ELEMENT_7(2,3); //del2f.m[2][4] = sr09*ln; dl2 = sr09*xz; dl2x = sr09*z; dl2y = 0.0; dl2z = sr09*x; //del4f.m[2][4] = sr01*(6.0*n2 - 8.0*m2 - 1.0)*ln; dl4 = sr01*(6.0*z2 - 8.0*y2 - 1.0)*xz; dl4x = sr01*(6.0*z2 - 8.0*y2 - 1.0)*z; dl4y = -16.0*sr01*y*xz; dl4z = sr01*(18.0*z2 - 8.0*y2 - 1.0)*x; MAKE_ELEMENT_7(2,4); //del2f.m[2][5] = sr09*mn; dl2 = sr09*yz; dl2x = 0.0; dl2y = sr09*z; dl2z = sr09*y; //del4f.m[2][5] = sr01*(2.0*n2 - 8.0*m2 + 3.0)*mn; dl4 = sr01*(2.0*z2 - 8.0*y2 + 3.0)*yz; dl4x = 0.0; dl4y = sr01*(2.0*z2 - 24.0*y2 + 3.0)*z; dl4z = sr01*(6.0*z2 - 8.0*y2 + 3.0)*y; MAKE_ELEMENT_7(2,5); //del2f.m[2][6] = -sr015*(l2 - m2); dl2 = -sr015*(x2 - y2); dl2x = -2.0*sr015*x; dl2y = 2.0*sr015*y; dl2z = 0.0; //del4f.m[2][6] = sr375*(l2 - m2 - 1.4*l4 + 1.2*l2*m2 + m4); dl4 = sr375*(x2 - y2 - 1.4*x4 + 1.2*x2*y2 + y4); dl4x = sr375*(2.0 - 5.6*x2 + 2.4*y2)*x; dl4y = sr375*(-2.0 + 2.4*x2 + 4.0*y2)*y; dl4z = 0.0; MAKE_ELEMENT_7(2,6); //del2f.m[2][7] = -sr06*lm; dl2 = -sr06*xy; dl2x = -sr06*y; dl2y = 
-sr06*x; dl2z = 0.0; //del4f.m[2][7] = sr96*(n2 - l2 + 0.25)*lm; dl4 = sr96*(z2 - x2 + 0.25)*xy; dl4x = sr96*(z2 - 3.0*x2 + 0.25)*y; dl4y = sr96*(z2 - x2 + 0.25)*x; dl4z = 2.0*sr96*z*xy; MAKE_ELEMENT_7(2,7); //del2f.m[3][3] = 0.30*(1.0 - 4.0*l2 + n2); dl2 = 0.3 - 1.2*x2 + 0.3*z2; dl2x = -2.4*x; dl2y = 0.0; dl2z = 0.6*z; //del4f.m[3][3] = -0.4*m2 - 2.5*(l2 - 0.6*m2)*n2; dl4 = -0.4*y2 - 2.5*(x2 - 0.6*y2)*z2; dl4x = -5.0*x*z2; dl4y = y*(3.0*z2 - 0.8); dl4z = -5.0*(x2 - 0.6*y2)*z; MAKE_ELEMENT_7(3,3); //del2f.m[3][4] = -sr09*mn; dl2 = -sr09*yz; dl2x = 0.0; dl2y = -sr09*z; dl2z = -sr09*y; //del4f.m[3][4] = -sr01*(6.0*n2 - 8.0*l2 - 1.0)*mn; dl4 = -sr01*(6.0*z2 - 8.0*x2 - 1.0)*yz; dl4x = 16.0*sr01*x*yz; dl4y = -sr01*(6.0*z2 - 8.0*x2 - 1.0)*z; dl4z = -sr01*(18.0*z2 - 8.0*x2 - 1.0)*y; MAKE_ELEMENT_7(3,4); //del2f.m[3][5] = sr09*ln; dl2 = sr09*xz; dl2x = sr09*z; dl2y = 0.0; dl2z = sr09*x; //del4f.m[3][5] = sr01*(2.0*n2 - 8.0*l2 + 3.0)*ln; dl4 = sr01*(2.0*z2 - 8.0*x2 + 3.0)*xz; dl4x = sr01*(2.0*z2 - 24.0*x2 + 3.0)*z; dl4y = 0.0; dl4z = sr01*(6.0*z2 - 8.0*x2 + 3.0)*x; MAKE_ELEMENT_7(3,5); //del2f.m[3][6] = sr06*lm; dl2 = sr06*xy; dl2x = sr06*y; dl2y = sr06*x; dl2z = 0.0; //del4f.m[3][6] = sr96*(m2 - n2 - 0.25)*lm; dl4 = sr96*(y2 - z2 - 0.25)*xy; dl4x = sr96*(y2 - z2 - 0.25)*y; dl4y = sr96*(3.0*y2 - z2 - 0.25)*x; dl4z = -2.0*sr96*z*xy; MAKE_ELEMENT_7(3,6); //del2f.m[3][7] = -sr015*(l2 - m2); dl2 = -sr015*(x2 - y2); dl2x = -2.0*sr015*x; dl2y = 2.0*sr015*y; dl2z = 0.0; //del4f.m[3][7] = sr375*(l2 - m2 + 1.4*m4 - 1.2*l2*m2 - l4); dl4 = sr375*(x2 - y2 + 1.4*y4 - 1.2*x2*y2 - x4); dl4x = sr375*(2.0 - 2.4*y2 - 4.0*x2)*x; dl4y = sr375*(-2.0 + 5.6*y2 - 2.4*x2)*y; dl4z = 0.0; MAKE_ELEMENT_7(3,7); //del2f.m[4][4] = 0.0; dl2 = 0.0; dl2x = 0.0; dl2y = 0.0; dl2z = 0.0; //del4f.m[4][4] = (2.0 - 3.0*n2)*n2 - 4.0*l2*m2; dl4 = (2.0 - 3.0*z2)*z2 - 4.0*x2*y2; dl4x = -8.0*x*y2; dl4y = -8.0*x2*y; dl4z = (4.0 - 12.0*z2)*z; MAKE_ELEMENT_7(4,4); //del2f.m[4][5] = 0.0; dl2 = 0.0; dl2x = 0.0; dl2y = 0.0; dl2z = 0.0; //del4f.m[4][5] = 2.0*(l2 - m2)*lm; dl4 = 2.0*(x2 - y2)*xy; dl4x = 2.0*(3.0*x2 - y2)*y; dl4y = 2.0*(x2 - 3.0*y2)*x; dl4z = 0.0; MAKE_ELEMENT_7(4,5); //del2f.m[4][6] = sr15*ln; dl2 = sr15*xz; dl2x = sr15*z; dl2y = 0.0; dl2z = sr15*x; //del4f.m[4][6] = -sr15*(2.0*n2 - 1.0)*ln; dl4 = -sr15*(2.0*z2 - 1.0)*xz; dl4x = -sr15*(2.0*z2 - 1.0)*z; dl4y = 0.0; dl4z = -sr15*(6.0*z2 - 1.0)*x; MAKE_ELEMENT_7(4,6); //del2f.m[4][7] = sr15*mn; dl2 = sr15*yz; dl2x = 0.0; dl2y = sr15*z; dl2z = sr15*y; //del4f.m[4][7] = -sr15*(2.0*n2 - 1.0)*mn; dl4 = -sr15*(2.0*z2 - 1.0)*yz; dl4x = 0.0; dl4y = -sr15*(2.0*z2 - 1.0)*z; dl4z = -sr15*(6.0*z2 - 1.0)*y; MAKE_ELEMENT_7(4,7); //del2f.m[5][5] = 0.0; dl2 = 0.0; dl2x = 0.0; dl2y = 0.0; dl2z = 0.0; //del4f.m[5][5] = -pow((2.0*n2 - 1.0),2) + 4.0*l2*m2; t1 = 2.0*z2 - 1.0; dl4 = -t1*t1 + 4.0*x2*y2; dl4x = 8.0*x*y2; dl4y = 8.0*x2*y; dl4z = -8.0*t1*z; MAKE_ELEMENT_7(5,5); //del2f.m[5][6] = -sr15*mn; dl2 = -sr15*yz; dl2x = 0.0; dl2y = -sr15*z; dl2z = -sr15*y; //del4f.m[5][6] = sr15*(2.0*n2 - 1.0)*mn; dl4 = sr15*(2.0*z2 - 1.0)*yz; dl4x = 0.0; dl4y = sr15*(2.0*z2 - 1.0)*z; dl4z = sr15*(6.0*z2 - 1.0)*y; MAKE_ELEMENT_7(5,6); //del2f.m[5][7] = sr15*ln; dl2 = sr15*xz; dl2x = sr15*z; dl2y = 0.0; dl2z = sr15*x; //del4f.m[5][7] = -sr15*(2.0*n2 - 1.0)*ln; dl4 = -sr15*(2.0*z2 - 1.0)*xz; dl4x = -sr15*(2.0*z2 - 1.0)*z; dl4y = 0.0; dl4z = -sr15*(6.0*z2 - 1.0)*x; MAKE_ELEMENT_7(5,7); //del2f.m[6][6] = -(3.0*n2 - 1.0)/2.0; dl2 = 0.5 - 1.5*z2; dl2x = 0.0; dl2y = 0.0; dl2z = -3.0*z; //del4f.m[6][6] = 1.5*(n2 - 1.0)*n2; dl4 
= (1.5*z2 - 1.5)*z2; dl4x = 0.0; dl4y = 0.0; dl4z = (6.0*z2 - 3.0)*z; MAKE_ELEMENT_7(6,6); //del2f.m[6][7] = 0.0; dl2 = 0.0; dl2x = 0.0; dl2y = 0.0; dl2z = 0.0; //del4f.m[6][7] = 0.0; dl4 = 0.0; dl4x = 0.0; dl4y = 0.0; dl4z = 0.0; MAKE_ELEMENT_7(6,7); //del2f.m[7][7] = -(3.0*n2 - 1.0)/2.0; dl2 = 0.5 - 1.5*z2; dl2x = 0.0; dl2y = 0.0; dl2z = -3.0*z; //del4f.m[7][7] = 1.5*(n2 - 1.0)*n2; dl4 = (1.5*z2 - 1.5)*z2; dl4x = 0.0; dl4y = 0.0; dl4z = (6.0*z2 - 3.0)*z; MAKE_ELEMENT_7(7,7); } /************************************************************************/ /* ---------------------------------------------------------------------- * Fast Model Generalized Pseudopotential Theory (MGPT) interatomic * potential routine. * * Copyright (2015) Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * Written by Tomas Oppelstrup ([email protected]) and John Moriarty * ([email protected]) * LLNL-CODE-674031 All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (as published by the * Free Software Foundation) version 2, dated June 1991. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the * GNU General Public License for more details. * * LLNL Preamble Notice * A. This notice is required to be provided under our contract with the * U.S. Department of Energy (DOE). This work was performed under the auspices * of the DOE by Lawrence Livermore National Laboratory under Contract No. * DE-AC52-07NA27344. * * B. Neither the United States Government nor Lawrence Livermore National * Security, LLC nor any of their employees, makes any warranty, express or * implied, or assumes any liability or responsibility for the accuracy, * completeness, or usefulness of any information, apparatus, product, or * process disclosed, or represents that its use would not infringe * privately-owned rights. * * C. Also, reference herein to any specific commercial products, process, * or services by trade name, trademark, manufacturer or otherwise does not * necessarily constitute or imply its endorsement, recommendation, or * favoring by the United States Government or Lawrence Livermore National * Security, LLC. The views and opinions of authors expressed herein do not * necessarily state or reflect those of the United States Government or * Lawrence Livermore National Security, LLC, and shall not be used for * advertising or product endorsement purposes. ------------------------------------------------------------------------- */
1
31444
What's the reasoning to keep these always-true or dead-code blocks?
lammps-lammps
cpp
@@ -351,4 +351,13 @@ $(document).ready(function() {
             }
         });
     }
+
+    $('body').on('click', '.tab-buttons > div', function() {
+        if ($(this).next().length === 1) {
+            $('.widget-content').addClass('hide-zoom');
+        }
+        else {
+            $('.widget-content').removeClass('hide-zoom');
+        }
+    });
 });
1
'use strict'; /* jshint undef: true, unused: true */ /* globals app, $, countlyGlobal, components, countlyCommon, countlySegmentation, countlyUserdata, CountlyHelpers, jQuery, countlyManagementView, Backbone */ app.addAppManagementView('push', jQuery.i18n.map['push.plugin-title'], countlyManagementView.extend({ initialize: function() { this.plugin = 'push'; this.templatePath = '/push/templates/push.html'; this.resetTemplateData(); }, resetTemplateData: function() { var c = this.config(); if (c.i && c.i._id) { this.templateData = { i: { _id: c.i._id, type: c.i.type, key: c.i.key, team: c.i.team, bundle: c.i.bundle, help: c.i.type === 'apn_universal' && c.i._id ? '<i class="fa fa-check-circle"></i>' + jQuery.i18n.map['mgmt-plugins.push.uploaded.p12'] : c.i.type === 'apn_token' ? '<i class="fa fa-check-circle"></i>' + jQuery.i18n.map['mgmt-plugins.push.uploaded.p8'] : '' // help: '<a href="' + countlyCommon.API_URL + '/i/pushes/download/' + c.i._id + '?api_key=' + countlyGlobal.member.api_key + '">' + jQuery.i18n.map['mgmt-plugins.push.uploaded'] + '</a>. ' + (c.i.type === 'apn_universal' ? (jQuery.i18n.map['mgmt-plugins.push.uploaded.bundle'] + ' ' + c.i.bundle) : '') } }; } else { this.templateData = { i: { type: 'apn_token', key: '', team: '', bundle: '', } }; } var t = c.a && c.a && c.a.key ? jQuery.i18n.map['mgmt-plugins.push.detected'] + ' ' + (c.a.key.length > 50 ? 'FCM' : 'GCM') : ''; this.templateData.a = { _id: c.a && c.a._id || '', key: c.a && c.a && c.a.key || '', help: c.a && c.a && c.a.key && c.a.key.length > 50 ? t : '', ehelp: c.a && c.a && c.a.key && c.a.key.length < 50 ? t : '' }; this.templateData.rate = { rate: c.rate && c.rate.rate || '', period: c.rate && c.rate.period || '' }; }, onChange: function(name, value) { if (name === 'i.type') { this.resetTemplateData(); countlyCommon.dot(this.templateData, name, value); this.render(); } else if (name === 'a.key' && value) { this.templateData.a.type = value.length > 100 ? 
'fcm' : 'gcm'; this.el.find('input[name="a.type"]').val(this.templateData.a.type); } else if (name === 'i.pass' && !value) { delete this.templateData.i.pass; } }, isSaveAvailable: function() { var td = JSON.parse(JSON.stringify(this.templateData)), std = JSON.parse(this.savedTemplateData); if (td.i) { delete td.i.pass; } if (std.i) { delete std.i.pass; } return JSON.stringify(td) !== JSON.stringify(std); }, validate: function() { var i = this.config().i || {}, //a = this.config().a || {}, t = this.templateData; if (t.i.type) { if (t.i.file && t.i.file.length) { if (t.i.type === 'apn_token') { if (!t.i.key) { return jQuery.i18n.map['mgmt-plugins.push.error.nokey']; } if (!t.i.team) { return jQuery.i18n.map['mgmt-plugins.push.error.noteam']; } if (!t.i.bundle) { return jQuery.i18n.map['mgmt-plugins.push.error.nobundle']; } } } else { if (t.i.type === 'apn_token') { if ((t.i.key || '') !== (i.key || '') || (t.i.team || '') !== (i.team || '') || (t.i.bundle || '') !== (i.bundle || '')) { return jQuery.i18n.map['mgmt-plugins.push.error.nofile']; } } } } }, loadFile: function() { var data = JSON.parse(JSON.stringify(this.templateData)); if (data.i.file) { if (data.i.file.indexOf('.p8') === data.i.file.length - 3) { data.i.fileType = 'p8'; } else if (data.i.file.indexOf('.p12') === data.i.file.length - 4) { data.i.fileType = 'p12'; } else { return $.Deferred().reject('File type not supported'); } var d = new $.Deferred(), reader = new window.FileReader(); reader.addEventListener('load', function() { data.i.file = reader.result; d.resolve({push: data}); }); reader.addEventListener('error', d.reject.bind(d)); reader.readAsDataURL(this.el.find('input[name="i.file"]')[0].files[0]); return d.promise(); } else { return $.when({push: data}); } }, prepare: function() { // var text = jQuery.i18n.map["plugins.confirm"]; // var msg = { title: jQuery.i18n.map["plugins.processing"], message: jQuery.i18n.map["plugins.wait"], info: jQuery.i18n.map["plugins.hold-on"], sticky: true }; // CountlyHelpers.confirm(text, "popStyleGreen popStyleGreenWide", function (result) { // if (!result) { // return true; // } // CountlyHelpers.notify(msg); // app.activeView.togglePlugin(plugins); // },[jQuery.i18n.map["common.no-dont-continue"],jQuery.i18n.map["plugins.yes-i-want-to-apply-changes"]],{title:jQuery.i18n.map["plugins-apply-changes-to-plugins"],image:"apply-changes-to-plugins"}); return this.loadFile().then(function(data) { delete data.push.i.help; delete data.push.a.help; if (!data.push.i.file && !data.push.i._id) { data.push.i = null; } else if (data.push.i.file) { delete data.push.i._id; } if (!data.push.a.key) { data.push.a = null; } return data; }); } })); app.addPageScript('/drill#', function() { if (Array.isArray(countlyGlobal.member.restrict) && countlyGlobal.member.restrict.indexOf('#/messaging') !== -1) { return; } if (countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type === 'mobile') { if (countlyGlobal.member.global_admin || (countlyGlobal.member.admin_of && countlyGlobal.member.admin_of.indexOf(countlyCommon.ACTIVE_APP_ID) !== -1)) { var content = '<div class="item" id="action-create-message">' + '<div class="item-icon">' + '<span class="logo ion-chatbox-working"></span>' + '</div>' + '<div class="content">' + '<div class="title" data-localize="pu.send-message"></div>' + '<div class="subtitle" data-localize="pu.send-message-desc"></div>' + '</div>' + '</div>'; $('#actions-popup').append(content); app.localize(); $('#action-create-message').off('click').on('click', function() { var message = { apps: 
[countlyCommon.ACTIVE_APP_ID], drillConditions: countlySegmentation.getRequestData() }; // for (var k in filterData.dbFilter) { // if (k.indexOf('up.') === 0) message.conditions[k.substr(3).replace("cmp_","cmp.")] = filterData.dbFilter[k]; // } components.push.popup.show(message); app.recordEvent({ "key": "drill-action", "count": 1, "segmentation": {action: "push"} }); }); $('#bookmark-view').on('click', '.bookmark-action.send', function() { var filter = $(this).data('query'); var message = { apps: [countlyCommon.ACTIVE_APP_ID], drillConditions: filter }; // for (var k in filter) { // if (k.indexOf('up.') === 0) message.conditions[k.substr(3).replace("cmp_","cmp.")] = filter[k]; // } components.push.popup.show(message); }); } else { $('#drill-actions').remove('.btn-create-message'); } } }); /** * Modify user profile views with push additions **/ function modifyUserDetailsForPush() { if (Array.isArray(countlyGlobal.member.restrict) && countlyGlobal.member.restrict.indexOf('#/messaging') !== -1) { return; } if (Backbone.history.fragment.indexOf('manage/') === -1 && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type === 'mobile') { //check if it is profile view if (app.activeView.updateEngagement) { var userDetails = countlyUserdata.getUserdetails(); var tokens = [], platforms = [], test = false, prod = false; tokens = Object.keys(userDetails).filter(function(k) { return k.indexOf('tk') === 0; }).map(function(k) { return k.substr(2); }); if (userDetails.tkid || userDetails.tkia || userDetails.tkip) { platforms.push('i'); } if (userDetails.tkat || userDetails.tkap) { platforms.push('a'); } test = !!userDetails.tkid || !!userDetails.tkia || !!userDetails.tkat; prod = !!userDetails.tkip || !!userDetails.tkap; if (tokens.length && (countlyGlobal.member.global_admin || (countlyGlobal.member.admin_of && countlyGlobal.member.admin_of.indexOf(countlyCommon.ACTIVE_APP_ID) !== -1))) { if (!$('.btn-create-message').length) { $('#user-profile-detail-buttons .cly-button-menu').append('<div class="item btn-create-message" >' + jQuery.i18n.map['push.create'] + '</div>'); app.activeView.resetExportSubmenu(); } $('.btn-create-message').show().off('click').on('click', function() { if (platforms.length) { components.push.popup.show({ platforms: platforms, apps: [countlyCommon.ACTIVE_APP_ID], test: test && !prod, userConditions: {_id: app.userdetailsView.user_id} }); } else { CountlyHelpers.alert(jQuery.i18n.map['push.no-user-token'], 'red'); } }); if (!$('#userdata-info > tbody > tr:last-child table .user-property-push').length) { $('<tr class="user-property-push"><td class="text-left"><span>' + components.t('userdata.push') + '</span></td><td class="text-right"></td></tr>').appendTo($('#userdata-info > tbody > tr:last-child table tbody')); } $('#userdata-info > tbody > tr:last-child table .user-property-push td.text-right').html(tokens.map(function(t) { return components.t('pu.tk.' 
+ t); }).join('<br />')); } else { $('#userdata-info > tbody > tr:last-child table .user-property-push').remove(); $('.btn-create-message').remove(); app.activeView.resetExportSubmenu(); } } else { //list view if (countlyGlobal.member.global_admin || (countlyGlobal.member.admin_of && countlyGlobal.member.admin_of.indexOf(countlyCommon.ACTIVE_APP_ID) !== -1)) { if (!$('.btn-create-message').length) { $('.widget-header').append($('<a class="icon-button green btn-header right btn-create-message" data-localize="push.create"></a>').text(jQuery.i18n.map['push.create'])); } $('.btn-create-message').off('click').on('click', function() { var q = app.userdataView.getExportQuery().query, filterData = {}; if (q) { try { filterData = JSON.parse(q); } catch (ignored) { //ignoring error } } components.push.popup.show({ apps: [countlyCommon.ACTIVE_APP_ID], userConditions: filterData }); }); } else { $('.btn-create-message').remove(); } } } } app.addRefreshScript('/users#', modifyUserDetailsForPush); app.addPageScript('/users#', modifyUserDetailsForPush); $(document).ready(function() { app.addMenuForType("mobile", "reach", {code: "push", text: "push.sidebar.section", icon: '<div class="logo ion-chatbox-working"></div>', priority: 10}); app.addSubMenuForType("mobile", "push", {code: "messaging", url: "#/messaging", text: "push.sidebar.overview", priority: 10}); if (app.configurationsView) { app.configurationsView.registerLabel("push", "push.plugin-title"); app.configurationsView.registerLabel("push.proxyhost", "push.proxyhost"); app.configurationsView.registerLabel("push.proxyport", "push.proxyport"); } var notes = countlyGlobal.member.notes; if (notes && notes.push && notes.push.gcm && notes.push.gcm !== true) { CountlyHelpers.notify({ type: 'error', title: jQuery.i18n.map['push.note.gcm.t'], message: jQuery.i18n.prop('push.note.gcm.m', notes.push.gcm.apps.map(function(a) { return a.name; }).join(', ')), sticky: true, onClick: function() { return $.ajax({ type: "GET", url: countlyCommon.API_URL + "/i/users/ack", data: { path: 'push.gcm' }, success: function() { notes.push.gcm = true; } }); } }); } });
idx: 1
id: 13,541
msg: It would be good practice to do `.off('click', '.tab-buttons > div').on('click', '.tab-buttons > div', function() {` so the same delegated click handler does not get bound more than once.
proj: Countly-countly-server
lang: js
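As a side note on the suggestion above, here is a minimal TypeScript sketch of the `.off(...).on(...)` re-binding pattern applied to the handler added in that patch (TypeScript is used for all illustrative snippets here, since the records themselves span several languages). The `bindTabZoomToggle` wrapper name is invented for this illustration; this is not the Countly code itself.

import $ from "jquery";

// Re-bind the delegated handler: removing any previous binding first means the
// handler cannot stack up if this initialisation code is executed again.
function bindTabZoomToggle(): void {
  $("body")
    .off("click", ".tab-buttons > div")
    .on("click", ".tab-buttons > div", function (this: HTMLElement) {
      // Mirrors the patched logic: toggle the 'hide-zoom' class depending on
      // whether the clicked tab button has a following sibling.
      if ($(this).next().length === 1) {
        $(".widget-content").addClass("hide-zoom");
      }
      else {
        $(".widget-content").removeClass("hide-zoom");
      }
    });
}

bindTabZoomToggle();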
@@ -307,9 +307,12 @@ func buildControllerContext(ctx context.Context, opts *options.ControllerOptions KubeSharedInformerFactory: kubeSharedInformerFactory, SharedInformerFactory: sharedInformerFactory, GWShared: gwSharedInformerFactory, - Namespace: opts.Namespace, - Clock: clock.RealClock{}, - Metrics: metrics.New(log, clock.RealClock{}), + // TODO (@jakexks) / code reviewer: should this be automatically enabled or disabled based on discovering the gateway + // api or a flag? + GatewaySolverEnabled: true, + Namespace: opts.Namespace, + Clock: clock.RealClock{}, + Metrics: metrics.New(log, clock.RealClock{}), ACMEOptions: controller.ACMEOptions{ HTTP01SolverImage: opts.ACMEHTTP01SolverImage, HTTP01SolverResourceRequestCPU: HTTP01SolverResourceRequestCPU,
y: 1
/* Copyright 2020 The cert-manager Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package app import ( "context" "errors" "fmt" "net" "net/http" "os" "time" "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" utilerrors "k8s.io/apimachinery/pkg/util/errors" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" clientv1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" "k8s.io/utils/clock" gwapi "sigs.k8s.io/gateway-api/apis/v1alpha1" gwclient "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned" gwscheme "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned/scheme" gwinformers "sigs.k8s.io/gateway-api/pkg/client/informers/externalversions" "github.com/jetstack/cert-manager/cmd/controller/app/options" cmdutil "github.com/jetstack/cert-manager/cmd/util" "github.com/jetstack/cert-manager/pkg/acme/accounts" clientset "github.com/jetstack/cert-manager/pkg/client/clientset/versioned" intscheme "github.com/jetstack/cert-manager/pkg/client/clientset/versioned/scheme" informers "github.com/jetstack/cert-manager/pkg/client/informers/externalversions" "github.com/jetstack/cert-manager/pkg/controller" shimgw "github.com/jetstack/cert-manager/pkg/controller/certificate-shim/gateways" "github.com/jetstack/cert-manager/pkg/controller/clusterissuers" dnsutil "github.com/jetstack/cert-manager/pkg/issuer/acme/dns/util" logf "github.com/jetstack/cert-manager/pkg/logs" "github.com/jetstack/cert-manager/pkg/metrics" "github.com/jetstack/cert-manager/pkg/util" ) const controllerAgentName = "cert-manager" // This sets the informer's resync period to 10 hours // following the controller-runtime defaults //and following discussion: https://github.com/kubernetes-sigs/controller-runtime/pull/88#issuecomment-408500629 const resyncPeriod = 10 * time.Hour func Run(opts *options.ControllerOptions, stopCh <-chan struct{}) error { rootCtx := cmdutil.ContextWithStopCh(context.Background(), stopCh) rootCtx, cancelContext := context.WithCancel(rootCtx) defer cancelContext() g, rootCtx := errgroup.WithContext(rootCtx) rootCtx = logf.NewContext(rootCtx, nil, "controller") log := logf.FromContext(rootCtx) ctx, kubeCfg, err := buildControllerContext(rootCtx, opts) if err != nil { return fmt.Errorf("error building controller context (options %v): %v", opts, err) } enabledControllers := opts.EnabledControllers() log.Info(fmt.Sprintf("enabled controllers: %s", enabledControllers.List())) ln, err := net.Listen("tcp", opts.MetricsListenAddress) if err != nil { return fmt.Errorf("failed to listen on prometheus address %s: %v", opts.MetricsListenAddress, err) } server := ctx.Metrics.NewServer(ln, opts.EnablePprof) g.Go(func() error { <-rootCtx.Done() // allow a timeout for graceful shutdown ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if err := server.Shutdown(ctx); err != nil { return err } return nil }) g.Go(func() error { log.V(logf.InfoLevel).Info("starting metrics server", "address", ln.Addr()) if err := server.Serve(ln); err != http.ErrServerClosed { return err } return nil }) elected := make(chan struct{}) if opts.LeaderElect { g.Go(func() error { log.V(logf.InfoLevel).Info("starting leader election") leaderElectionClient, err := kubernetes.NewForConfig(rest.AddUserAgent(kubeCfg, "leader-election")) if err != nil { return fmt.Errorf("error creating leader election client: %v", err) } errorCh := make(chan error, 1) if err := startLeaderElection(rootCtx, opts, leaderElectionClient, ctx.Recorder, leaderelection.LeaderCallbacks{ OnStartedLeading: func(_ context.Context) { close(elected) }, OnStoppedLeading: func() { select { case <-rootCtx.Done(): // context was canceled, just return return default: errorCh <- errors.New("leader election lost") } }, }); err != nil { return err } select { case err := <-errorCh: return err default: return nil } }) } else { close(elected) } select { case <-rootCtx.Done(): // Exit early if we are shutting down or if the errgroup has already exited with an error // Wait for error group to complete and return return g.Wait() case <-elected: // Don't launch the controllers unless we have been elected leader // Continue with setting up controller } for n, fn := range controller.Known() { log := log.WithValues("controller", n) // only run a controller if it's been enabled if !enabledControllers.Has(n) { log.V(logf.InfoLevel).Info("not starting controller as it's disabled") continue } // don't run clusterissuers controller if scoped to a single namespace if ctx.Namespace != "" && n == clusterissuers.ControllerName { log.V(logf.InfoLevel).Info("not starting controller as cert-manager has been scoped to a single namespace") continue } iface, err := fn(ctx) if err != nil { err = fmt.Errorf("error starting controller: %v", err) cancelContext() err2 := g.Wait() // Don't process errors, we already have an error if err2 != nil { return utilerrors.NewAggregate([]error{err, err2}) } return err } g.Go(func() error { log.V(logf.InfoLevel).Info("starting controller") // TODO: make this either a constant or a command line flag workers := 5 return iface.Run(workers, rootCtx.Done()) }) } log.V(logf.DebugLevel).Info("starting shared informer factories") ctx.SharedInformerFactory.Start(rootCtx.Done()) ctx.KubeSharedInformerFactory.Start(rootCtx.Done()) ctx.GWShared.Start(rootCtx.Done()) err = g.Wait() if err != nil { return fmt.Errorf("error starting controller: %v", err) } log.V(logf.InfoLevel).Info("control loops exited") return nil } func buildControllerContext(ctx context.Context, opts *options.ControllerOptions) (*controller.Context, *rest.Config, error) { log := logf.FromContext(ctx, "build-context") // Load the users Kubernetes config kubeCfg, err := clientcmd.BuildConfigFromFlags(opts.APIServerHost, opts.Kubeconfig) if err != nil { return nil, nil, fmt.Errorf("error creating rest config: %s", err.Error()) } kubeCfg.QPS = opts.KubernetesAPIQPS kubeCfg.Burst = opts.KubernetesAPIBurst // Add User-Agent to client kubeCfg = rest.AddUserAgent(kubeCfg, util.CertManagerUserAgent) // Create a cert-manager api client intcl, err := clientset.NewForConfig(kubeCfg) if err != nil { return nil, nil, fmt.Errorf("error creating internal group client: %s", err.Error()) } // Create a Kubernetes api client cl, err := kubernetes.NewForConfig(kubeCfg) 
if err != nil { return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error()) } // cert-manager will try watching the Gateway resources with an exponential // back-off, which allows the user to install the CRDs after cert-manager // itself. Let's let the user know that the CRDs have not been found yet. if opts.EnabledControllers().Has(shimgw.ControllerName) { d := cl.Discovery() resources, err := d.ServerResourcesForGroupVersion(gwapi.GroupVersion.String()) switch { case apierrors.IsNotFound(err): log.Info("the Gateway API CRDs do not seem to be present, cert-manager will keep retrying watching for them") case err != nil: return nil, nil, fmt.Errorf("while checking if the Gateway API CRD is installed: %s", err.Error()) case len(resources.APIResources) == 0: log.Info("the Gateway API CRDs do not seem to be present, cert-manager will keep retrying watching for them") } } // Create a GatewayAPI client. gwcl, err := gwclient.NewForConfig(kubeCfg) if err != nil { return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error()) } nameservers := opts.DNS01RecursiveNameservers if len(nameservers) == 0 { nameservers = dnsutil.RecursiveNameservers } log.V(logf.InfoLevel).WithValues("nameservers", nameservers).Info("configured acme dns01 nameservers") HTTP01SolverResourceRequestCPU, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceRequestCPU) if err != nil { return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceRequestCPU: %s", err.Error()) } HTTP01SolverResourceRequestMemory, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceRequestMemory) if err != nil { return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceRequestMemory: %s", err.Error()) } HTTP01SolverResourceLimitsCPU, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceLimitsCPU) if err != nil { return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceLimitsCPU: %s", err.Error()) } HTTP01SolverResourceLimitsMemory, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceLimitsMemory) if err != nil { return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceLimitsMemory: %s", err.Error()) } // Create event broadcaster // Add cert-manager types to the default Kubernetes Scheme so Events can be // logged properly intscheme.AddToScheme(scheme.Scheme) gwscheme.AddToScheme(scheme.Scheme) log.V(logf.DebugLevel).Info("creating event broadcaster") eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(logf.WithInfof(log.V(logf.DebugLevel)).Infof) eventBroadcaster.StartRecordingToSink(&clientv1.EventSinkImpl{Interface: cl.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(intcl, resyncPeriod, informers.WithNamespace(opts.Namespace)) kubeSharedInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(cl, resyncPeriod, kubeinformers.WithNamespace(opts.Namespace)) gwSharedInformerFactory := gwinformers.NewSharedInformerFactoryWithOptions(gwcl, resyncPeriod, gwinformers.WithNamespace(opts.Namespace)) acmeAccountRegistry := accounts.NewDefaultRegistry() return &controller.Context{ RootContext: ctx, StopCh: ctx.Done(), RESTConfig: kubeCfg, Client: cl, CMClient: intcl, GWClient: gwcl, DiscoveryClient: cl.Discovery(), Recorder: recorder, KubeSharedInformerFactory: kubeSharedInformerFactory, SharedInformerFactory: sharedInformerFactory, GWShared: gwSharedInformerFactory, 
Namespace: opts.Namespace, Clock: clock.RealClock{}, Metrics: metrics.New(log, clock.RealClock{}), ACMEOptions: controller.ACMEOptions{ HTTP01SolverImage: opts.ACMEHTTP01SolverImage, HTTP01SolverResourceRequestCPU: HTTP01SolverResourceRequestCPU, HTTP01SolverResourceRequestMemory: HTTP01SolverResourceRequestMemory, HTTP01SolverResourceLimitsCPU: HTTP01SolverResourceLimitsCPU, HTTP01SolverResourceLimitsMemory: HTTP01SolverResourceLimitsMemory, DNS01CheckAuthoritative: !opts.DNS01RecursiveNameserversOnly, DNS01Nameservers: nameservers, AccountRegistry: acmeAccountRegistry, DNS01CheckRetryPeriod: opts.DNS01CheckRetryPeriod, }, IssuerOptions: controller.IssuerOptions{ ClusterIssuerAmbientCredentials: opts.ClusterIssuerAmbientCredentials, IssuerAmbientCredentials: opts.IssuerAmbientCredentials, ClusterResourceNamespace: opts.ClusterResourceNamespace, }, IngressShimOptions: controller.IngressShimOptions{ DefaultIssuerName: opts.DefaultIssuerName, DefaultIssuerKind: opts.DefaultIssuerKind, DefaultIssuerGroup: opts.DefaultIssuerGroup, DefaultAutoCertificateAnnotations: opts.DefaultAutoCertificateAnnotations, }, CertificateOptions: controller.CertificateOptions{ EnableOwnerRef: opts.EnableCertificateOwnerRef, CopiedAnnotationPrefixes: opts.CopiedAnnotationPrefixes, }, SchedulerOptions: controller.SchedulerOptions{ MaxConcurrentChallenges: opts.MaxConcurrentChallenges, }, }, kubeCfg, nil } func startLeaderElection(ctx context.Context, opts *options.ControllerOptions, leaderElectionClient kubernetes.Interface, recorder record.EventRecorder, callbacks leaderelection.LeaderCallbacks) error { // Identity used to distinguish between multiple controller manager instances id, err := os.Hostname() if err != nil { return fmt.Errorf("error getting hostname: %v", err) } // Set up Multilock for leader election. This Multilock is here for the // transitionary period from configmaps to leases see // https://github.com/kubernetes-sigs/controller-runtime/pull/1144#discussion_r480173688 lockName := "cert-manager-controller" lc := resourcelock.ResourceLockConfig{ Identity: id + "-external-cert-manager-controller", EventRecorder: recorder, } ml, err := resourcelock.New(resourcelock.ConfigMapsLeasesResourceLock, opts.LeaderElectionNamespace, lockName, leaderElectionClient.CoreV1(), leaderElectionClient.CoordinationV1(), lc, ) if err != nil { return fmt.Errorf("error creating leader election lock: %v", err) } // Try and become the leader and start controller manager loops le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ Lock: ml, LeaseDuration: opts.LeaderElectionLeaseDuration, RenewDeadline: opts.LeaderElectionRenewDeadline, RetryPeriod: opts.LeaderElectionRetryPeriod, ReleaseOnCancel: true, Callbacks: callbacks, }) if err != nil { return err } le.Run(ctx) return nil }
idx: 1
id: 28,712
msg: I suggest that we rely on `--controllers='*,gateway-shim'` for now; we can then move from `--controllers='*,gateway-shim'` to automatically enabling Gateway API support on startup via the discovery API in 1.6 or 1.7. What do you think? Note that the logic I wrote in e5436df521015057e77de3fe02c174ea8a863b93 should also use this `GatewaySolverEnabled` flag, I think.
proj: jetstack-cert-manager
lang: go
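To make the trade-off in the comment above concrete, here is a small sketch of the two ways the gateway solver could be switched on: explicit opt-in via a `--controllers`-style list now, or automatic enabling once the Gateway API is discovered at startup. It is kept in TypeScript for consistency with the other illustrative snippet even though cert-manager itself is Go; the function name, the wildcard handling, and the `gatewayAPIDiscovered` input are assumptions for illustration, not cert-manager's actual behaviour.

// Decide whether the gateway solver should run.
// `controllersFlag` is a comma-separated list such as "*,gateway-shim".
function gatewaySolverEnabled(controllersFlag: string, gatewayAPIDiscovered: boolean): boolean {
  const entries = controllersFlag.split(",").map((e) => e.trim());
  // Current suggestion: require the operator to opt in explicitly.
  if (entries.includes("gateway-shim")) {
    return true;
  }
  // Possible later behaviour (assumed): enable automatically when the
  // Gateway API CRDs are found via discovery at startup.
  return gatewayAPIDiscovered;
}

// Usage:
console.log(gatewaySolverEnabled("*,gateway-shim", false)); // true - explicit opt-in
console.log(gatewaySolverEnabled("*", true));               // true only under the assumed auto-detection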
@@ -1180,6 +1180,7 @@ public class BesuCommand implements DefaultCommandValues, Runnable { try { configureLogging(true); + instantiateSignatureAlgorithmFactory(); configureNativeLibs(); logger.info("Starting Besu version: {}", BesuInfo.nodeName(identityString)); // Need to create vertx after cmdline has been parsed, such that metricsSystem is configurable
y: 1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.cli; import static com.google.common.base.Preconditions.checkNotNull; import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; import static org.hyperledger.besu.cli.DefaultCommandValues.getDefaultBesuDataPath; import static org.hyperledger.besu.cli.config.NetworkName.MAINNET; import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG; import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH; import static org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration.DEFAULT_GRAPHQL_HTTP_PORT; import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT; import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_JSON_RPC_APIS; import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT; import static org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration.QIP714_DEFAULT_BLOCK; import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES; import static org.hyperledger.besu.metrics.MetricsProtocol.PROMETHEUS; import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PORT; import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PUSH_PORT; import static org.hyperledger.besu.nat.kubernetes.KubernetesNatManager.DEFAULT_BESU_SERVICE_NAME_FILTER; import org.hyperledger.besu.BesuInfo; import org.hyperledger.besu.Runner; import org.hyperledger.besu.RunnerBuilder; import org.hyperledger.besu.chainexport.RlpBlockExporter; import org.hyperledger.besu.chainimport.JsonBlockImporter; import org.hyperledger.besu.chainimport.RlpBlockImporter; import org.hyperledger.besu.cli.config.EthNetworkConfig; import org.hyperledger.besu.cli.config.NetworkName; import org.hyperledger.besu.cli.converter.MetricCategoryConverter; import org.hyperledger.besu.cli.converter.PercentageConverter; import org.hyperledger.besu.cli.converter.RpcApisConverter; import org.hyperledger.besu.cli.custom.CorsAllowedOriginsProperty; import org.hyperledger.besu.cli.custom.JsonRPCAllowlistHostsProperty; import org.hyperledger.besu.cli.custom.RpcAuthFileValidator; import org.hyperledger.besu.cli.error.BesuExceptionHandler; import org.hyperledger.besu.cli.options.unstable.DataStorageOptions; import org.hyperledger.besu.cli.options.unstable.DnsOptions; import org.hyperledger.besu.cli.options.unstable.EthProtocolOptions; import org.hyperledger.besu.cli.options.unstable.EthstatsOptions; import org.hyperledger.besu.cli.options.unstable.LauncherOptions; import org.hyperledger.besu.cli.options.unstable.MetricsCLIOptions; import org.hyperledger.besu.cli.options.unstable.MiningOptions; import org.hyperledger.besu.cli.options.unstable.NatOptions; import 
org.hyperledger.besu.cli.options.unstable.NativeLibraryOptions; import org.hyperledger.besu.cli.options.unstable.NetworkingOptions; import org.hyperledger.besu.cli.options.unstable.RPCOptions; import org.hyperledger.besu.cli.options.unstable.SynchronizerOptions; import org.hyperledger.besu.cli.options.unstable.TransactionPoolOptions; import org.hyperledger.besu.cli.presynctasks.PreSynchronizationTaskRunner; import org.hyperledger.besu.cli.presynctasks.PrivateDatabaseMigrationPreSyncTask; import org.hyperledger.besu.cli.subcommands.PasswordSubCommand; import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand; import org.hyperledger.besu.cli.subcommands.RetestethSubCommand; import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand; import org.hyperledger.besu.cli.subcommands.operator.OperatorSubCommand; import org.hyperledger.besu.cli.subcommands.rlp.RLPSubCommand; import org.hyperledger.besu.cli.util.BesuCommandCustomFactory; import org.hyperledger.besu.cli.util.CommandLineUtils; import org.hyperledger.besu.cli.util.ConfigOptionSearchAndRunHandler; import org.hyperledger.besu.cli.util.VersionProvider; import org.hyperledger.besu.config.GenesisConfigFile; import org.hyperledger.besu.config.GenesisConfigOptions; import org.hyperledger.besu.config.GoQuorumOptions; import org.hyperledger.besu.config.experimental.ExperimentalEIPs; import org.hyperledger.besu.controller.BesuController; import org.hyperledger.besu.controller.BesuControllerBuilder; import org.hyperledger.besu.controller.TargetingGasLimitCalculator; import org.hyperledger.besu.crypto.KeyPair; import org.hyperledger.besu.crypto.KeyPairSecurityModule; import org.hyperledger.besu.crypto.KeyPairUtil; import org.hyperledger.besu.crypto.NodeKey; import org.hyperledger.besu.crypto.SignatureAlgorithmFactory; import org.hyperledger.besu.crypto.SignatureAlgorithmType; import org.hyperledger.besu.enclave.EnclaveFactory; import org.hyperledger.besu.enclave.GoQuorumEnclave; import org.hyperledger.besu.ethereum.api.ApiConfiguration; import org.hyperledger.besu.ethereum.api.ImmutableApiConfiguration; import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration; import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration; import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi; import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis; import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration; import org.hyperledger.besu.ethereum.api.tls.FileBasedPasswordProvider; import org.hyperledger.besu.ethereum.api.tls.TlsClientAuthConfiguration; import org.hyperledger.besu.ethereum.api.tls.TlsConfiguration; import org.hyperledger.besu.ethereum.blockcreation.GasLimitCalculator; import org.hyperledger.besu.ethereum.chain.Blockchain; import org.hyperledger.besu.ethereum.core.Address; import org.hyperledger.besu.ethereum.core.GoQuorumPrivacyParameters; import org.hyperledger.besu.ethereum.core.Hash; import org.hyperledger.besu.ethereum.core.MiningParameters; import org.hyperledger.besu.ethereum.core.PrivacyParameters; import org.hyperledger.besu.ethereum.core.Wei; import org.hyperledger.besu.ethereum.eth.sync.SyncMode; import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration; import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration; import org.hyperledger.besu.ethereum.mainnet.precompiles.AbstractAltBnPrecompiledContract; import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration; import org.hyperledger.besu.ethereum.p2p.peers.EnodeDnsConfiguration; 
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL; import org.hyperledger.besu.ethereum.p2p.peers.StaticNodesParser; import org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration; import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration; import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration; import org.hyperledger.besu.ethereum.permissioning.PermissioningConfigurationBuilder; import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration; import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProvider; import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProviderBuilder; import org.hyperledger.besu.ethereum.storage.StorageProvider; import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider; import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder; import org.hyperledger.besu.ethereum.worldstate.DefaultWorldStateArchive; import org.hyperledger.besu.ethereum.worldstate.PrunerConfiguration; import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive; import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage; import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage; import org.hyperledger.besu.metrics.BesuMetricCategory; import org.hyperledger.besu.metrics.MetricCategoryRegistryImpl; import org.hyperledger.besu.metrics.MetricsProtocol; import org.hyperledger.besu.metrics.MetricsSystemFactory; import org.hyperledger.besu.metrics.ObservableMetricsSystem; import org.hyperledger.besu.metrics.StandardMetricCategory; import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration; import org.hyperledger.besu.metrics.vertx.VertxMetricsAdapterFactory; import org.hyperledger.besu.nat.NatMethod; import org.hyperledger.besu.plugin.services.BesuConfiguration; import org.hyperledger.besu.plugin.services.BesuEvents; import org.hyperledger.besu.plugin.services.MetricsSystem; import org.hyperledger.besu.plugin.services.PicoCLIOptions; import org.hyperledger.besu.plugin.services.SecurityModuleService; import org.hyperledger.besu.plugin.services.StorageService; import org.hyperledger.besu.plugin.services.exception.StorageException; import org.hyperledger.besu.plugin.services.metrics.MetricCategory; import org.hyperledger.besu.plugin.services.metrics.MetricCategoryRegistry; import org.hyperledger.besu.plugin.services.securitymodule.SecurityModule; import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory; import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBPlugin; import org.hyperledger.besu.services.BesuEventsImpl; import org.hyperledger.besu.services.BesuPluginContextImpl; import org.hyperledger.besu.services.PicoCLIOptionsImpl; import org.hyperledger.besu.services.SecurityModuleServiceImpl; import org.hyperledger.besu.services.StorageServiceImpl; import org.hyperledger.besu.services.kvstore.InMemoryStoragePlugin; import org.hyperledger.besu.util.NetworkUtility; import org.hyperledger.besu.util.PermissioningConfigurationValidator; import org.hyperledger.besu.util.number.Fraction; import org.hyperledger.besu.util.number.Percentage; import org.hyperledger.besu.util.number.PositiveNumber; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.math.BigInteger; import java.net.InetAddress; import java.net.SocketException; import java.net.URI; import java.net.UnknownHostException; import 
java.nio.file.Path; import java.time.Clock; import java.util.ArrayList; import java.util.Base64; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.OptionalLong; import java.util.Set; import java.util.TreeMap; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import com.google.common.base.Suppliers; import com.google.common.collect.ImmutableMap; import com.google.common.io.Files; import com.google.common.io.Resources; import io.vertx.core.Vertx; import io.vertx.core.VertxOptions; import io.vertx.core.json.DecodeException; import io.vertx.core.metrics.MetricsOptions; import net.consensys.quorum.mainnet.launcher.LauncherManager; import net.consensys.quorum.mainnet.launcher.config.ImmutableLauncherConfig; import net.consensys.quorum.mainnet.launcher.exception.LauncherException; import net.consensys.quorum.mainnet.launcher.util.ParseArgsHelper; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.config.Configurator; import org.apache.tuweni.bytes.Bytes; import org.apache.tuweni.units.bigints.UInt256; import picocli.CommandLine; import picocli.CommandLine.AbstractParseResultHandler; import picocli.CommandLine.Command; import picocli.CommandLine.ExecutionException; import picocli.CommandLine.Option; import picocli.CommandLine.ParameterException; @SuppressWarnings("FieldCanBeLocal") // because Picocli injected fields report false positives @Command( description = "This command runs the Besu Ethereum client full node.", abbreviateSynopsis = true, name = "besu", mixinStandardHelpOptions = true, versionProvider = VersionProvider.class, header = "Usage:", synopsisHeading = "%n", descriptionHeading = "%nDescription:%n%n", optionListHeading = "%nOptions:%n", footerHeading = "%n", footer = "Besu is licensed under the Apache License 2.0") public class BesuCommand implements DefaultCommandValues, Runnable { @SuppressWarnings("PrivateStaticFinalLoggers") // non-static for testing private final Logger logger; private CommandLine commandLine; private final Supplier<RlpBlockImporter> rlpBlockImporter; private final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory; private final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory; // Unstable CLI options final NetworkingOptions unstableNetworkingOptions = NetworkingOptions.create(); final SynchronizerOptions unstableSynchronizerOptions = SynchronizerOptions.create(); final EthProtocolOptions unstableEthProtocolOptions = EthProtocolOptions.create(); final MetricsCLIOptions unstableMetricsCLIOptions = MetricsCLIOptions.create(); final TransactionPoolOptions unstableTransactionPoolOptions = TransactionPoolOptions.create(); private final EthstatsOptions unstableEthstatsOptions = EthstatsOptions.create(); private final DataStorageOptions unstableDataStorageOptions = DataStorageOptions.create(); private final DnsOptions unstableDnsOptions = DnsOptions.create(); private final MiningOptions unstableMiningOptions = MiningOptions.create(); private final NatOptions unstableNatOptions = NatOptions.create(); private final NativeLibraryOptions unstableNativeLibraryOptions = NativeLibraryOptions.create(); private final RPCOptions 
unstableRPCOptions = RPCOptions.create(); final LauncherOptions unstableLauncherOptions = LauncherOptions.create(); private final RunnerBuilder runnerBuilder; private final BesuController.Builder controllerBuilderFactory; private final BesuPluginContextImpl besuPluginContext; private final StorageServiceImpl storageService; private final SecurityModuleServiceImpl securityModuleService; private final Map<String, String> environment; private final MetricCategoryRegistryImpl metricCategoryRegistry = new MetricCategoryRegistryImpl(); private final MetricCategoryConverter metricCategoryConverter = new MetricCategoryConverter(); // Public IP stored to prevent having to research it each time we need it. private InetAddress autoDiscoveredDefaultIP = null; private final PreSynchronizationTaskRunner preSynchronizationTaskRunner = new PreSynchronizationTaskRunner(); private final Set<Integer> allocatedPorts = new HashSet<>(); // CLI options defined by user at runtime. // Options parsing is done with CLI library Picocli https://picocli.info/ // While this variable is never read it is needed for the PicoCLI to create // the config file option that is read elsewhere. @SuppressWarnings("UnusedVariable") @CommandLine.Option( names = {CONFIG_FILE_OPTION_NAME}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "TOML config file (default: none)") private final File configFile = null; @CommandLine.Option( names = {"--data-path"}, paramLabel = MANDATORY_PATH_FORMAT_HELP, description = "The path to Besu data directory (default: ${DEFAULT-VALUE})") final Path dataPath = getDefaultBesuDataPath(this); // Genesis file path with null default option if the option // is not defined on command line as this default is handled by Runner // to use mainnet json file from resources as indicated in the // default network option // Then we have no control over genesis default value here. @CommandLine.Option( names = {"--genesis-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Genesis file. Setting this option makes --network option ignored and requires --network-id to be set.") private final File genesisFile = null; @CommandLine.Option( names = {"--node-private-key-file"}, paramLabel = MANDATORY_PATH_FORMAT_HELP, description = "The node's private key file (default: a file named \"key\" in the Besu data folder)") private final File nodePrivateKeyFile = null; @Option( names = "--identity", paramLabel = "<String>", description = "Identification for this node in the Client ID", arity = "1") private final Optional<String> identityString = Optional.empty(); // Completely disables P2P within Besu. @Option( names = {"--p2p-enabled"}, description = "Enable P2P functionality (default: ${DEFAULT-VALUE})", arity = "1") private final Boolean p2pEnabled = true; // Boolean option to indicate if peers should NOT be discovered, default to // false indicates that // the peers should be discovered by default. // // This negative option is required because of the nature of the option that is // true when // added on the command line. You can't do --option=false, so false is set as // default // and you have not to set the option at all if you want it false. // This seems to be the only way it works with Picocli. // Also many other software use the same negative option scheme for false // defaults // meaning that it's probably the right way to handle disabling options. 
@Option( names = {"--discovery-enabled"}, description = "Enable P2P discovery (default: ${DEFAULT-VALUE})", arity = "1") private final Boolean peerDiscoveryEnabled = true; // A list of bootstrap nodes can be passed // and a hardcoded list will be used otherwise by the Runner. // NOTE: we have no control over default value here. @Option( names = {"--bootnodes"}, paramLabel = "<enode://id@host:port>", description = "Comma separated enode URLs for P2P discovery bootstrap. " + "Default is a predefined list.", split = ",", arity = "0..*") private final List<String> bootNodes = null; @Option( names = {"--max-peers"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Maximum P2P connections that can be established (default: ${DEFAULT-VALUE})") private final Integer maxPeers = DEFAULT_MAX_PEERS; @Option( names = {"--remote-connections-limit-enabled"}, description = "Whether to limit the number of P2P connections initiated remotely. (default: ${DEFAULT-VALUE})") private final Boolean isLimitRemoteWireConnectionsEnabled = true; @Option( names = {"--remote-connections-max-percentage"}, paramLabel = MANDATORY_DOUBLE_FORMAT_HELP, description = "The maximum percentage of P2P connections that can be initiated remotely. Must be between 0 and 100 inclusive. (default: ${DEFAULT-VALUE})", arity = "1", converter = PercentageConverter.class) private final Integer maxRemoteConnectionsPercentage = Fraction.fromFloat(DEFAULT_FRACTION_REMOTE_WIRE_CONNECTIONS_ALLOWED) .toPercentage() .getValue(); @Option( names = {"--random-peer-priority-enabled"}, description = "Allow for incoming connections to be prioritized randomly. This will prevent (typically small, stable) networks from forming impenetrable peer cliques. (default: ${DEFAULT-VALUE})") private final Boolean randomPeerPriority = false; @Option( names = {"--banned-node-ids", "--banned-node-id"}, paramLabel = MANDATORY_NODE_ID_FORMAT_HELP, description = "A list of node IDs to ban from the P2P network.", split = ",", arity = "1..*") void setBannedNodeIds(final List<String> values) { try { bannedNodeIds = values.stream() .filter(value -> !value.isEmpty()) .map(EnodeURL::parseNodeId) .collect(Collectors.toList()); } catch (final IllegalArgumentException e) { throw new ParameterException( commandLine, "Invalid ids supplied to '--banned-node-ids'. " + e.getMessage()); } } private Collection<Bytes> bannedNodeIds = new ArrayList<>(); @Option( names = {"--sync-mode"}, paramLabel = MANDATORY_MODE_FORMAT_HELP, description = "Synchronization mode, possible values are ${COMPLETION-CANDIDATES} (default: FAST if a --network is supplied and privacy isn't enabled. FULL otherwise.)") private SyncMode syncMode = null; @Option( names = {"--fast-sync-min-peers"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Minimum number of peers required before starting fast sync. (default: ${DEFAULT-VALUE})") private final Integer fastSyncMinPeerCount = FAST_SYNC_MIN_PEER_COUNT; @Option( names = {"--network"}, paramLabel = MANDATORY_NETWORK_FORMAT_HELP, description = "Synchronize against the indicated network, possible values are ${COMPLETION-CANDIDATES}." + " (default: MAINNET)") private final NetworkName network = null; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. 
@Option( names = {"--p2p-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Ip address this node advertises to its peers (default: ${DEFAULT-VALUE})", arity = "1") private String p2pHost = autoDiscoverDefaultIP().getHostAddress(); @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--p2p-interface"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "The network interface address on which this node listens for P2P communication (default: ${DEFAULT-VALUE})", arity = "1") private String p2pInterface = NetworkUtility.INADDR_ANY; @Option( names = {"--p2p-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port on which to listen for P2P communication (default: ${DEFAULT-VALUE})", arity = "1") private final Integer p2pPort = EnodeURL.DEFAULT_LISTENING_PORT; @Option( names = {"--nat-method"}, description = "Specify the NAT circumvention method to be used, possible values are ${COMPLETION-CANDIDATES}." + " NONE disables NAT functionality. (default: ${DEFAULT-VALUE})") private final NatMethod natMethod = DEFAULT_NAT_METHOD; @Option( names = {"--network-id"}, paramLabel = "<BIG INTEGER>", description = "P2P network identifier. (default: the selected network chain ID or custom genesis chain ID)", arity = "1") private final BigInteger networkId = null; @Option( names = {"--graphql-http-enabled"}, description = "Set to start the GraphQL HTTP service (default: ${DEFAULT-VALUE})") private final Boolean isGraphQLHttpEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--graphql-http-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Host for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})", arity = "1") private String graphQLHttpHost = autoDiscoverDefaultIP().getHostAddress(); @Option( names = {"--graphql-http-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})", arity = "1") private final Integer graphQLHttpPort = DEFAULT_GRAPHQL_HTTP_PORT; @Option( names = {"--graphql-http-cors-origins"}, description = "Comma separated origin domain URLs for CORS validation (default: none)") private final CorsAllowedOriginsProperty graphQLHttpCorsAllowedOrigins = new CorsAllowedOriginsProperty(); @Option( names = {"--rpc-http-enabled"}, description = "Set to start the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})") private final Boolean isRpcHttpEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--rpc-http-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Host for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})", arity = "1") private String rpcHttpHost = autoDiscoverDefaultIP().getHostAddress(); @Option( names = {"--rpc-http-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})", arity = "1") private final Integer rpcHttpPort = DEFAULT_JSON_RPC_PORT; @Option( names = {"--rpc-http-max-active-connections"}, description = "Maximum number of HTTP connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). 
Once this limit is reached, incoming connections will be rejected.", arity = "1") private final Integer rpcHttpMaxConnections = DEFAULT_HTTP_MAX_CONNECTIONS; // A list of origins URLs that are accepted by the JsonRpcHttpServer (CORS) @Option( names = {"--rpc-http-cors-origins"}, description = "Comma separated origin domain URLs for CORS validation (default: none)") private final CorsAllowedOriginsProperty rpcHttpCorsAllowedOrigins = new CorsAllowedOriginsProperty(); @Option( names = {"--rpc-http-api", "--rpc-http-apis"}, paramLabel = "<api name>", split = ",", arity = "1..*", converter = RpcApisConverter.class, description = "Comma separated list of APIs to enable on JSON-RPC HTTP service (default: ${DEFAULT-VALUE})") private final Collection<RpcApi> rpcHttpApis = DEFAULT_JSON_RPC_APIS; @Option( names = {"--rpc-http-authentication-enabled"}, description = "Require authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})") private final Boolean isRpcHttpAuthenticationEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @CommandLine.Option( names = {"--rpc-http-authentication-credentials-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Storage file for JSON-RPC HTTP authentication credentials (default: ${DEFAULT-VALUE})", arity = "1") private String rpcHttpAuthenticationCredentialsFile = null; @CommandLine.Option( names = {"--rpc-http-authentication-jwt-public-key-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "JWT public key file for JSON-RPC HTTP authentication", arity = "1") private final File rpcHttpAuthenticationPublicKeyFile = null; @Option( names = {"--rpc-http-tls-enabled"}, description = "Enable TLS for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})") private final Boolean isRpcHttpTlsEnabled = false; @Option( names = {"--rpc-http-tls-keystore-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Keystore (PKCS#12) containing key/certificate for the JSON-RPC HTTP service. Required if TLS is enabled.") private final Path rpcHttpTlsKeyStoreFile = null; @Option( names = {"--rpc-http-tls-keystore-password-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "File containing password to unlock keystore for the JSON-RPC HTTP service. Required if TLS is enabled.") private final Path rpcHttpTlsKeyStorePasswordFile = null; @Option( names = {"--rpc-http-tls-client-auth-enabled"}, description = "Enable TLS client authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})") private final Boolean isRpcHttpTlsClientAuthEnabled = false; @Option( names = {"--rpc-http-tls-known-clients-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Path to file containing clients certificate common name and fingerprint for client authentication") private final Path rpcHttpTlsKnownClientsFile = null; @Option( names = {"--rpc-http-tls-ca-clients-enabled"}, description = "Enable to accept clients certificate signed by a valid CA for client authentication (default: ${DEFAULT-VALUE})") private final Boolean isRpcHttpTlsCAClientsEnabled = false; @Option( names = {"--rpc-ws-enabled"}, description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})") private final Boolean isRpcWsEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. 
@Option( names = {"--rpc-ws-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})", arity = "1") private String rpcWsHost = autoDiscoverDefaultIP().getHostAddress(); @Option( names = {"--rpc-ws-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})", arity = "1") private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT; @Option( names = {"--rpc-ws-max-active-connections"}, description = "Maximum number of WebSocket connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). Once this limit is reached, incoming connections will be rejected.", arity = "1") private final Integer rpcWsMaxConnections = DEFAULT_WS_MAX_CONNECTIONS; @Option( names = {"--rpc-ws-api", "--rpc-ws-apis"}, paramLabel = "<api name>", split = ",", arity = "1..*", converter = RpcApisConverter.class, description = "Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})") private final List<RpcApi> rpcWsApis = DEFAULT_JSON_RPC_APIS; @Option( names = {"--rpc-ws-authentication-enabled"}, description = "Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})") private final Boolean isRpcWsAuthenticationEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @CommandLine.Option( names = {"--rpc-ws-authentication-credentials-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Storage file for JSON-RPC WebSocket authentication credentials (default: ${DEFAULT-VALUE})", arity = "1") private String rpcWsAuthenticationCredentialsFile = null; @CommandLine.Option( names = {"--rpc-ws-authentication-jwt-public-key-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "JWT public key file for JSON-RPC WebSocket authentication", arity = "1") private final File rpcWsAuthenticationPublicKeyFile = null; @Option( names = {"--privacy-tls-enabled"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Enable TLS for connecting to privacy enclave (default: ${DEFAULT-VALUE})") private final Boolean isPrivacyTlsEnabled = false; @Option( names = "--privacy-tls-keystore-file", paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Path to a PKCS#12 formatted keystore; used to enable TLS on inbound connections.") private final Path privacyKeyStoreFile = null; @Option( names = "--privacy-tls-keystore-password-file", paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Path to a file containing the password used to decrypt the keystore.") private final Path privacyKeyStorePasswordFile = null; @Option( names = "--privacy-tls-known-enclave-file", paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Path to a file containing the fingerprints of the authorized privacy enclave.") private final Path privacyTlsKnownEnclaveFile = null; @Option( names = {"--metrics-enabled"}, description = "Set to start the metrics exporter (default: ${DEFAULT-VALUE})") private final Boolean isMetricsEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--metrics-protocol"}, description = "Metrics protocol, one of PROMETHEUS, OPENTELEMETRY or NONE. (default: ${DEFAULT-VALUE})") private MetricsProtocol metricsProtocol = PROMETHEUS; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. 
@Option( names = {"--metrics-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Host for the metrics exporter to listen on (default: ${DEFAULT-VALUE})", arity = "1") private String metricsHost = autoDiscoverDefaultIP().getHostAddress(); @Option( names = {"--metrics-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port for the metrics exporter to listen on (default: ${DEFAULT-VALUE})", arity = "1") private final Integer metricsPort = DEFAULT_METRICS_PORT; @Option( names = {"--metrics-category", "--metrics-categories"}, paramLabel = "<category name>", split = ",", arity = "1..*", description = "Comma separated list of categories to track metrics for (default: ${DEFAULT-VALUE})") private final Set<MetricCategory> metricCategories = DEFAULT_METRIC_CATEGORIES; @Option( names = {"--metrics-push-enabled"}, description = "Enable the metrics push gateway integration (default: ${DEFAULT-VALUE})") private final Boolean isMetricsPushEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--metrics-push-host"}, paramLabel = MANDATORY_HOST_FORMAT_HELP, description = "Host of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})", arity = "1") private String metricsPushHost = autoDiscoverDefaultIP().getHostAddress(); @Option( names = {"--metrics-push-port"}, paramLabel = MANDATORY_PORT_FORMAT_HELP, description = "Port of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})", arity = "1") private final Integer metricsPushPort = DEFAULT_METRICS_PUSH_PORT; @Option( names = {"--metrics-push-interval"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Interval in seconds to push metrics when in push mode (default: ${DEFAULT-VALUE})", arity = "1") private final Integer metricsPushInterval = 15; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--metrics-push-prometheus-job"}, description = "Job name to use when in push mode (default: ${DEFAULT-VALUE})", arity = "1") private String metricsPrometheusJob = "besu-client"; @Option( names = {"--host-allowlist"}, paramLabel = "<hostname>[,<hostname>...]... or * or all", description = "Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})", defaultValue = "localhost,127.0.0.1") private final JsonRPCAllowlistHostsProperty hostsAllowlist = new JsonRPCAllowlistHostsProperty(); @Option( names = {"--host-whitelist"}, hidden = true, paramLabel = "<hostname>[,<hostname>...]... or * or all", description = "Deprecated in favor of --host-allowlist. 
Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})") private final JsonRPCAllowlistHostsProperty hostsWhitelist = new JsonRPCAllowlistHostsProperty(); @Option( names = {"--logging", "-l"}, paramLabel = "<LOG VERBOSITY LEVEL>", description = "Logging verbosity levels: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL") private final Level logLevel = null; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) @Option( names = {"--color-enabled"}, description = "Force color output to be enabled/disabled (default: colorized only if printing to console)") private static Boolean colorEnabled = null; @Option( names = {"--reorg-logging-threshold"}, description = "How deep a chain reorganization must be in order for it to be logged (default: ${DEFAULT-VALUE})") private final Long reorgLoggingThreshold = 6L; @Option( names = {"--miner-enabled"}, description = "Set if node will perform mining (default: ${DEFAULT-VALUE})") private final Boolean isMiningEnabled = false; @Option( names = {"--miner-stratum-enabled"}, description = "Set if node will perform Stratum mining (default: ${DEFAULT-VALUE})") private final Boolean iStratumMiningEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--miner-stratum-host"}, description = "Host for Stratum network mining service (default: ${DEFAULT-VALUE})") private String stratumNetworkInterface = "0.0.0.0"; @Option( names = {"--miner-stratum-port"}, description = "Stratum port binding (default: ${DEFAULT-VALUE})") private final Integer stratumPort = 8008; @Option( names = {"--miner-coinbase"}, description = "Account to which mining rewards are paid. You must specify a valid coinbase if " + "mining is enabled using --miner-enabled option", arity = "1") private final Address coinbase = null; @Option( names = {"--min-gas-price"}, description = "Minimum price (in Wei) offered by a transaction for it to be included in a mined " + "block (default: ${DEFAULT-VALUE})", arity = "1") private final Wei minTransactionGasPrice = DEFAULT_MIN_TRANSACTION_GAS_PRICE; @Option( names = {"--rpc-tx-feecap"}, description = "Maximum transaction fees (in Wei) accepted for transaction submitted through RPC (default: ${DEFAULT-VALUE})", arity = "1") private final Wei txFeeCap = DEFAULT_RPC_TX_FEE_CAP; @Option( names = {"--min-block-occupancy-ratio"}, description = "Minimum occupancy ratio for a mined block (default: ${DEFAULT-VALUE})", arity = "1") private final Double minBlockOccupancyRatio = DEFAULT_MIN_BLOCK_OCCUPANCY_RATIO; @Option( names = {"--miner-extra-data"}, description = "A hex string representing the (32) bytes to be included in the extra data " + "field of a mined block (default: ${DEFAULT-VALUE})", arity = "1") private final Bytes extraData = DEFAULT_EXTRA_DATA; @Option( names = {"--pruning-enabled"}, description = "Enable disk-space saving optimization that removes old state that is unlikely to be required (default: ${DEFAULT-VALUE})") private final Boolean pruningEnabled = false; @Option( names = {"--permissions-nodes-config-file-enabled"}, description = "Enable node level permissions (default: ${DEFAULT-VALUE})") private final Boolean permissionsNodesEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. 
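// Hypothetical sketch of the node permissioning TOML referenced by the option below (the key name
// is an assumption, not taken from this file):
//   nodes-allowlist=["enode://<node-id>@127.0.0.1:30303"]
// As the description states, the file defaults to permissions_config.toml in the Besu data folder.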
@CommandLine.Option( names = {"--permissions-nodes-config-file"}, description = "Node permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)") private String nodePermissionsConfigFile = null; @Option( names = {"--permissions-accounts-config-file-enabled"}, description = "Enable account level permissions (default: ${DEFAULT-VALUE})") private final Boolean permissionsAccountsEnabled = false; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @CommandLine.Option( names = {"--permissions-accounts-config-file"}, description = "Account permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)") private String accountPermissionsConfigFile = null; @Option( names = {"--permissions-nodes-contract-address"}, description = "Address of the node permissioning smart contract", arity = "1") private final Address permissionsNodesContractAddress = null; @Option( names = {"--permissions-nodes-contract-version"}, description = "Version of the EEA Node Permissioning interface (default: ${DEFAULT-VALUE})") private final Integer permissionsNodesContractVersion = 1; @Option( names = {"--permissions-nodes-contract-enabled"}, description = "Enable node level permissions via smart contract (default: ${DEFAULT-VALUE})") private final Boolean permissionsNodesContractEnabled = false; @Option( names = {"--permissions-accounts-contract-address"}, description = "Address of the account permissioning smart contract", arity = "1") private final Address permissionsAccountsContractAddress = null; @Option( names = {"--permissions-accounts-contract-enabled"}, description = "Enable account level permissions via smart contract (default: ${DEFAULT-VALUE})") private final Boolean permissionsAccountsContractEnabled = false; @Option( names = {"--privacy-enabled"}, description = "Enable private transactions (default: ${DEFAULT-VALUE})") private final Boolean isPrivacyEnabled = false; @Option( names = {"--privacy-multi-tenancy-enabled"}, description = "Enable multi-tenant private transactions (default: ${DEFAULT-VALUE})") private final Boolean isPrivacyMultiTenancyEnabled = false; @Option( names = {"--revert-reason-enabled"}, description = "Enable passing the revert reason back through TransactionReceipts (default: ${DEFAULT-VALUE})") private final Boolean isRevertReasonEnabled = false; @Option( names = {"--required-blocks", "--required-block"}, paramLabel = "BLOCK=HASH", description = "Block number and hash peers are required to have.", arity = "*", split = ",") private final Map<Long, Hash> requiredBlocks = new HashMap<>(); @Option( names = {"--privacy-url"}, description = "The URL on which the enclave is running") private final URI privacyUrl = PrivacyParameters.DEFAULT_ENCLAVE_URL; @Option( names = {"--privacy-public-key-file"}, description = "The enclave's public key file") private final File privacyPublicKeyFile = null; @Option( names = {"--privacy-precompiled-address"}, description = "The address to which the privacy pre-compiled contract will be mapped (default: ${DEFAULT-VALUE})", hidden = true) private final Integer privacyPrecompiledAddress = Address.PRIVACY; @Option( names = {"--privacy-marker-transaction-signing-key-file"}, description = "The name of a file containing the private key used to sign privacy marker transactions. 
If unset, each will be signed with a random key.") private final Path privacyMarkerTransactionSigningKeyPath = null; @Option( names = {"--privacy-enable-database-migration"}, description = "Enable private database metadata migration (default: ${DEFAULT-VALUE})") private final Boolean migratePrivateDatabase = false; @Option( names = {"--privacy-flexible-groups-enabled", "--privacy-onchain-groups-enabled"}, description = "Enable flexible (onchain) privacy groups (default: ${DEFAULT-VALUE})") private final Boolean isFlexiblePrivacyGroupsEnabled = false; @Option( names = {"--target-gas-limit"}, description = "Sets target gas limit per block. If set each block's gas limit will approach this setting over time if the current gas limit is different.") private final Long targetGasLimit = null; @Option( names = {"--tx-pool-max-size"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Maximum number of pending transactions that will be kept in the transaction pool (default: ${DEFAULT-VALUE})", arity = "1") private final Integer txPoolMaxSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS; @Option( names = {"--tx-pool-hashes-max-size"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Maximum number of pending transaction hashes that will be kept in the transaction pool (default: ${DEFAULT-VALUE})", arity = "1") private final Integer pooledTransactionHashesSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS_HASHES; @Option( names = {"--tx-pool-retention-hours"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, description = "Maximum retention period of pending transactions in hours (default: ${DEFAULT-VALUE})", arity = "1") private final Integer pendingTxRetentionPeriod = TransactionPoolConfiguration.DEFAULT_TX_RETENTION_HOURS; @Option( names = {"--tx-pool-price-bump"}, paramLabel = MANDATORY_INTEGER_FORMAT_HELP, converter = PercentageConverter.class, description = "Price bump percentage to replace an already existing transaction (default: ${DEFAULT-VALUE})", arity = "1") private final Integer priceBump = TransactionPoolConfiguration.DEFAULT_PRICE_BUMP.getValue(); @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. @Option( names = {"--key-value-storage"}, description = "Identity for the key-value storage to be used.", arity = "1") private String keyValueStorageName = DEFAULT_KEY_VALUE_STORAGE_NAME; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) @Option( names = {"--security-module"}, paramLabel = "<NAME>", description = "Identity for the Security Module to be used.", arity = "1") private String securityModuleName = DEFAULT_SECURITY_MODULE; @Option( names = {"--auto-log-bloom-caching-enabled"}, description = "Enable automatic log bloom caching (default: ${DEFAULT-VALUE})", arity = "1") private final Boolean autoLogBloomCachingEnabled = true; @Option( names = {"--override-genesis-config"}, paramLabel = "NAME=VALUE", description = "Overrides configuration values in the genesis file. 
Use with care.", arity = "*", hidden = true, split = ",") private final Map<String, String> genesisConfigOverrides = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); @Option( names = {"--pruning-blocks-retained"}, defaultValue = "1024", paramLabel = "<INTEGER>", description = "Minimum number of recent blocks for which to keep entire world state (default: ${DEFAULT-VALUE})", arity = "1") private final Integer pruningBlocksRetained = PrunerConfiguration.DEFAULT_PRUNING_BLOCKS_RETAINED; @Option( names = {"--pruning-block-confirmations"}, defaultValue = "10", paramLabel = "<INTEGER>", description = "Minimum number of confirmations on a block before marking begins (default: ${DEFAULT-VALUE})", arity = "1") private final Integer pruningBlockConfirmations = PrunerConfiguration.DEFAULT_PRUNING_BLOCK_CONFIRMATIONS; @CommandLine.Option( names = {"--pid-path"}, paramLabel = MANDATORY_PATH_FORMAT_HELP, description = "Path to PID file (optional)") private final Path pidPath = null; @CommandLine.Option( names = {"--api-gas-price-blocks"}, description = "Number of blocks to consider for eth_gasPrice (default: ${DEFAULT-VALUE})") private final Long apiGasPriceBlocks = 100L; @CommandLine.Option( names = {"--api-gas-price-percentile"}, description = "Percentile value to measure for eth_gasPrice (default: ${DEFAULT-VALUE})") private final Double apiGasPricePercentile = 50.0; @CommandLine.Option( names = {"--api-gas-price-max"}, description = "Maximum gas price for eth_gasPrice (default: ${DEFAULT-VALUE})") private final Long apiGasPriceMax = 500_000_000_000L; @Option( names = {"--goquorum-compatibility-enabled"}, hidden = true, description = "Start Besu in GoQuorum compatibility mode (default: ${DEFAULT-VALUE})") private final Boolean isGoQuorumCompatibilityMode = false; @CommandLine.Option( names = {"--static-nodes-file"}, paramLabel = MANDATORY_FILE_FORMAT_HELP, description = "Specifies the static node file containing the static nodes for this node to connect to") private final Path staticNodesFile = null; @SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings. 
@CommandLine.Option( names = {"--discovery-dns-url"}, description = "Specifies the URL to use for DNS discovery") private String discoveryDnsUrl = null; private EthNetworkConfig ethNetworkConfig; private JsonRpcConfiguration jsonRpcConfiguration; private GraphQLConfiguration graphQLConfiguration; private WebSocketConfiguration webSocketConfiguration; private ApiConfiguration apiConfiguration; private MetricsConfiguration metricsConfiguration; private Optional<PermissioningConfiguration> permissioningConfiguration; private Collection<EnodeURL> staticNodes; private BesuController besuController; private BesuConfiguration pluginCommonConfiguration; private final Supplier<ObservableMetricsSystem> metricsSystem = Suppliers.memoize(() -> MetricsSystemFactory.create(metricsConfiguration())); private Vertx vertx; private EnodeDnsConfiguration enodeDnsConfiguration; private KeyValueStorageProvider keyValueStorageProvider; public BesuCommand( final Logger logger, final Supplier<RlpBlockImporter> rlpBlockImporter, final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory, final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory, final RunnerBuilder runnerBuilder, final BesuController.Builder controllerBuilderFactory, final BesuPluginContextImpl besuPluginContext, final Map<String, String> environment) { this( logger, rlpBlockImporter, jsonBlockImporterFactory, rlpBlockExporterFactory, runnerBuilder, controllerBuilderFactory, besuPluginContext, environment, new StorageServiceImpl(), new SecurityModuleServiceImpl()); } @VisibleForTesting protected BesuCommand( final Logger logger, final Supplier<RlpBlockImporter> rlpBlockImporter, final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory, final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory, final RunnerBuilder runnerBuilder, final BesuController.Builder controllerBuilderFactory, final BesuPluginContextImpl besuPluginContext, final Map<String, String> environment, final StorageServiceImpl storageService, final SecurityModuleServiceImpl securityModuleService) { this.logger = logger; this.rlpBlockImporter = rlpBlockImporter; this.rlpBlockExporterFactory = rlpBlockExporterFactory; this.jsonBlockImporterFactory = jsonBlockImporterFactory; this.runnerBuilder = runnerBuilder; this.controllerBuilderFactory = controllerBuilderFactory; this.besuPluginContext = besuPluginContext; this.environment = environment; this.storageService = storageService; this.securityModuleService = securityModuleService; pluginCommonConfiguration = new BesuCommandConfigurationService(); besuPluginContext.addService(BesuConfiguration.class, pluginCommonConfiguration); } public void parse( final AbstractParseResultHandler<List<Object>> resultHandler, final BesuExceptionHandler exceptionHandler, final InputStream in, final String... 
args) { commandLine = new CommandLine(this, new BesuCommandCustomFactory(besuPluginContext)) .setCaseInsensitiveEnumValuesAllowed(true); enableExperimentalEIPs(); addSubCommands(resultHandler, in); registerConverters(); handleUnstableOptions(); preparePlugins(); parse(resultHandler, exceptionHandler, args); } @Override public void run() { try { configureLogging(true); configureNativeLibs(); logger.info("Starting Besu version: {}", BesuInfo.nodeName(identityString)); // Need to create vertx after cmdline has been parsed, such that metricsSystem is configurable vertx = createVertx(createVertxOptions(metricsSystem.get())); final BesuCommand controller = validateOptions().configure().controller(); preSynchronizationTaskRunner.runTasks(controller.besuController); controller.startPlugins().startSynchronization(); } catch (final Exception e) { throw new ParameterException(this.commandLine, e.getMessage(), e); } } @VisibleForTesting void setBesuConfiguration(final BesuConfiguration pluginCommonConfiguration) { this.pluginCommonConfiguration = pluginCommonConfiguration; } private void enableExperimentalEIPs() { // Usage of static command line flags is strictly reserved for experimental EIPs commandLine.addMixin("experimentalEIPs", ExperimentalEIPs.class); } private void addSubCommands( final AbstractParseResultHandler<List<Object>> resultHandler, final InputStream in) { commandLine.addSubcommand( BlocksSubCommand.COMMAND_NAME, new BlocksSubCommand( rlpBlockImporter, jsonBlockImporterFactory, rlpBlockExporterFactory, resultHandler.out())); commandLine.addSubcommand( PublicKeySubCommand.COMMAND_NAME, new PublicKeySubCommand(resultHandler.out(), this::buildNodeKey)); commandLine.addSubcommand( PasswordSubCommand.COMMAND_NAME, new PasswordSubCommand(resultHandler.out())); commandLine.addSubcommand(RetestethSubCommand.COMMAND_NAME, new RetestethSubCommand()); commandLine.addSubcommand( RLPSubCommand.COMMAND_NAME, new RLPSubCommand(resultHandler.out(), in)); commandLine.addSubcommand( OperatorSubCommand.COMMAND_NAME, new OperatorSubCommand(resultHandler.out())); } private void registerConverters() { commandLine.registerConverter(Address.class, Address::fromHexStringStrict); commandLine.registerConverter(Bytes.class, Bytes::fromHexString); commandLine.registerConverter(Level.class, Level::valueOf); commandLine.registerConverter(SyncMode.class, SyncMode::fromString); commandLine.registerConverter(MetricsProtocol.class, MetricsProtocol::fromString); commandLine.registerConverter(UInt256.class, (arg) -> UInt256.valueOf(new BigInteger(arg))); commandLine.registerConverter(Wei.class, (arg) -> Wei.of(Long.parseUnsignedLong(arg))); commandLine.registerConverter(PositiveNumber.class, PositiveNumber::fromString); commandLine.registerConverter(Hash.class, Hash::fromHexString); commandLine.registerConverter(Optional.class, Optional::of); commandLine.registerConverter(Double.class, Double::parseDouble); metricCategoryConverter.addCategories(BesuMetricCategory.class); metricCategoryConverter.addCategories(StandardMetricCategory.class); commandLine.registerConverter(MetricCategory.class, metricCategoryConverter); } private void handleUnstableOptions() { // Add unstable options final ImmutableMap.Builder<String, Object> unstableOptionsBuild = ImmutableMap.builder(); final ImmutableMap<String, Object> unstableOptions = unstableOptionsBuild .put("Ethereum Wire Protocol", unstableEthProtocolOptions) .put("Metrics", unstableMetricsCLIOptions) .put("P2P Network", unstableNetworkingOptions) .put("RPC", unstableRPCOptions) 
.put("DNS Configuration", unstableDnsOptions) .put("NAT Configuration", unstableNatOptions) .put("Synchronizer", unstableSynchronizerOptions) .put("TransactionPool", unstableTransactionPoolOptions) .put("Ethstats", unstableEthstatsOptions) .put("Mining", unstableMiningOptions) .put("Native Library", unstableNativeLibraryOptions) .put("Data Storage Options", unstableDataStorageOptions) .put("Launcher", unstableLauncherOptions) .build(); UnstableOptionsSubCommand.createUnstableOptions(commandLine, unstableOptions); } private void preparePlugins() { besuPluginContext.addService(PicoCLIOptions.class, new PicoCLIOptionsImpl(commandLine)); besuPluginContext.addService(SecurityModuleService.class, securityModuleService); besuPluginContext.addService(StorageService.class, storageService); besuPluginContext.addService(MetricCategoryRegistry.class, metricCategoryRegistry); // register built-in plugins new RocksDBPlugin().register(besuPluginContext); new InMemoryStoragePlugin().register(besuPluginContext); besuPluginContext.registerPlugins(pluginsDir()); metricCategoryRegistry .getMetricCategories() .forEach(metricCategoryConverter::addRegistryCategory); // register default security module securityModuleService.register( DEFAULT_SECURITY_MODULE, Suppliers.memoize(this::defaultSecurityModule)); } private SecurityModule defaultSecurityModule() { return new KeyPairSecurityModule(loadKeyPair()); } @VisibleForTesting KeyPair loadKeyPair() { return KeyPairUtil.loadKeyPair(nodePrivateKeyFile()); } private void parse( final AbstractParseResultHandler<List<Object>> resultHandler, final BesuExceptionHandler exceptionHandler, final String... args) { // Create a handler that will search for a config file option and use it for // default values // and eventually it will run regular parsing of the remaining options. 
final ConfigOptionSearchAndRunHandler configParsingHandler = new ConfigOptionSearchAndRunHandler( resultHandler, exceptionHandler, CONFIG_FILE_OPTION_NAME, environment); ParseArgsHelper.getLauncherOptions(unstableLauncherOptions, args); if (unstableLauncherOptions.isLauncherMode() || unstableLauncherOptions.isLauncherModeForced()) { try { final ImmutableLauncherConfig launcherConfig = ImmutableLauncherConfig.builder() .launcherScript(BesuCommand.class.getResourceAsStream("launcher.json")) .addCommandClasses( this, unstableNatOptions, unstableEthstatsOptions, unstableMiningOptions) .isLauncherForced(unstableLauncherOptions.isLauncherModeForced()) .build(); final File file = new LauncherManager(launcherConfig).run(); logger.info("Config file location : {}", file.getAbsolutePath()); commandLine.parseWithHandlers( configParsingHandler, exceptionHandler, String.format("%s=%s", CONFIG_FILE_OPTION_NAME, file.getAbsolutePath())); } catch (LauncherException e) { logger.warn("Unable to run the launcher {}", e.getMessage()); } } else { commandLine.parseWithHandlers(configParsingHandler, exceptionHandler, args); } } private void startSynchronization() { synchronize( besuController, p2pEnabled, peerDiscoveryEnabled, ethNetworkConfig, maxPeers, p2pHost, p2pInterface, p2pPort, graphQLConfiguration, jsonRpcConfiguration, webSocketConfiguration, apiConfiguration, metricsConfiguration, permissioningConfiguration, staticNodes, pidPath); } private BesuCommand startPlugins() { besuPluginContext.addService( BesuEvents.class, new BesuEventsImpl( besuController.getProtocolContext().getBlockchain(), besuController.getProtocolManager().getBlockBroadcaster(), besuController.getTransactionPool(), besuController.getSyncState())); besuPluginContext.addService(MetricsSystem.class, getMetricsSystem()); besuController.getAdditionalPluginServices().appendPluginServices(besuPluginContext); besuPluginContext.startPlugins(); return this; } public void configureLogging(final boolean announce) { // To change the configuration if color was enabled/disabled Configurator.reconfigure(); // set log level per CLI flags if (logLevel != null) { if (announce) { System.out.println("Setting logging level to " + logLevel.name()); } Configurator.setAllLevels("", logLevel); } } public static Optional<Boolean> getColorEnabled() { return Optional.ofNullable(colorEnabled); } private void configureNativeLibs() { if (unstableNativeLibraryOptions.getNativeAltbn128()) { AbstractAltBnPrecompiledContract.enableNative(); } if (unstableNativeLibraryOptions.getNativeSecp256k1()) { SignatureAlgorithmFactory.getInstance().enableNative(); } } private BesuCommand validateOptions() { issueOptionWarnings(); validateP2PInterface(p2pInterface); validateMiningParams(); validateNatParams(); validateNetStatsParams(); validateDnsOptionsParams(); return this; } @SuppressWarnings("ConstantConditions") private void validateMiningParams() { if (isMiningEnabled && coinbase == null) { throw new ParameterException( this.commandLine, "Unable to mine without a valid coinbase. Either disable mining (remove --miner-enabled) " + "or specify the beneficiary of mining (via --miner-coinbase <Address>)"); } if (!isMiningEnabled && iStratumMiningEnabled) { throw new ParameterException( this.commandLine, "Unable to mine with Stratum if mining is disabled. 
Either disable Stratum mining (remove --miner-stratum-enabled) " + "or specify mining is enabled (--miner-enabled)"); } } protected void validateP2PInterface(final String p2pInterface) { final String failMessage = "The provided --p2p-interface is not available: " + p2pInterface; try { if (!NetworkUtility.isNetworkInterfaceAvailable(p2pInterface)) { throw new ParameterException(commandLine, failMessage); } } catch (final UnknownHostException | SocketException e) { throw new ParameterException(commandLine, failMessage, e); } } @SuppressWarnings("ConstantConditions") private void validateNatParams() { if (!(natMethod.equals(NatMethod.AUTO) || natMethod.equals(NatMethod.KUBERNETES)) && !unstableNatOptions .getNatManagerServiceName() .equals(DEFAULT_BESU_SERVICE_NAME_FILTER)) { throw new ParameterException( this.commandLine, "The `--Xnat-kube-service-name` parameter is only used in kubernetes mode. Either remove --Xnat-kube-service-name" + " or select the KUBERNETES mode (via --nat-method=KUBERNETES)"); } if (natMethod.equals(NatMethod.AUTO) && !unstableNatOptions.getNatMethodFallbackEnabled()) { throw new ParameterException( this.commandLine, "The `--Xnat-method-fallback-enabled` parameter cannot be used in AUTO mode. Either remove --Xnat-method-fallback-enabled" + " or select another mode (via --nat-method=XXXX)"); } } private void validateNetStatsParams() { if (Strings.isNullOrEmpty(unstableEthstatsOptions.getEthstatsUrl()) && !unstableEthstatsOptions.getEthstatsContact().isEmpty()) { throw new ParameterException( this.commandLine, "The `--Xethstats-contact` requires the ethstats server URL to be provided. Either remove --Xethstats-contact" + " or provide a URL (via --Xethstats=nodename:secret@host:port)"); } } private void validateDnsOptionsParams() { if (!unstableDnsOptions.getDnsEnabled() && unstableDnsOptions.getDnsUpdateEnabled()) { throw new ParameterException( this.commandLine, "The `--Xdns-update-enabled` requires DNS to be enabled. 
Either remove --Xdns-update-enabled" + " or specify dns is enabled (--Xdns-enabled)"); } } private GenesisConfigOptions readGenesisConfigOptions() { final GenesisConfigOptions genesisConfigOptions; try { final GenesisConfigFile genesisConfigFile = GenesisConfigFile.fromConfig(genesisConfig()); genesisConfigOptions = genesisConfigFile.getConfigOptions(genesisConfigOverrides); } catch (final Exception e) { throw new IllegalStateException("Unable to read genesis file for GoQuorum options", e); } return genesisConfigOptions; } private void issueOptionWarnings() { // Check that P2P options are able to work CommandLineUtils.checkOptionDependencies( logger, commandLine, "--p2p-enabled", !p2pEnabled, asList( "--bootnodes", "--discovery-enabled", "--max-peers", "--banned-node-id", "--banned-node-ids", "--p2p-host", "--p2p-interface", "--p2p-port", "--remote-connections-max-percentage")); // Check that mining options are able to work CommandLineUtils.checkOptionDependencies( logger, commandLine, "--miner-enabled", !isMiningEnabled, asList( "--miner-coinbase", "--min-block-occupancy-ratio", "--miner-extra-data", "--miner-stratum-enabled", "--Xminer-remote-sealers-limit", "--Xminer-remote-sealers-hashrate-ttl")); CommandLineUtils.checkMultiOptionDependencies( logger, commandLine, List.of("--miner-enabled", "--goquorum-compatibility-enabled"), List.of(!isMiningEnabled, !isGoQuorumCompatibilityMode), singletonList("--min-gas-price")); CommandLineUtils.checkOptionDependencies( logger, commandLine, "--sync-mode", !SyncMode.FAST.equals(syncMode), singletonList("--fast-sync-min-peers")); if (!securityModuleName.equals(DEFAULT_SECURITY_MODULE) && nodePrivateKeyFile != null) { logger.warn( DEPENDENCY_WARNING_MSG, "--node-private-key-file", "--security-module=" + DEFAULT_SECURITY_MODULE); } } private BesuCommand configure() throws Exception { checkPortClash(); syncMode = Optional.ofNullable(syncMode) .orElse( genesisFile == null && !isPrivacyEnabled && network != NetworkName.DEV ? SyncMode.FAST : SyncMode.FULL); ethNetworkConfig = updateNetworkConfig(getNetwork()); checkGoQuorumGenesisConfig(); checkGoQuorumCompatibilityConfig(ethNetworkConfig); jsonRpcConfiguration = jsonRpcConfiguration(); graphQLConfiguration = graphQLConfiguration(); webSocketConfiguration = webSocketConfiguration(); apiConfiguration = apiConfiguration(); // hostsWhitelist is a hidden option. 
If it is specified, add the list to hostAllowlist if (!hostsWhitelist.isEmpty()) { // if allowlist == default values, remove the default values if (hostsAllowlist.size() == 2 && hostsAllowlist.containsAll(List.of("localhost", "127.0.0.1"))) { hostsAllowlist.removeAll(List.of("localhost", "127.0.0.1")); } hostsAllowlist.addAll(hostsWhitelist); } permissioningConfiguration = permissioningConfiguration(); staticNodes = loadStaticNodes(); logger.info("Connecting to {} static nodes.", staticNodes.size()); logger.trace("Static Nodes = {}", staticNodes); final List<EnodeURL> enodeURIs = ethNetworkConfig.getBootNodes(); permissioningConfiguration .flatMap(PermissioningConfiguration::getLocalConfig) .ifPresent(p -> ensureAllNodesAreInAllowlist(enodeURIs, p)); permissioningConfiguration .flatMap(PermissioningConfiguration::getLocalConfig) .ifPresent(p -> ensureAllNodesAreInAllowlist(staticNodes, p)); metricsConfiguration = metricsConfiguration(); logger.info("Security Module: {}", securityModuleName); instantiateSignatureAlgorithmFactory(); return this; } private GoQuorumPrivacyParameters configureGoQuorumPrivacy( final KeyValueStorageProvider storageProvider) { return new GoQuorumPrivacyParameters( createGoQuorumEnclave(), readEnclaveKey(), storageProvider.createGoQuorumPrivateStorage(), createPrivateWorldStateArchive(storageProvider)); } private GoQuorumEnclave createGoQuorumEnclave() { final EnclaveFactory enclaveFactory = new EnclaveFactory(Vertx.vertx()); if (privacyKeyStoreFile != null) { return enclaveFactory.createGoQuorumEnclave( privacyUrl, privacyKeyStoreFile, privacyKeyStorePasswordFile, privacyTlsKnownEnclaveFile); } else { return enclaveFactory.createGoQuorumEnclave(privacyUrl); } } private String readEnclaveKey() { final String key; try { key = Files.asCharSource(privacyPublicKeyFile, UTF_8).read(); } catch (final Exception e) { throw new ParameterException( this.commandLine, "--privacy-public-key-file must be set when --goquorum-compatibility-enabled is set to true.", e); } if (key.length() != 44) { throw new IllegalArgumentException( "Contents of enclave public key file needs to be 44 characters long to decode to a valid 32 byte public key."); } // throws exception if invalid base 64 Base64.getDecoder().decode(key); return key; } private NetworkName getNetwork() { // noinspection ConstantConditions network is not always null but injected by // PicoCLI if used return network == null ? 
MAINNET : network; } private void ensureAllNodesAreInAllowlist( final Collection<EnodeURL> enodeAddresses, final LocalPermissioningConfiguration permissioningConfiguration) { try { PermissioningConfigurationValidator.areAllNodesAreInAllowlist( enodeAddresses, permissioningConfiguration); } catch (final Exception e) { throw new ParameterException(this.commandLine, e.getMessage()); } } private BesuCommand controller() { besuController = buildController(); return this; } public BesuController buildController() { try { return getControllerBuilder().build(); } catch (final Exception e) { throw new ExecutionException(this.commandLine, e.getMessage(), e); } } public BesuControllerBuilder getControllerBuilder() { final KeyValueStorageProvider storageProvider = keyValueStorageProvider(keyValueStorageName); return controllerBuilderFactory .fromEthNetworkConfig(updateNetworkConfig(getNetwork()), genesisConfigOverrides) .synchronizerConfiguration(buildSyncConfig()) .ethProtocolConfiguration(unstableEthProtocolOptions.toDomainObject()) .dataDirectory(dataDir()) .miningParameters( new MiningParameters( coinbase, minTransactionGasPrice, extraData, isMiningEnabled, iStratumMiningEnabled, stratumNetworkInterface, stratumPort, unstableMiningOptions.getStratumExtranonce(), Optional.empty(), minBlockOccupancyRatio, unstableMiningOptions.getRemoteSealersLimit(), unstableMiningOptions.getRemoteSealersTimeToLive())) .transactionPoolConfiguration(buildTransactionPoolConfiguration()) .nodeKey(buildNodeKey()) .metricsSystem(metricsSystem.get()) .privacyParameters(privacyParameters(storageProvider)) .clock(Clock.systemUTC()) .isRevertReasonEnabled(isRevertReasonEnabled) .storageProvider(storageProvider) .isPruningEnabled(isPruningEnabled()) .pruningConfiguration( new PrunerConfiguration(pruningBlockConfirmations, pruningBlocksRetained)) .genesisConfigOverrides(genesisConfigOverrides) .gasLimitCalculator( Optional.ofNullable(targetGasLimit) .<GasLimitCalculator>map(TargetingGasLimitCalculator::new) .orElse(GasLimitCalculator.constant())) .requiredBlocks(requiredBlocks) .reorgLoggingThreshold(reorgLoggingThreshold) .dataStorageConfiguration(unstableDataStorageOptions.toDomainObject()); } private GraphQLConfiguration graphQLConfiguration() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--graphql-http-enabled", !isGraphQLHttpEnabled, asList("--graphql-http-cors-origins", "--graphql-http-host", "--graphql-http-port")); final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault(); graphQLConfiguration.setEnabled(isGraphQLHttpEnabled); graphQLConfiguration.setHost(graphQLHttpHost); graphQLConfiguration.setPort(graphQLHttpPort); graphQLConfiguration.setHostsAllowlist(hostsAllowlist); graphQLConfiguration.setCorsAllowedDomains(graphQLHttpCorsAllowedOrigins); graphQLConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec()); return graphQLConfiguration; } private JsonRpcConfiguration jsonRpcConfiguration() { checkRpcTlsClientAuthOptionsDependencies(); checkRpcTlsOptionsDependencies(); checkRpcHttpOptionsDependencies(); if (isRpcHttpAuthenticationEnabled && rpcHttpAuthenticationCredentialsFile() == null && rpcHttpAuthenticationPublicKeyFile == null) { throw new ParameterException( commandLine, "Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file or authentication public key file"); } final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault(); jsonRpcConfiguration.setEnabled(isRpcHttpEnabled); 
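// The setters below copy the parsed CLI values onto the default JsonRpcConfiguration. As an
// illustrative smoke test (placeholder host/port, assuming the NET API is enabled):
//   curl -X POST -H 'Content-Type: application/json' \
//     --data '{"jsonrpc":"2.0","method":"net_version","params":[],"id":1}' http://127.0.0.1:8545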
jsonRpcConfiguration.setHost(rpcHttpHost); jsonRpcConfiguration.setPort(rpcHttpPort); jsonRpcConfiguration.setMaxActiveConnections(rpcHttpMaxConnections); jsonRpcConfiguration.setCorsAllowedDomains(rpcHttpCorsAllowedOrigins); jsonRpcConfiguration.setRpcApis(rpcHttpApis.stream().distinct().collect(Collectors.toList())); jsonRpcConfiguration.setHostsAllowlist(hostsAllowlist); jsonRpcConfiguration.setAuthenticationEnabled(isRpcHttpAuthenticationEnabled); jsonRpcConfiguration.setAuthenticationCredentialsFile(rpcHttpAuthenticationCredentialsFile()); jsonRpcConfiguration.setAuthenticationPublicKeyFile(rpcHttpAuthenticationPublicKeyFile); jsonRpcConfiguration.setTlsConfiguration(rpcHttpTlsConfiguration()); jsonRpcConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec()); return jsonRpcConfiguration; } private void checkRpcHttpOptionsDependencies() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--rpc-http-enabled", !isRpcHttpEnabled, asList( "--rpc-http-api", "--rpc-http-apis", "--rpc-http-cors-origins", "--rpc-http-host", "--rpc-http-port", "--rpc-http-max-active-connections", "--rpc-http-authentication-enabled", "--rpc-http-authentication-credentials-file", "--rpc-http-authentication-public-key-file", "--rpc-http-tls-enabled", "--rpc-http-tls-keystore-file", "--rpc-http-tls-keystore-password-file", "--rpc-http-tls-client-auth-enabled", "--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled")); } private void checkRpcTlsOptionsDependencies() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--rpc-http-tls-enabled", !isRpcHttpTlsEnabled, asList( "--rpc-http-tls-keystore-file", "--rpc-http-tls-keystore-password-file", "--rpc-http-tls-client-auth-enabled", "--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled")); } private void checkRpcTlsClientAuthOptionsDependencies() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--rpc-http-tls-client-auth-enabled", !isRpcHttpTlsClientAuthEnabled, asList("--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled")); } private void checkPrivacyTlsOptionsDependencies() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--privacy-tls-enabled", !isPrivacyTlsEnabled, asList( "--privacy-tls-keystore-file", "--privacy-tls-keystore-password-file", "--privacy-tls-known-enclave-file")); } private Optional<TlsConfiguration> rpcHttpTlsConfiguration() { if (!isRpcTlsConfigurationRequired()) { return Optional.empty(); } if (rpcHttpTlsKeyStoreFile == null) { throw new ParameterException( commandLine, "Keystore file is required when TLS is enabled for JSON-RPC HTTP endpoint"); } if (rpcHttpTlsKeyStorePasswordFile == null) { throw new ParameterException( commandLine, "File containing password to unlock keystore is required when TLS is enabled for JSON-RPC HTTP endpoint"); } if (isRpcHttpTlsClientAuthEnabled && !isRpcHttpTlsCAClientsEnabled && rpcHttpTlsKnownClientsFile == null) { throw new ParameterException( commandLine, "Known-clients file must be specified or CA clients must be enabled when TLS client authentication is enabled for JSON-RPC HTTP endpoint"); } return Optional.of( TlsConfiguration.Builder.aTlsConfiguration() .withKeyStorePath(rpcHttpTlsKeyStoreFile) .withKeyStorePasswordSupplier( new FileBasedPasswordProvider(rpcHttpTlsKeyStorePasswordFile)) .withClientAuthConfiguration(rpcHttpTlsClientAuthConfiguration()) .build()); } private TlsClientAuthConfiguration rpcHttpTlsClientAuthConfiguration() { if (isRpcHttpTlsClientAuthEnabled) { return 
TlsClientAuthConfiguration.Builder.aTlsClientAuthConfiguration() .withKnownClientsFile(rpcHttpTlsKnownClientsFile) .withCaClientsEnabled(isRpcHttpTlsCAClientsEnabled) .build(); } return null; } private boolean isRpcTlsConfigurationRequired() { return isRpcHttpEnabled && isRpcHttpTlsEnabled; } private WebSocketConfiguration webSocketConfiguration() { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--rpc-ws-enabled", !isRpcWsEnabled, asList( "--rpc-ws-api", "--rpc-ws-apis", "--rpc-ws-host", "--rpc-ws-port", "--rpc-ws-max-active-connections", "--rpc-ws-authentication-enabled", "--rpc-ws-authentication-credentials-file", "--rpc-ws-authentication-public-key-file")); if (isRpcWsAuthenticationEnabled && rpcWsAuthenticationCredentialsFile() == null && rpcWsAuthenticationPublicKeyFile == null) { throw new ParameterException( commandLine, "Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file"); } final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault(); webSocketConfiguration.setEnabled(isRpcWsEnabled); webSocketConfiguration.setHost(rpcWsHost); webSocketConfiguration.setPort(rpcWsPort); webSocketConfiguration.setMaxActiveConnections(rpcWsMaxConnections); webSocketConfiguration.setRpcApis(rpcWsApis); webSocketConfiguration.setAuthenticationEnabled(isRpcWsAuthenticationEnabled); webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile()); webSocketConfiguration.setHostsAllowlist(hostsAllowlist); webSocketConfiguration.setAuthenticationPublicKeyFile(rpcWsAuthenticationPublicKeyFile); webSocketConfiguration.setTimeoutSec(unstableRPCOptions.getWsTimeoutSec()); return webSocketConfiguration; } private ApiConfiguration apiConfiguration() { return ImmutableApiConfiguration.builder() .gasPriceBlocks(apiGasPriceBlocks) .gasPricePercentile(apiGasPricePercentile) .gasPriceMin(minTransactionGasPrice.toLong()) .gasPriceMax(apiGasPriceMax) .build(); } public MetricsConfiguration metricsConfiguration() { if (isMetricsEnabled && isMetricsPushEnabled) { throw new ParameterException( this.commandLine, "--metrics-enabled option and --metrics-push-enabled option can't be used at the same " + "time. Please refer to CLI reference for more details about this constraint."); } CommandLineUtils.checkOptionDependencies( logger, commandLine, "--metrics-enabled", !isMetricsEnabled, asList("--metrics-host", "--metrics-port")); CommandLineUtils.checkOptionDependencies( logger, commandLine, "--metrics-push-enabled", !isMetricsPushEnabled, asList( "--metrics-push-host", "--metrics-push-port", "--metrics-push-interval", "--metrics-push-prometheus-job")); return unstableMetricsCLIOptions .toDomainObject() .enabled(isMetricsEnabled) .host(metricsHost) .port(metricsPort) .protocol(metricsProtocol) .metricCategories(metricCategories) .pushEnabled(isMetricsPushEnabled) .pushHost(metricsPushHost) .pushPort(metricsPushPort) .pushInterval(metricsPushInterval) .hostsAllowlist(hostsAllowlist) .prometheusJob(metricsPrometheusJob) .build(); } private Optional<PermissioningConfiguration> permissioningConfiguration() throws Exception { if (!(localPermissionsEnabled() || contractPermissionsEnabled())) { if (rpcHttpApis.contains(RpcApis.PERM) || rpcWsApis.contains(RpcApis.PERM)) { logger.warn( "Permissions are disabled. 
Cannot enable PERM APIs when not using Permissions."); } return Optional.empty(); } final Optional<LocalPermissioningConfiguration> localPermissioningConfigurationOptional; if (localPermissionsEnabled()) { final Optional<String> nodePermissioningConfigFile = Optional.ofNullable(nodePermissionsConfigFile); final Optional<String> accountPermissioningConfigFile = Optional.ofNullable(accountPermissionsConfigFile); final LocalPermissioningConfiguration localPermissioningConfiguration = PermissioningConfigurationBuilder.permissioningConfiguration( permissionsNodesEnabled, getEnodeDnsConfiguration(), nodePermissioningConfigFile.orElse(getDefaultPermissioningFilePath()), permissionsAccountsEnabled, accountPermissioningConfigFile.orElse(getDefaultPermissioningFilePath())); localPermissioningConfigurationOptional = Optional.of(localPermissioningConfiguration); } else { if (nodePermissionsConfigFile != null && !permissionsNodesEnabled) { logger.warn( "Node permissioning config file set {} but no permissions enabled", nodePermissionsConfigFile); } if (accountPermissionsConfigFile != null && !permissionsAccountsEnabled) { logger.warn( "Account permissioning config file set {} but no permissions enabled", accountPermissionsConfigFile); } localPermissioningConfigurationOptional = Optional.empty(); } final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration = SmartContractPermissioningConfiguration.createDefault(); if (permissionsNodesContractEnabled) { if (permissionsNodesContractAddress == null) { throw new ParameterException( this.commandLine, "No node permissioning contract address specified. Cannot enable smart contract based node permissioning."); } else { smartContractPermissioningConfiguration.setSmartContractNodeAllowlistEnabled( permissionsNodesContractEnabled); smartContractPermissioningConfiguration.setNodeSmartContractAddress( permissionsNodesContractAddress); smartContractPermissioningConfiguration.setNodeSmartContractInterfaceVersion( permissionsNodesContractVersion); } } else if (permissionsNodesContractAddress != null) { logger.warn( "Node permissioning smart contract address set {} but smart contract node permissioning is disabled.", permissionsNodesContractAddress); } if (permissionsAccountsContractEnabled) { if (permissionsAccountsContractAddress == null) { throw new ParameterException( this.commandLine, "No account permissioning contract address specified. 
Cannot enable smart contract based account permissioning."); } else { smartContractPermissioningConfiguration.setSmartContractAccountAllowlistEnabled( permissionsAccountsContractEnabled); smartContractPermissioningConfiguration.setAccountSmartContractAddress( permissionsAccountsContractAddress); } } else if (permissionsAccountsContractAddress != null) { logger.warn( "Account permissioning smart contract address set {} but smart contract account permissioning is disabled.", permissionsAccountsContractAddress); } final PermissioningConfiguration permissioningConfiguration = new PermissioningConfiguration( localPermissioningConfigurationOptional, Optional.of(smartContractPermissioningConfiguration), quorumPermissioningConfig()); return Optional.of(permissioningConfiguration); } private Optional<GoQuorumPermissioningConfiguration> quorumPermissioningConfig() { if (!isGoQuorumCompatibilityMode) { return Optional.empty(); } try { final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions(); final OptionalLong qip714BlockNumber = genesisConfigOptions.getQip714BlockNumber(); return Optional.of( GoQuorumPermissioningConfiguration.enabled( qip714BlockNumber.orElse(QIP714_DEFAULT_BLOCK))); } catch (final Exception e) { throw new IllegalStateException("Error reading GoQuorum permissioning options", e); } } private boolean localPermissionsEnabled() { return permissionsAccountsEnabled || permissionsNodesEnabled; } private boolean contractPermissionsEnabled() { return permissionsNodesContractEnabled || permissionsAccountsContractEnabled; } private PrivacyParameters privacyParameters(final KeyValueStorageProvider storageProvider) { CommandLineUtils.checkOptionDependencies( logger, commandLine, "--privacy-enabled", !isPrivacyEnabled, asList("--privacy-multi-tenancy-enabled", "--privacy-tls-enabled")); CommandLineUtils.checkMultiOptionDependencies( logger, commandLine, List.of("--privacy-enabled", "--goquorum-compatibility-enabled"), List.of(!isPrivacyEnabled, !isGoQuorumCompatibilityMode), List.of("--privacy-url", "--privacy-public-key-file")); checkPrivacyTlsOptionsDependencies(); final PrivacyParameters.Builder privacyParametersBuilder = new PrivacyParameters.Builder(); if (isPrivacyEnabled) { final String errorSuffix = "cannot be enabled with privacy."; if (syncMode == SyncMode.FAST) { throw new ParameterException(commandLine, String.format("%s %s", "Fast sync", errorSuffix)); } if (isPruningEnabled()) { throw new ParameterException(commandLine, String.format("%s %s", "Pruning", errorSuffix)); } if (isGoQuorumCompatibilityMode) { throw new ParameterException( commandLine, String.format("%s %s", "GoQuorum mode", errorSuffix)); } if (isPrivacyMultiTenancyEnabled && !jsonRpcConfiguration.isAuthenticationEnabled() && !webSocketConfiguration.isAuthenticationEnabled()) { throw new ParameterException( commandLine, "Privacy multi-tenancy requires either http authentication to be enabled or WebSocket authentication to be enabled"); } privacyParametersBuilder.setEnabled(true); privacyParametersBuilder.setEnclaveUrl(privacyUrl); privacyParametersBuilder.setMultiTenancyEnabled(isPrivacyMultiTenancyEnabled); privacyParametersBuilder.setOnchainPrivacyGroupsEnabled(isFlexiblePrivacyGroupsEnabled); final boolean hasPrivacyPublicKey = privacyPublicKeyFile != null; if (hasPrivacyPublicKey && !isPrivacyMultiTenancyEnabled) { try { privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile); } catch (final IOException e) { throw new ParameterException( commandLine, "Problem with 
privacy-public-key-file: " + e.getMessage(), e); } catch (final IllegalArgumentException e) { throw new ParameterException( commandLine, "Contents of privacy-public-key-file invalid: " + e.getMessage(), e); } } else if (hasPrivacyPublicKey) { throw new ParameterException( commandLine, "Privacy multi-tenancy and privacy public key cannot be used together"); } else if (!isPrivacyMultiTenancyEnabled) { throw new ParameterException( commandLine, "Please specify Enclave public key file path to enable privacy"); } if (Wei.ZERO.compareTo(minTransactionGasPrice) < 0) { // if gas is required, cannot use random keys to sign private tx // ie --privacy-marker-transaction-signing-key-file must be set if (privacyMarkerTransactionSigningKeyPath == null) { throw new ParameterException( commandLine, "Not a free gas network. --privacy-marker-transaction-signing-key-file must be specified and must be a funded account. Private transactions cannot be signed by random (non-funded) accounts in paid gas networks"); } } if (!Address.PRIVACY.equals(privacyPrecompiledAddress)) { logger.warn( "--privacy-precompiled-address option is deprecated. This address is derived, based on --privacy-onchain-groups-enabled."); } privacyParametersBuilder.setPrivateKeyPath(privacyMarkerTransactionSigningKeyPath); privacyParametersBuilder.setStorageProvider( privacyKeyStorageProvider(keyValueStorageName + "-privacy")); if (isPrivacyTlsEnabled) { privacyParametersBuilder.setPrivacyKeyStoreFile(privacyKeyStoreFile); privacyParametersBuilder.setPrivacyKeyStorePasswordFile(privacyKeyStorePasswordFile); privacyParametersBuilder.setPrivacyTlsKnownEnclaveFile(privacyTlsKnownEnclaveFile); } privacyParametersBuilder.setEnclaveFactory(new EnclaveFactory(vertx)); } else if (isGoQuorumCompatibilityMode) { privacyParametersBuilder.setGoQuorumPrivacyParameters( Optional.of(configureGoQuorumPrivacy(storageProvider))); } if (!isPrivacyEnabled && anyPrivacyApiEnabled()) { logger.warn("Privacy is disabled. 
Cannot use EEA/PRIV API methods when not using Privacy."); } if (!isGoQuorumCompatibilityMode && (rpcHttpApis.contains(RpcApis.GOQUORUM) || rpcWsApis.contains(RpcApis.GOQUORUM))) { logger.warn("Cannot use GOQUORUM API methods when not in GoQuorum mode."); } final PrivacyParameters privacyParameters = privacyParametersBuilder.build(); if (isPrivacyEnabled) { preSynchronizationTaskRunner.addTask( new PrivateDatabaseMigrationPreSyncTask(privacyParameters, migratePrivateDatabase)); } return privacyParameters; } public WorldStateArchive createPrivateWorldStateArchive(final StorageProvider storageProvider) { final WorldStateStorage privateWorldStateStorage = storageProvider.createPrivateWorldStateStorage(); final WorldStatePreimageStorage preimageStorage = storageProvider.createPrivateWorldStatePreimageStorage(); return new DefaultWorldStateArchive(privateWorldStateStorage, preimageStorage); } private boolean anyPrivacyApiEnabled() { return rpcHttpApis.contains(RpcApis.EEA) || rpcWsApis.contains(RpcApis.EEA) || rpcHttpApis.contains(RpcApis.PRIV) || rpcWsApis.contains(RpcApis.PRIV); } private PrivacyKeyValueStorageProvider privacyKeyStorageProvider(final String name) { return new PrivacyKeyValueStorageProviderBuilder() .withStorageFactory(privacyKeyValueStorageFactory(name)) .withCommonConfiguration(pluginCommonConfiguration) .withMetricsSystem(getMetricsSystem()) .build(); } private PrivacyKeyValueStorageFactory privacyKeyValueStorageFactory(final String name) { return (PrivacyKeyValueStorageFactory) storageService .getByName(name) .orElseThrow( () -> new StorageException("No KeyValueStorageFactory found for key: " + name)); } private KeyValueStorageProvider keyValueStorageProvider(final String name) { if (this.keyValueStorageProvider == null) { this.keyValueStorageProvider = new KeyValueStorageProviderBuilder() .withStorageFactory( storageService .getByName(name) .orElseThrow( () -> new StorageException( "No KeyValueStorageFactory found for key: " + name))) .withCommonConfiguration(pluginCommonConfiguration) .withMetricsSystem(getMetricsSystem()) .build(); } return this.keyValueStorageProvider; } private SynchronizerConfiguration buildSyncConfig() { return unstableSynchronizerOptions .toDomainObject() .syncMode(syncMode) .fastSyncMinimumPeerCount(fastSyncMinPeerCount) .build(); } private TransactionPoolConfiguration buildTransactionPoolConfiguration() { return unstableTransactionPoolOptions .toDomainObject() .txPoolMaxSize(txPoolMaxSize) .pooledTransactionHashesSize(pooledTransactionHashesSize) .pendingTxRetentionPeriod(pendingTxRetentionPeriod) .priceBump(Percentage.fromInt(priceBump)) .txFeeCap(txFeeCap) .build(); } private boolean isPruningEnabled() { return pruningEnabled; } // Blockchain synchronisation from peers. 
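// Wires the node into a Runner hosting P2P networking, the JSON-RPC/WebSocket/GraphQL endpoints,
// metrics and permissioning; registers a shutdown hook that stops plugins before closing the
// runner, then blocks in awaitStop() until the node is shut down.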
private void synchronize( final BesuController controller, final boolean p2pEnabled, final boolean peerDiscoveryEnabled, final EthNetworkConfig ethNetworkConfig, final int maxPeers, final String p2pAdvertisedHost, final String p2pListenInterface, final int p2pListenPort, final GraphQLConfiguration graphQLConfiguration, final JsonRpcConfiguration jsonRpcConfiguration, final WebSocketConfiguration webSocketConfiguration, final ApiConfiguration apiConfiguration, final MetricsConfiguration metricsConfiguration, final Optional<PermissioningConfiguration> permissioningConfiguration, final Collection<EnodeURL> staticNodes, final Path pidPath) { checkNotNull(runnerBuilder); permissioningConfiguration.ifPresent(runnerBuilder::permissioningConfiguration); final ObservableMetricsSystem metricsSystem = this.metricsSystem.get(); final Runner runner = runnerBuilder .vertx(vertx) .besuController(controller) .p2pEnabled(p2pEnabled) .natMethod(natMethod) .natManagerServiceName(unstableNatOptions.getNatManagerServiceName()) .natMethodFallbackEnabled(unstableNatOptions.getNatMethodFallbackEnabled()) .discovery(peerDiscoveryEnabled) .ethNetworkConfig(ethNetworkConfig) .p2pAdvertisedHost(p2pAdvertisedHost) .p2pListenInterface(p2pListenInterface) .p2pListenPort(p2pListenPort) .maxPeers(maxPeers) .limitRemoteWireConnectionsEnabled(isLimitRemoteWireConnectionsEnabled) .fractionRemoteConnectionsAllowed( Fraction.fromPercentage(maxRemoteConnectionsPercentage).getValue()) .randomPeerPriority(randomPeerPriority) .networkingConfiguration(unstableNetworkingOptions.toDomainObject()) .graphQLConfiguration(graphQLConfiguration) .jsonRpcConfiguration(jsonRpcConfiguration) .webSocketConfiguration(webSocketConfiguration) .apiConfiguration(apiConfiguration) .pidPath(pidPath) .dataDir(dataDir()) .bannedNodeIds(bannedNodeIds) .metricsSystem(metricsSystem) .metricsConfiguration(metricsConfiguration) .staticNodes(staticNodes) .identityString(identityString) .besuPluginContext(besuPluginContext) .autoLogBloomCaching(autoLogBloomCachingEnabled) .ethstatsUrl(unstableEthstatsOptions.getEthstatsUrl()) .ethstatsContact(unstableEthstatsOptions.getEthstatsContact()) .storageProvider(keyValueStorageProvider(keyValueStorageName)) .forkIdSupplier(() -> besuController.getProtocolManager().getForkIdAsBytesList()) .build(); addShutdownHook(runner); runner.start(); runner.awaitStop(); } protected Vertx createVertx(final VertxOptions vertxOptions) { return Vertx.vertx(vertxOptions); } private VertxOptions createVertxOptions(final MetricsSystem metricsSystem) { return new VertxOptions() .setMetricsOptions( new MetricsOptions() .setEnabled(true) .setFactory(new VertxMetricsAdapterFactory(metricsSystem))); } private void addShutdownHook(final Runner runner) { Runtime.getRuntime() .addShutdownHook( new Thread( () -> { try { besuPluginContext.stopPlugins(); runner.close(); LogManager.shutdown(); } catch (final Exception e) { logger.error("Failed to stop Besu"); } })); } // Used to discover the default IP of the client. // Loopback IP is used by default as this is how smokeTests require it to be // and it's probably a good security behaviour to default only on the localhost. 
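// In practice InetAddress.getLoopbackAddress() resolves to 127.0.0.1 (or ::1), so host options that
// default to this value (e.g. --rpc-ws-host and --metrics-host declared above) listen only on the
// loopback interface unless explicitly overridden.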
private InetAddress autoDiscoverDefaultIP() { if (autoDiscoveredDefaultIP != null) { return autoDiscoveredDefaultIP; } autoDiscoveredDefaultIP = InetAddress.getLoopbackAddress(); return autoDiscoveredDefaultIP; } private EthNetworkConfig updateNetworkConfig(final NetworkName network) { final EthNetworkConfig.Builder builder = new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(network)); // custom genesis file use comes with specific default values for the genesis // file itself // but also for the network id and the bootnodes list. if (genesisFile != null) { // noinspection ConstantConditions network is not always null but injected by // PicoCLI if used if (this.network != null) { // We check if network option was really provided by user and not only looking // at the // default value. // if user provided it and provided the genesis file option at the same time, it // raises a // conflict error throw new ParameterException( this.commandLine, "--network option and --genesis-file option can't be used at the same time. Please " + "refer to CLI reference for more details about this constraint."); } builder.setGenesisConfig(genesisConfig()); if (networkId == null) { // if no network id option is defined on the CLI we have to set a default value // from the // genesis file. // We do the genesis parsing only in this case as we already have network id // constants // for known networks to speed up the process. // Also we have to parse the genesis as we don't already have a parsed version // at this // stage. // If no chain id is found in the genesis as it's an optional, we use mainnet // network id. try { builder.setNetworkId( getGenesisConfigFile() .getConfigOptions(genesisConfigOverrides) .getChainId() .orElse(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId())); } catch (final DecodeException e) { throw new ParameterException( this.commandLine, String.format("Unable to parse genesis file %s.", genesisFile), e); } catch (final ArithmeticException e) { throw new ParameterException( this.commandLine, "No networkId specified and chainId in " + "genesis file is too large to be used as a networkId"); } } if (bootNodes == null) { // We default to an empty bootnodes list if the option is not provided on CLI // because // mainnet bootnodes won't work as the default value for a custom genesis, // so it's better to have an empty list as default value that forces to create a // custom one // than a useless one that may make user think that it can work when it can't. 
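// Illustrative enode format for bootnodes and static nodes (placeholder node id and documentation IP):
//   enode://<128-hex-char-node-id>@203.0.113.10:30303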
builder.setBootNodes(new ArrayList<>()); } builder.setDnsDiscoveryUrl(null); } if (discoveryDnsUrl != null) { builder.setDnsDiscoveryUrl(discoveryDnsUrl); } if (networkId != null) { builder.setNetworkId(networkId); } if (bootNodes != null) { if (!peerDiscoveryEnabled) { logger.warn("Discovery disabled: bootnodes will be ignored."); } try { final List<EnodeURL> listBootNodes = bootNodes.stream() .filter(value -> !value.isEmpty()) .map(url -> EnodeURL.fromString(url, getEnodeDnsConfiguration())) .collect(Collectors.toList()); DiscoveryConfiguration.assertValidBootnodes(listBootNodes); builder.setBootNodes(listBootNodes); } catch (final IllegalArgumentException e) { throw new ParameterException(commandLine, e.getMessage()); } } return builder.build(); } private GenesisConfigFile getGenesisConfigFile() { return GenesisConfigFile.fromConfig(genesisConfig()); } private String genesisConfig() { try { return Resources.toString(genesisFile.toURI().toURL(), UTF_8); } catch (final IOException e) { throw new ParameterException( this.commandLine, String.format("Unable to load genesis file %s.", genesisFile), e); } } // dataDir() is public because it is accessed by subcommands public Path dataDir() { return dataPath.toAbsolutePath(); } private Path pluginsDir() { final String pluginsDir = System.getProperty("besu.plugins.dir"); if (pluginsDir == null) { return new File(System.getProperty("besu.home", "."), "plugins").toPath(); } else { return new File(pluginsDir).toPath(); } } @VisibleForTesting NodeKey buildNodeKey() { return new NodeKey(securityModule()); } private SecurityModule securityModule() { return securityModuleService .getByName(securityModuleName) .orElseThrow(() -> new RuntimeException("Security Module not found: " + securityModuleName)) .get(); } private File nodePrivateKeyFile() { return Optional.ofNullable(nodePrivateKeyFile) .orElseGet(() -> KeyPairUtil.getDefaultKeyFile(dataDir())); } private String rpcHttpAuthenticationCredentialsFile() { final String filename = rpcHttpAuthenticationCredentialsFile; if (filename != null) { RpcAuthFileValidator.validate(commandLine, filename, "HTTP"); } return filename; } private String rpcWsAuthenticationCredentialsFile() { final String filename = rpcWsAuthenticationCredentialsFile; if (filename != null) { RpcAuthFileValidator.validate(commandLine, filename, "WS"); } return filename; } private String getDefaultPermissioningFilePath() { return dataDir() + System.getProperty("file.separator") + DefaultCommandValues.PERMISSIONING_CONFIG_LOCATION; } public MetricsSystem getMetricsSystem() { return metricsSystem.get(); } private Set<EnodeURL> loadStaticNodes() throws IOException { final Path staticNodesPath; if (staticNodesFile != null) { staticNodesPath = staticNodesFile.toAbsolutePath(); if (!staticNodesPath.toFile().exists()) { throw new ParameterException( commandLine, String.format("Static nodes file %s does not exist", staticNodesPath)); } } else { final String staticNodesFilename = "static-nodes.json"; staticNodesPath = dataDir().resolve(staticNodesFilename); } logger.info("Static Nodes file = {}", staticNodesPath); return StaticNodesParser.fromPath(staticNodesPath, getEnodeDnsConfiguration()); } public BesuExceptionHandler exceptionHandler() { return new BesuExceptionHandler(this::getLogLevel); } public EnodeDnsConfiguration getEnodeDnsConfiguration() { if (enodeDnsConfiguration == null) { enodeDnsConfiguration = unstableDnsOptions.toDomainObject(); } return enodeDnsConfiguration; } private void checkPortClash() { getEffectivePorts().stream() 
.filter(Objects::nonNull) .filter(port -> port > 0) .forEach( port -> { if (!allocatedPorts.add(port)) { throw new ParameterException( commandLine, "Port number '" + port + "' has been specified multiple times. Please review the supplied configuration."); } }); } /** * * Gets the list of effective ports (ports that are enabled). * * @return The list of effective ports */ private List<Integer> getEffectivePorts() { final List<Integer> effectivePorts = new ArrayList<>(); addPortIfEnabled(effectivePorts, p2pPort, p2pEnabled); addPortIfEnabled(effectivePorts, graphQLHttpPort, isGraphQLHttpEnabled); addPortIfEnabled(effectivePorts, rpcHttpPort, isRpcHttpEnabled); addPortIfEnabled(effectivePorts, rpcWsPort, isRpcWsEnabled); addPortIfEnabled(effectivePorts, metricsPort, isMetricsEnabled); addPortIfEnabled(effectivePorts, metricsPushPort, isMetricsPushEnabled); addPortIfEnabled(effectivePorts, stratumPort, iStratumMiningEnabled); return effectivePorts; } /** * Adds port in the passed list only if enabled. * * @param ports The list of ports * @param port The port value * @param enabled true if enabled, false otherwise */ private void addPortIfEnabled( final List<Integer> ports, final Integer port, final boolean enabled) { if (enabled) { ports.add(port); } } private void checkGoQuorumGenesisConfig() { if (genesisFile != null) { if (readGenesisConfigOptions().isQuorum() && !isGoQuorumCompatibilityMode) { throw new IllegalStateException( "Cannot use GoQuorum genesis file without GoQuorum privacy enabled"); } } } private void checkGoQuorumCompatibilityConfig(final EthNetworkConfig ethNetworkConfig) { if (isGoQuorumCompatibilityMode) { if (genesisFile == null) { throw new ParameterException( this.commandLine, "--genesis-file must be specified if GoQuorum compatibility mode is enabled."); } final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions(); // this static flag is read by the RLP decoder GoQuorumOptions.goQuorumCompatibilityMode = true; if (!genesisConfigOptions.isQuorum()) { throw new IllegalStateException( "GoQuorum compatibility mode (enabled) can only be used if genesis file has 'isQuorum' flag set to true."); } if (!minTransactionGasPrice.isZero()) { throw new ParameterException( this.commandLine, "--min-gas-price must be set to zero if GoQuorum compatibility is enabled in the genesis config."); } if (ensureGoQuorumCompatibilityModeNotUsedOnMainnet(genesisConfigOptions, ethNetworkConfig)) { throw new ParameterException( this.commandLine, "GoQuorum compatibility mode (enabled) cannot be used on Mainnet."); } } } private static boolean ensureGoQuorumCompatibilityModeNotUsedOnMainnet( final GenesisConfigOptions genesisConfigOptions, final EthNetworkConfig ethNetworkConfig) { return ethNetworkConfig.getNetworkId().equals(EthNetworkConfig.MAINNET_NETWORK_ID) || genesisConfigOptions .getChainId() .map(chainId -> chainId.equals(EthNetworkConfig.MAINNET_NETWORK_ID)) .orElse(false); } @VisibleForTesting Level getLogLevel() { return logLevel; } private class BesuCommandConfigurationService implements BesuConfiguration { @Override public Path getStoragePath() { return dataDir().resolve(DATABASE_PATH); } @Override public Path getDataPath() { return dataDir(); } @Override public int getDatabaseVersion() { return unstableDataStorageOptions .toDomainObject() .getDataStorageFormat() .getDatabaseVersion(); } } private void instantiateSignatureAlgorithmFactory() { if (SignatureAlgorithmFactory.isInstanceSet()) { return; } Optional<String> ecCurve = getEcCurveFromGenesisFile(); if 
(ecCurve.isEmpty()) { SignatureAlgorithmFactory.setDefaultInstance(); return; } try { SignatureAlgorithmFactory.setInstance(SignatureAlgorithmType.create(ecCurve.get())); } catch (IllegalArgumentException e) { throw new CommandLine.InitializationException(e.getMessage()); } } private Optional<String> getEcCurveFromGenesisFile() { if (genesisFile == null) { return Optional.empty(); } GenesisConfigOptions options = readGenesisConfigOptions(); return options.getEcCurve(); } }
1
24,999
I moved the instantiation of the signature algorithm here, to execute it before the native libraries are configured. Otherwise the wrong signature algorithm could be configured in a future release when more than one is supported in Besu.
hyperledger-besu
java
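The review message in this hyperledger-besu record explains an ordering constraint: the signature algorithm has to be instantiated before any native libraries are configured, otherwise the wrong algorithm could be wired into the native code path once Besu supports more than one. A minimal sketch of that ordering follows; `configureNativeLibraries()` is a placeholder name for illustration only (just `instantiateSignatureAlgorithmFactory()` appears in the file above), so this is not actual Besu API.

```java
// Sketch of the startup ordering described in the review message above.
// configureNativeLibraries() is a placeholder, not real Besu API.
class StartupOrderSketch {
    void run() {
        instantiateSignatureAlgorithmFactory(); // choose the default or genesis-defined ecCurve first
        configureNativeLibraries();             // only then set up natives for the algorithm actually chosen
    }

    private void instantiateSignatureAlgorithmFactory() { /* see the Besu source in the record above */ }

    private void configureNativeLibraries() { /* placeholder for the later native-library setup */ }
}
```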
@@ -1,11 +1,16 @@ +from typing import TYPE_CHECKING + from astroid import nodes from pylint.checkers import BaseChecker from pylint.interfaces import IAstroidChecker +if TYPE_CHECKING: + from pylint.lint import PyLinter -# This is our checker class. # Checkers should always inherit from `BaseChecker`. + + class MyAstroidChecker(BaseChecker): """Add class member attributes to the class local's dictionary."""
1
from astroid import nodes from pylint.checkers import BaseChecker from pylint.interfaces import IAstroidChecker # This is our checker class. # Checkers should always inherit from `BaseChecker`. class MyAstroidChecker(BaseChecker): """Add class member attributes to the class local's dictionary.""" # This class variable defines the type of checker that we are implementing. # In this case, we are implementing an AST checker. __implements__ = IAstroidChecker # The name defines a custom section of the config for this checker. name = "custom" # The priority indicates the order that pylint will run the checkers. priority = -1 # This class variable declares the messages (ie the warnings and errors) # that the checker can emit. msgs = { # Each message has a code, a message that the user will see, # a unique symbol that identifies the message, # and a detailed help message # that will be included in the documentation. "W0001": ("Message that will be emitted", "message-symbol", "Message help") } # This class variable declares the options # that are configurable by the user. options = ( # Each option definition has a name which is used on the command line # and in config files, and a dictionary of arguments # (similar to argparse.ArgumentParser.add_argument). ( "store-locals-indicator", { "default": "properties", "help": ( "The expression name that indicates that the locals should " "be stored" ), }, ), ) def visit_call(self, node: nodes.Call) -> None: """Called when a :class:`.nodes.Call` node is visited. See :mod:`astroid` for the description of available nodes.""" if not ( isinstance(node.func, nodes.Attribute) and isinstance(node.func.expr, nodes.Name) and node.func.expr.name == self.config.store_locals_indicator and node.func.attrname == "create" ): return in_class = node.frame() for param in node.args: in_class.locals[param.name] = node def register(linter): """This required method auto registers the checker. :param linter: The linter to register the checker to. :type linter: pylint.lint.PyLinter """ linter.register_checker(MyAstroidChecker(linter))
1
19,277
Why remove this?
PyCQA-pylint
py
@@ -490,6 +490,18 @@ class Builder { return this; } + /** + * Sets the {@link ie.ServiceBuilder} to use to manage the geckodriver + * child process when creating IE sessions locally. + * + * @param {ie.ServiceBuilder} service the service to use. + * @return {!Builder} a self reference. + */ + setIeService(service) { + this.ieService_ = service; + return this; + } + /** * Set {@linkplain edge.Options options} specific to Microsoft's Edge browser * for drivers created by this builder. Any proxy settings defined on the
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. /** * @fileoverview The main user facing module. Exports WebDriver's primary * public API and provides convenience assessors to certain sub-modules. */ 'use strict'; const _http = require('./http'); const by = require('./lib/by'); const capabilities = require('./lib/capabilities'); const chrome = require('./chrome'); const command = require('./lib/command'); const edge = require('./edge'); const error = require('./lib/error'); const firefox = require('./firefox'); const ie = require('./ie'); const input = require('./lib/input'); const logging = require('./lib/logging'); const promise = require('./lib/promise'); const remote = require('./remote'); const safari = require('./safari'); const session = require('./lib/session'); const until = require('./lib/until'); const webdriver = require('./lib/webdriver'); const Browser = capabilities.Browser; const Capabilities = capabilities.Capabilities; const Capability = capabilities.Capability; const Session = session.Session; const WebDriver = webdriver.WebDriver; var seleniumServer; /** * Starts an instance of the Selenium server if not yet running. * @param {string} jar Path to the server jar to use. * @return {!Promise<string>} A promise for the server's * address once started. */ function startSeleniumServer(jar) { if (!seleniumServer) { seleniumServer = new remote.SeleniumServer(jar); } return seleniumServer.start(); } /** * {@linkplain webdriver.WebDriver#setFileDetector WebDriver's setFileDetector} * method uses a non-standard command to transfer files from the local client * to the remote end hosting the browser. Many of the WebDriver sub-types, like * the {@link chrome.Driver} and {@link firefox.Driver}, do not support this * command. Thus, these classes override the `setFileDetector` to no-op. * * This function uses a mixin to re-enable `setFileDetector` by calling the * original method on the WebDriver prototype directly. This is used only when * the builder creates a Chrome or Firefox instance that communicates with a * remote end (and thus, support for remote file detectors is unknown). * * @param {function(new: webdriver.WebDriver, ...?)} ctor * @return {function(new: webdriver.WebDriver, ...?)} */ function ensureFileDetectorsAreEnabled(ctor) { const mixin = class extends ctor { /** @param {input.FileDetector} detector */ setFileDetector(detector) { webdriver.WebDriver.prototype.setFileDetector.call(this, detector); } }; return mixin; } /** * A thenable wrapper around a {@linkplain webdriver.IWebDriver IWebDriver} * instance that allows commands to be issued directly instead of having to * repeatedly call `then`: * * let driver = new Builder().build(); * driver.then(d => d.get(url)); // You can do this... 
* driver.get(url); // ...or this * * If the driver instance fails to resolve (e.g. the session cannot be created), * every issued command will fail. * * @extends {webdriver.IWebDriver} * @extends {IThenable<!webdriver.IWebDriver>} * @interface */ class ThenableWebDriver { /** @param {...?} args */ static createSession(...args) {} } /** * @const {!Map<function(new: WebDriver, !IThenable<!Session>, ...?), * function(new: ThenableWebDriver, !IThenable<!Session>, ...?)>} */ const THENABLE_DRIVERS = new Map; /** * @param {function(new: WebDriver, !IThenable<!Session>, ...?)} ctor * @param {...?} args * @return {!ThenableWebDriver} */ function createDriver(ctor, ...args) { let thenableWebDriverProxy = THENABLE_DRIVERS.get(ctor); if (!thenableWebDriverProxy) { /** * @extends {WebDriver} // Needed since `ctor` is dynamically typed. * @implements {ThenableWebDriver} */ thenableWebDriverProxy = class extends ctor { /** * @param {!IThenable<!Session>} session * @param {...?} rest */ constructor(session, ...rest) { super(session, ...rest); const pd = this.getSession().then(session => { return new ctor(session, ...rest); }); /** @override */ this.then = pd.then.bind(pd); /** @override */ this.catch = pd.catch.bind(pd); } }; THENABLE_DRIVERS.set(ctor, thenableWebDriverProxy); } return thenableWebDriverProxy.createSession(...args); } /** * Creates new {@link webdriver.WebDriver WebDriver} instances. The environment * variables listed below may be used to override a builder's configuration, * allowing quick runtime changes. * * - {@code SELENIUM_BROWSER}: defines the target browser in the form * {@code browser[:version][:platform]}. * * - {@code SELENIUM_REMOTE_URL}: defines the remote URL for all builder * instances. This environment variable should be set to a fully qualified * URL for a WebDriver server (e.g. http://localhost:4444/wd/hub). This * option always takes precedence over {@code SELENIUM_SERVER_JAR}. * * - {@code SELENIUM_SERVER_JAR}: defines the path to the * <a href="http://selenium-release.storage.googleapis.com/index.html"> * standalone Selenium server</a> jar to use. The server will be started the * first time a WebDriver instance and be killed when the process exits. * * Suppose you had mytest.js that created WebDriver with * * var driver = new webdriver.Builder() * .forBrowser('chrome') * .build(); * * This test could be made to use Firefox on the local machine by running with * `SELENIUM_BROWSER=firefox node mytest.js`. 
Rather than change the code to * target Google Chrome on a remote machine, you can simply set the * `SELENIUM_BROWSER` and `SELENIUM_REMOTE_URL` environment variables: * * SELENIUM_BROWSER=chrome:36:LINUX \ * SELENIUM_REMOTE_URL=http://www.example.com:4444/wd/hub \ * node mytest.js * * You could also use a local copy of the standalone Selenium server: * * SELENIUM_BROWSER=chrome:36:LINUX \ * SELENIUM_SERVER_JAR=/path/to/selenium-server-standalone.jar \ * node mytest.js */ class Builder { constructor() { /** @private @const */ this.log_ = logging.getLogger('webdriver.Builder'); /** @private {string} */ this.url_ = ''; /** @private {?string} */ this.proxy_ = null; /** @private {!Capabilities} */ this.capabilities_ = new Capabilities(); /** @private {chrome.Options} */ this.chromeOptions_ = null; /** @private {chrome.ServiceBuilder} */ this.chromeService_ = null; /** @private {firefox.Options} */ this.firefoxOptions_ = null; /** @private {firefox.ServiceBuilder} */ this.firefoxService_ = null; /** @private {ie.Options} */ this.ieOptions_ = null; /** @private {safari.Options} */ this.safariOptions_ = null; /** @private {edge.Options} */ this.edgeOptions_ = null; /** @private {remote.DriverService.Builder} */ this.edgeService_ = null; /** @private {boolean} */ this.ignoreEnv_ = false; /** @private {http.Agent} */ this.agent_ = null; } /** * Configures this builder to ignore any environment variable overrides and to * only use the configuration specified through this instance's API. * * @return {!Builder} A self reference. */ disableEnvironmentOverrides() { this.ignoreEnv_ = true; return this; } /** * Sets the URL of a remote WebDriver server to use. Once a remote URL has * been specified, the builder direct all new clients to that server. If this * method is never called, the Builder will attempt to create all clients * locally. * * As an alternative to this method, you may also set the * `SELENIUM_REMOTE_URL` environment variable. * * @param {string} url The URL of a remote server to use. * @return {!Builder} A self reference. */ usingServer(url) { this.url_ = url; return this; } /** * @return {string} The URL of the WebDriver server this instance is * configured to use. */ getServerUrl() { return this.url_; } /** * Sets the URL of the proxy to use for the WebDriver's HTTP connections. * If this method is never called, the Builder will create a connection * without a proxy. * * @param {string} proxy The URL of a proxy to use. * @return {!Builder} A self reference. */ usingWebDriverProxy(proxy) { this.proxy_ = proxy; return this; } /** * @return {?string} The URL of the proxy server to use for the WebDriver's * HTTP connections, or `null` if not set. */ getWebDriverProxy() { return this.proxy_; } /** * Sets the http agent to use for each request. * If this method is not called, the Builder will use http.globalAgent by default. * * @param {http.Agent} agent The agent to use for each request. * @return {!Builder} A self reference. */ usingHttpAgent(agent) { this.agent_ = agent; return this; } /** * @return {http.Agent} The http agent used for each request */ getHttpAgent() { return this.agent_; } /** * Sets the desired capabilities when requesting a new session. This will * overwrite any previously set capabilities. * @param {!(Object|Capabilities)} capabilities The desired capabilities for * a new session. * @return {!Builder} A self reference. 
*/ withCapabilities(capabilities) { this.capabilities_ = new Capabilities(capabilities); return this; } /** * Returns the base set of capabilities this instance is currently configured * to use. * @return {!Capabilities} The current capabilities for this builder. */ getCapabilities() { return this.capabilities_; } /** * Configures the target browser for clients created by this instance. * Any calls to {@link #withCapabilities} after this function will * overwrite these settings. * * You may also define the target browser using the {@code SELENIUM_BROWSER} * environment variable. If set, this environment variable should be of the * form `browser[:[version][:platform]]`. * * @param {(string|!Browser)} name The name of the target browser; * common defaults are available on the {@link webdriver.Browser} enum. * @param {string=} opt_version A desired version; may be omitted if any * version should be used. * @param {(string|!capabilities.Platform)=} opt_platform * The desired platform; may be omitted if any platform may be used. * @return {!Builder} A self reference. */ forBrowser(name, opt_version, opt_platform) { this.capabilities_.setBrowserName(name); if (opt_version) { this.capabilities_.setBrowserVersion(opt_version); } if (opt_platform) { this.capabilities_.setPlatform(opt_platform); } return this; } /** * Sets the proxy configuration for the target browser. * Any calls to {@link #withCapabilities} after this function will * overwrite these settings. * * @param {!./lib/proxy.Config} config The configuration to use. * @return {!Builder} A self reference. */ setProxy(config) { this.capabilities_.setProxy(config); return this; } /** * Sets the logging preferences for the created session. Preferences may be * changed by repeated calls, or by calling {@link #withCapabilities}. * @param {!(./lib/logging.Preferences|Object<string, string>)} prefs The * desired logging preferences. * @return {!Builder} A self reference. */ setLoggingPrefs(prefs) { this.capabilities_.setLoggingPrefs(prefs); return this; } /** * Sets the default action to take with an unexpected alert before returning * an error. * * @param {?capabilities.UserPromptHandler} behavior The desired behavior. * @return {!Builder} A self reference. * @see capabilities.Capabilities#setAlertBehavior */ setAlertBehavior(behavior) { this.capabilities_.setAlertBehavior(behavior); return this; } /** * Sets Chrome specific {@linkplain chrome.Options options} for drivers * created by this builder. Any logging or proxy settings defined on the given * options will take precedence over those set through * {@link #setLoggingPrefs} and {@link #setProxy}, respectively. * * @param {!chrome.Options} options The ChromeDriver options to use. * @return {!Builder} A self reference. */ setChromeOptions(options) { this.chromeOptions_ = options; return this; } /** * @return {chrome.Options} the Chrome specific options currently configured * for this builder. */ getChromeOptions() { return this.chromeOptions_; } /** * Sets the service builder to use for managing the chromedriver child process * when creating new Chrome sessions. * * @param {chrome.ServiceBuilder} service the service to use. * @return {!Builder} A self reference. */ setChromeService(service) { if (service && !(service instanceof chrome.ServiceBuilder)) { throw TypeError('not a chrome.ServiceBuilder object'); } this.chromeService_ = service; return this; } /** * Sets Firefox specific {@linkplain firefox.Options options} for drivers * created by this builder. 
Any logging or proxy settings defined on the given * options will take precedence over those set through * {@link #setLoggingPrefs} and {@link #setProxy}, respectively. * * @param {!firefox.Options} options The FirefoxDriver options to use. * @return {!Builder} A self reference. */ setFirefoxOptions(options) { this.firefoxOptions_ = options; return this; } /** * @return {firefox.Options} the Firefox specific options currently configured * for this instance. */ getFirefoxOptions() { return this.firefoxOptions_; } /** * Sets the {@link firefox.ServiceBuilder} to use to manage the geckodriver * child process when creating Firefox sessions locally. * * @param {firefox.ServiceBuilder} service the service to use. * @return {!Builder} a self reference. */ setFirefoxService(service) { if (service && !(service instanceof firefox.ServiceBuilder)) { throw TypeError('not a firefox.ServiceBuilder object'); } this.firefoxService_ = service; return this; } /** * Set Internet Explorer specific {@linkplain ie.Options options} for drivers * created by this builder. Any proxy settings defined on the given options * will take precedence over those set through {@link #setProxy}. * * @param {!ie.Options} options The IEDriver options to use. * @return {!Builder} A self reference. */ setIeOptions(options) { this.ieOptions_ = options; return this; } /** * Set {@linkplain edge.Options options} specific to Microsoft's Edge browser * for drivers created by this builder. Any proxy settings defined on the * given options will take precedence over those set through * {@link #setProxy}. * * @param {!edge.Options} options The MicrosoftEdgeDriver options to use. * @return {!Builder} A self reference. */ setEdgeOptions(options) { this.edgeOptions_ = options; return this; } /** * Sets the {@link edge.ServiceBuilder} to use to manage the * MicrosoftEdgeDriver child process when creating sessions locally. * * @param {edge.ServiceBuilder} service the service to use. * @return {!Builder} a self reference. */ setEdgeService(service) { if (service && !(service instanceof edge.ServiceBuilder)) { throw TypeError('not a edge.ServiceBuilder object'); } this.edgeService_ = service; return this; } /** * Sets Safari specific {@linkplain safari.Options options} for drivers * created by this builder. Any logging settings defined on the given options * will take precedence over those set through {@link #setLoggingPrefs}. * * @param {!safari.Options} options The Safari options to use. * @return {!Builder} A self reference. */ setSafariOptions(options) { this.safariOptions_ = options; return this; } /** * @return {safari.Options} the Safari specific options currently configured * for this instance. */ getSafariOptions() { return this.safariOptions_; } /** * Creates a new WebDriver client based on this builder's current * configuration. * * This method will return a {@linkplain ThenableWebDriver} instance, allowing * users to issue commands directly without calling `then()`. The returned * thenable wraps a promise that will resolve to a concrete * {@linkplain webdriver.WebDriver WebDriver} instance. The promise will be * rejected if the remote end fails to create a new session. * * @return {!ThenableWebDriver} A new WebDriver instance. * @throws {Error} If the current configuration is invalid. */ build() { // Create a copy for any changes we may need to make based on the current // environment. 
var capabilities = new Capabilities(this.capabilities_); var browser; if (!this.ignoreEnv_ && process.env.SELENIUM_BROWSER) { this.log_.fine(`SELENIUM_BROWSER=${process.env.SELENIUM_BROWSER}`); browser = process.env.SELENIUM_BROWSER.split(/:/, 3); capabilities.setBrowserName(browser[0]); browser[1] && capabilities.setBrowserVersion(browser[1]); browser[2] && capabilities.setPlatform(browser[2]); } browser = capabilities.get(Capability.BROWSER_NAME); if (typeof browser !== 'string') { throw TypeError( `Target browser must be a string, but is <${typeof browser}>;` + ' did you forget to call forBrowser()?'); } if (browser === 'ie') { browser = Browser.INTERNET_EXPLORER; } // Apply browser specific overrides. if (browser === Browser.CHROME && this.chromeOptions_) { capabilities.merge(this.chromeOptions_); } else if (browser === Browser.FIREFOX && this.firefoxOptions_) { capabilities.merge(this.firefoxOptions_); } else if (browser === Browser.INTERNET_EXPLORER && this.ieOptions_) { capabilities.merge(this.ieOptions_); } else if (browser === Browser.SAFARI && this.safariOptions_) { capabilities.merge(this.safariOptions_); } else if (browser === Browser.EDGE && this.edgeOptions_) { capabilities.merge(this.edgeOptions_); } checkOptions( capabilities, 'chromeOptions', chrome.Options, 'setChromeOptions'); checkOptions( capabilities, 'moz:firefoxOptions', firefox.Options, 'setFirefoxOptions'); checkOptions( capabilities, 'safari.options', safari.Options, 'setSafariOptions'); // Check for a remote browser. let url = this.url_; if (!this.ignoreEnv_) { if (process.env.SELENIUM_REMOTE_URL) { this.log_.fine( `SELENIUM_REMOTE_URL=${process.env.SELENIUM_REMOTE_URL}`); url = process.env.SELENIUM_REMOTE_URL; } else if (process.env.SELENIUM_SERVER_JAR) { this.log_.fine( `SELENIUM_SERVER_JAR=${process.env.SELENIUM_SERVER_JAR}`); url = startSeleniumServer(process.env.SELENIUM_SERVER_JAR); } } if (url) { this.log_.fine('Creating session on remote server'); let client = Promise.resolve(url) .then(url => new _http.HttpClient(url, this.agent_, this.proxy_)); let executor = new _http.Executor(client); if (browser === Browser.CHROME) { const driver = ensureFileDetectorsAreEnabled(chrome.Driver); return createDriver(driver, capabilities, executor); } if (browser === Browser.FIREFOX) { const driver = ensureFileDetectorsAreEnabled(firefox.Driver); return createDriver(driver, capabilities, executor); } return createDriver(WebDriver, executor, capabilities); } // Check for a native browser. switch (browser) { case Browser.CHROME: { let service = null; if (this.chromeService_) { service = this.chromeService_.build(); } return createDriver(chrome.Driver, capabilities, service); } case Browser.FIREFOX: { let service = null; if (this.firefoxService_) { service = this.firefoxService_.build(); } return createDriver(firefox.Driver, capabilities, service); } case Browser.INTERNET_EXPLORER: return createDriver(ie.Driver, capabilities); case Browser.EDGE: { let service = null; if (this.edgeService_) { service = this.edgeService_.build(); } return createDriver(edge.Driver, capabilities, service); } case Browser.SAFARI: return createDriver(safari.Driver, capabilities); default: throw new Error('Do not know how to build driver: ' + browser + '; did you forget to call usingServer(url)?'); } } } /** * In the 3.x releases, the various browser option classes * (e.g. firefox.Options) had to be manually set as an option using the * Capabilties class: * * let ffo = new firefox.Options(); * // Configure firefox options... 
* * let caps = new Capabilities(); * caps.set('moz:firefoxOptions', ffo); * * let driver = new Builder() * .withCapabilities(caps) * .build(); * * The options are now subclasses of Capabilities and can be used directly. A * direct translation of the above is: * * let ffo = new firefox.Options(); * // Configure firefox options... * * let driver = new Builder() * .withCapabilities(ffo) * .build(); * * You can also set the options for various browsers at once and let the builder * choose the correct set at runtime (see Builder docs above): * * let ffo = new firefox.Options(); * // Configure ... * * let co = new chrome.Options(); * // Configure ... * * let driver = new Builder() * .setChromeOptions(co) * .setFirefoxOptions(ffo) * .build(); * * @param {!Capabilities} caps * @param {string} key * @param {function(new: Capabilities)} optionType * @param {string} setMethod * @throws {error.InvalidArgumentError} */ function checkOptions(caps, key, optionType, setMethod) { let val = caps.get(key); if (val instanceof optionType) { throw new error.InvalidArgumentError( 'Options class extends Capabilities and should not be set as key ' + `"${key}"; set browser-specific options with ` + `Builder.${setMethod}(). For more information, see the ` + 'documentation attached to the function that threw this error'); } } // PUBLIC API exports.Browser = capabilities.Browser; exports.Builder = Builder; exports.Button = input.Button; exports.By = by.By; exports.Capabilities = capabilities.Capabilities; exports.Capability = capabilities.Capability; exports.Condition = webdriver.Condition; exports.FileDetector = input.FileDetector; exports.Key = input.Key; exports.Origin = input.Origin; exports.Session = session.Session; exports.ThenableWebDriver = ThenableWebDriver; exports.WebDriver = webdriver.WebDriver; exports.WebElement = webdriver.WebElement; exports.WebElementCondition = webdriver.WebElementCondition; exports.WebElementPromise = webdriver.WebElementPromise; exports.error = error; exports.logging = logging; exports.promise = promise; exports.until = until;
1
15,736
`this.ieService_` should be initialized to null in the constructor.
SeleniumHQ-selenium
js
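The review message in this SeleniumHQ-selenium record asks for `this.ieService_` to get an explicit default in the `Builder` constructor, matching how the other driver-service fields in the file above are declared. A minimal sketch of that suggestion (a trimmed-down class, with the real constructor's other fields elided):

```js
// Minimal sketch of the reviewer's request: declare the field with a null default,
// the same way chromeService_ and firefoxService_ are declared in the real Builder.
class Builder {
  constructor() {
    // ...other private fields from the real constructor elided...

    /** @private {ie.ServiceBuilder} */
    this.ieService_ = null;
  }
}
```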
@@ -394,7 +394,7 @@ int main(int argc, char **argv) cflags.push_back(generate_header_filter_cflag(response_header_filters)); } - ebpf::BPF *bpf = new ebpf::BPF(); + std::unique_ptr<ebpf::BPF> bpf(new ebpf::BPF()); std::vector<ebpf::USDT> probes; bool selective_tracing = false;
1
/* * Copyright (c) 2019-2021 Fastly, Inc., Toru Maesaka, Goro Fuji, Kazuho Oku * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <memory> #include <vector> #include <algorithm> #include <bcc/BPF.h> #include <bcc/libbpf.h> extern "C" { #include <unistd.h> #include <stdarg.h> #include <limits.h> #include <arpa/inet.h> #include <sys/time.h> #include "h2o/memory.h" #include "h2o/version.h" #include "h2o/ebpf.h" } #include "h2olog.h" #define POLL_TIMEOUT (1000) #define PERF_BUFFER_PAGE_COUNT 256 static void usage(void) { printf(R"(h2olog (h2o v%s) Usage: h2olog -p PID Optional arguments: -d Print debugging information (-dd shows more). -h Print this help and exit. -l Print the list of available tracepoints and exit. -H Trace HTTP requests and responses in varnishlog-like format. -s <header-name> A response header name to show, e.g. "content-type". -t <tracepoint> A tracepoint, or fully-qualified probe name to trace. Glob patterns can be used; e.g., "quicly:accept", "h2o:*". -S <rate> Enable random sampling per connection (0.0-1.0). Requires use of `usdt-selective-tracing`. -A <ip-address> Limit connections being traced to those coming from the specified address. Requries use of `usdt-selective-tracing`. -N <server-name> Limit connections being traced to those carrying the specified name in the TLS SNI extension. Requires use of `usdt-selective-tracing: ON`. -a Include application data which are omitted by default. -r Run without dropping root privilege. -w <path> Path to write the output (default: stdout). Examples: h2olog -p $(pgrep -o h2o) -H h2olog -p $(pgrep -o h2o) -t quicly:accept -t quicly:free h2olog -p $(pgrep -o h2o) -t h2o:send_response_header -t h2o:h3s_accept \ -t h2o:h3s_destroy -s alt-svc )", H2O_VERSION); return; } static void make_timestamp(char *buf, size_t buf_len) { time_t t = time(NULL); struct tm tms; localtime_r(&t, &tms); const char *iso8601format = "%FT%TZ"; strftime(buf, buf_len, iso8601format, &tms); } static void infof(const char *fmt, ...) __attribute__((format(printf, 1, 2))); static void infof(const char *fmt, ...) 
{ char buf[1024]; va_list args; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); char timestamp[128]; make_timestamp(timestamp, sizeof(timestamp)); fprintf(stderr, "%s %s\n", timestamp, buf); } uint64_t h2o_tracer::time_milliseconds() { struct timeval tv; gettimeofday(&tv, NULL); return (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; } void h2o_tracer::show_event_per_sec(time_t *t0) { time_t t1 = time(NULL); int64_t d = t1 - *t0; if (d > 10) { uint64_t c = stats_.num_events / d; if (c > 0) { if (stats_.num_lost > 0) { infof("%20" PRIu64 " events/s (possibly lost %" PRIu64 " events)", c, stats_.num_lost); stats_.num_lost = 0; } else { infof("%20" PRIu64 " events/s", c); } stats_.num_events = 0; } *t0 = t1; } } static void show_process(pid_t pid) { char cmdline[256]; char proc_file[256]; snprintf(proc_file, sizeof(proc_file), "/proc/%d/cmdline", pid); FILE *f = fopen(proc_file, "r"); if (f == nullptr) { fprintf(stderr, "Error: failed to open %s: %s\n", proc_file, strerror(errno)); exit(EXIT_FAILURE); } size_t nread = fread(cmdline, 1, sizeof(cmdline), f); fclose(f); if (nread == 0) { fprintf(stderr, "Error: failed to read from %s: %s\n", proc_file, strerror(errno)); exit(EXIT_FAILURE); } nread--; // skip trailing nul for (size_t i = 0; i < nread; i++) { if (cmdline[i] == '\0') { cmdline[i] = ' '; } } infof("Attaching pid=%d (%s)", pid, cmdline); } static void drop_root_privilege(void) { if (getuid() == 0) { const char *sudo_gid = getenv("SUDO_GID"); if (sudo_gid == NULL) { fprintf(stderr, "Error: the SUDO_GID environment variable is not set\n"); exit(EXIT_FAILURE); } errno = 0; gid_t gid = (gid_t)strtol(sudo_gid, NULL, 10); if (errno != 0) { fprintf(stderr, "Error: failed to parse SUDO_GID\n"); exit(EXIT_FAILURE); } if (setgid(gid) != 0) { perror("Error: setgid(2) failed"); exit(EXIT_FAILURE); } const char *sudo_uid = getenv("SUDO_UID"); if (sudo_uid == NULL) { fprintf(stderr, "Error: the SUDO_UID environment variable is not set\n"); exit(EXIT_FAILURE); } errno = 0; uid_t uid = (uid_t)strtol(sudo_uid, NULL, 10); if (errno != 0) { fprintf(stderr, "Error: failed to parse SUDO_UID\n"); exit(EXIT_FAILURE); } if (setuid(uid) != 0) { perror("Error: setuid(2) failed"); exit(EXIT_FAILURE); } } } static std::string join_str(const std::string &sep, const std::vector<std::string> &strs) { std::string s; for (auto iter = strs.cbegin(); iter != strs.cend(); ++iter) { if (iter != strs.cbegin()) { s += sep; } s += *iter; } return s; } static std::string generate_header_filter_cflag(const std::vector<std::string> &tokens) { std::vector<std::string> conditions; for (auto &token : tokens) { char buf[256]; snprintf(buf, sizeof(buf), "/* %s */ (slen) == %zu", token.c_str(), token.size()); std::vector<std::string> exprs = {buf}; for (size_t i = 0; i < token.size(); ++i) { snprintf(buf, sizeof(buf), "(s)[%zu] == '%c'", i, token[i]); exprs.push_back(buf); } conditions.push_back("(" + join_str(" && ", exprs) + ")"); } std::string cflag("-DCHECK_ALLOWED_RES_HEADER_NAME(s,slen)=("); cflag += join_str(" || ", conditions); cflag += ")"; return cflag; } static void event_cb(void *context, void *data, int len) { h2o_tracer *tracer = (h2o_tracer *)context; tracer->handle_event(data, len); } static void lost_cb(void *context, uint64_t lost) { h2o_tracer *tracer = (h2o_tracer *)context; tracer->handle_lost(lost); } /** * Builds a `-D$name=$expr` style cc macro. 
*/ static std::string build_cc_macro_expr(const char *name, const std::string &expr) { return std::string("-D") + std::string(name) + "=" + expr; } template <typename T> static std::string build_cc_macro_expr(const char *name, const T &expr) { return build_cc_macro_expr(name, std::to_string(expr)); } /** * Builds a `-D$name="$str"` style cc macro. */ static std::string build_cc_macro_str(const char *name, const std::string &str) { return build_cc_macro_expr(name, "\"" + str + "\""); } #define CC_MACRO_EXPR(name) build_cc_macro_expr(#name, name) #define CC_MACRO_STR(name) build_cc_macro_str(#name, name) int main(int argc, char **argv) { std::unique_ptr<h2o_tracer> tracer(create_raw_tracer()); int debug = 0; bool preserve_root = false; bool list_usdts = false; bool include_appdata = false; FILE *outfp = stdout; std::vector<std::string> response_header_filters; int c; pid_t h2o_pid = -1; double sampling_rate = 1.0; std::vector<std::pair<std::vector<uint8_t> /* address */, unsigned /* netmask */>> sampling_addresses; std::vector<std::string> sampling_snis; while ((c = getopt(argc, argv, "hHdrlap:t:s:w:S:A:N:")) != -1) { switch (c) { case 'H': tracer.reset(create_http_tracer()); break; case 'p': h2o_pid = atoi(optarg); break; case 't': { std::string err = tracer->select_usdts(optarg); if (!err.empty()) { fprintf(stderr, "%s\n", err.c_str()); exit(EXIT_FAILURE); } break; } case 's': response_header_filters.push_back(optarg); break; case 'w': if ((outfp = fopen(optarg, "w")) == nullptr) { fprintf(stderr, "Error: failed to open %s: %s", optarg, strerror(errno)); exit(EXIT_FAILURE); } break; case 'S': // can take 0.0 ... 1.0 sampling_rate = atof(optarg); if (!(sampling_rate >= 0.0 && sampling_rate <= 1.0)) { fprintf(stderr, "Error: the argument of -S must be in the range of 0.0 to 1.0\n"); exit(EXIT_FAILURE); } break; case 'A': { const char *slash = std::find(optarg, optarg + strlen(optarg), '/'); std::string addr(optarg, slash - optarg); in_addr v4; in6_addr v6; if (inet_pton(AF_INET, addr.c_str(), (sockaddr *)&v4) == 1) { const uint8_t *src = reinterpret_cast<const uint8_t *>(&v4); sampling_addresses.emplace_back(std::vector<uint8_t>(src, src + 4), 32); } else if (inet_pton(AF_INET6, addr.c_str(), (sockaddr *)&v6) == 1) { const uint8_t *src = reinterpret_cast<const uint8_t *>(&v6); sampling_addresses.emplace_back(std::vector<uint8_t>(src, src + 16), 128); } else { fprintf(stderr, "Error: invalid address supplied to -A: %s\n", optarg); exit(EXIT_FAILURE); } if (*slash != '\0') { if (sscanf(slash + 1, "%u", &sampling_addresses.back().second) != 1) { fprintf(stderr, "Error: invalid address mask supplied to -A: %s\n", optarg); exit(EXIT_FAILURE); } } } break; case 'N': sampling_snis.emplace_back(optarg); break; case 'a': include_appdata = true; break; case 'd': debug++; break; case 'l': list_usdts = true; break; case 'r': preserve_root = true; break; case 'h': usage(); exit(EXIT_SUCCESS); default: usage(); exit(EXIT_FAILURE); } } if (argc > optind) { fprintf(stderr, "Error: too many aruments\n"); usage(); exit(EXIT_FAILURE); } if (list_usdts) { for (const auto &usdt : tracer->usdt_probes()) { printf("%s\n", usdt.fully_qualified_name().c_str()); } exit(EXIT_SUCCESS); } if (h2o_pid == -1) { fprintf(stderr, "Error: -p option is missing\n"); usage(); exit(EXIT_FAILURE); } if (geteuid() != 0) { fprintf(stderr, "Error: root privilege is required\n"); exit(EXIT_FAILURE); } tracer->init(outfp, include_appdata); const char *h2o_root = getenv("H2O_ROOT"); if (h2o_root == NULL) h2o_root = H2O_TO_STR(H2O_ROOT); 
// BCC does not resolve a relative path, so h2olog does resolve it as an absolute path. char h2o_root_resolved[PATH_MAX]; if (realpath(h2o_root, h2o_root_resolved) == NULL) { h2o_perror("Error: realpath failed for H2O_ROOT"); exit(EXIT_FAILURE); } std::vector<std::string> cflags({ std::string("-I") + std::string(h2o_root_resolved) + "/include", build_cc_macro_expr("H2OLOG_H2O_PID", h2o_pid), CC_MACRO_EXPR(H2O_EBPF_RETURN_MAP_SIZE), }); if (!response_header_filters.empty()) { cflags.push_back(generate_header_filter_cflag(response_header_filters)); } ebpf::BPF *bpf = new ebpf::BPF(); std::vector<ebpf::USDT> probes; bool selective_tracing = false; if (sampling_rate < 1.0) { /* eBPF bytecode cannot handle floating point numbers see man bpf(2). We use uint32_t which maps to 0 <= value < 1. */ cflags.push_back( build_cc_macro_expr("H2OLOG_SAMPLING_RATE_U32", static_cast<uint32_t>(sampling_rate * (UINT64_C(1) << 32)))); selective_tracing = true; } if (!sampling_addresses.empty()) { std::string expr; for (const auto &addrmask : sampling_addresses) { if (!expr.empty()) expr += " || "; expr += "((family) == "; expr += addrmask.first.size() == 4 ? '4' : '6'; size_t off; for (off = 0; off < addrmask.second / 8 * 8; off += 8) { expr += " && (addr)["; expr += std::to_string(off / 8); expr += "] == "; expr += std::to_string(addrmask.first[off / 8]); } if (addrmask.second % 8 != 0) { expr += " && ((addr)["; expr += std::to_string(off / 8); expr += "] & "; expr += std::to_string((uint8_t)(0xff << (8 - addrmask.second % 8))); expr += ") == "; expr += std::to_string(addrmask.first[off / 8]); } expr += ')'; } cflags.push_back(build_cc_macro_expr("H2OLOG_IS_SAMPLING_ADDRESS(family, addr)", std::string("(") + expr + ")")); selective_tracing = true; } if (!sampling_snis.empty()) { /* if both address- and sni-based sampling are used, the output will be the union of both */ if (sampling_addresses.empty()) cflags.push_back(build_cc_macro_expr("H2OLOG_IS_SAMPLING_ADDRESS(family, addr)", 0)); std::string expr; for (const auto &name : sampling_snis) { if (!expr.empty()) expr += " || "; expr += "((name_len) == "; expr += std::to_string(name.size()); /* as string constants cannot be used in eBPF, do hand-written memcmp */ for (size_t i = 0; i < name.size(); i += 8) { uint64_t u8 = 0, mask = 0; memcpy(&u8, name.c_str() + i, std::min((size_t)8, name.size() - i)); expr += " && (*(uint64_t *)((name) + "; expr += std::to_string(i); expr += ")"; if (name.size() - i < 8) { static const uint8_t mask_bytes[14] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; /* 7x 0xff, 7x 0x00 */ memcpy(&mask, mask_bytes + 7 - (name.size() - i), 8); expr += " & "; expr += std::to_string(mask); } expr += ") == "; expr += std::to_string(u8); } expr += ")"; } cflags.push_back(build_cc_macro_expr("H2OLOG_IS_SAMPLING_SNI(name, name_len)", std::string("(") + expr + ")")); selective_tracing = true; } if (selective_tracing) { cflags.push_back(build_cc_macro_expr("H2OLOG_SELECTIVE_TRACING", 1)); probes.push_back(ebpf::USDT(h2o_pid, "h2o", "_private_socket_lookup_flags", "trace_h2o___private_socket_lookup_flags")); probes.push_back( ebpf::USDT(h2o_pid, "h2o", "_private_socket_lookup_flags_sni", "trace_h2o___private_socket_lookup_flags_sni")); } for (const auto &usdt : tracer->usdt_probes()) { probes.push_back(ebpf::USDT(h2o_pid, usdt.provider, usdt.name, usdt.probe_func)); } if (debug >= 2) { fprintf(stderr, "usdts="); const auto &usdts = tracer->usdt_probes(); for (auto iter = usdts.cbegin(); iter != usdts.cend(); iter++) { if (iter != usdts.cbegin()) { 
fprintf(stderr, ","); } fprintf(stderr, "%s", iter->fully_qualified_name().c_str()); } fprintf(stderr, "\n"); fprintf(stderr, "cflags="); for (size_t i = 0; i < cflags.size(); i++) { if (i > 0) { fprintf(stderr, " "); } fprintf(stderr, "%s", cflags[i].c_str()); } fprintf(stderr, "\n"); fprintf(stderr, "<BPF>\n%s\n</BPF>\n", tracer->bpf_text().c_str()); } ebpf::StatusTuple ret = bpf->init(tracer->bpf_text(), cflags, probes); if (ret.code() != 0) { fprintf(stderr, "Error: init: %s\n", ret.msg().c_str()); return EXIT_FAILURE; } bpf->attach_tracepoint("sched:sched_process_exit", "trace_sched_process_exit"); for (auto &probe : probes) { ret = bpf->attach_usdt(probe); if (ret.code() != 0) { fprintf(stderr, "Error: attach_usdt: %s\n", ret.msg().c_str()); return EXIT_FAILURE; } } ret = bpf->open_perf_buffer("events", event_cb, lost_cb, tracer.get(), PERF_BUFFER_PAGE_COUNT); if (ret.code() != 0) { fprintf(stderr, "Error: open_perf_buffer: %s\n", ret.msg().c_str()); return EXIT_FAILURE; } if (debug) { show_process(h2o_pid); } if (!preserve_root) { drop_root_privilege(); } ebpf::BPFPerfBuffer *perf_buffer = bpf->get_perf_buffer("events"); if (perf_buffer) { time_t t0 = time(NULL); while (true) { perf_buffer->poll(POLL_TIMEOUT); tracer->flush(); if (debug) { tracer->show_event_per_sec(&t0); } } } fprintf(stderr, "Error: failed to get_perf_buffer()\n"); return EXIT_FAILURE; }
1
15,398
Why not `ebpf::BPF bpf;`? Assuming that the instance is not huge, I do not think there is a good reason to use a pointer when it can be retained as a value.
h2o-h2o
c
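The review message in this h2o record argues for holding the BPF object by value rather than behind a pointer. A minimal sketch of the two alternatives, using a stand-in type so it compiles without the BCC headers:

```cpp
#include <memory>

struct BPF {                 // stand-in for ebpf::BPF, for illustration only
    int init() { return 0; }
};

int main() {
    // Patch under review: heap allocation whose lifetime is managed by unique_ptr.
    std::unique_ptr<BPF> heap_owned(new BPF());

    // Reviewer's suggestion: if the object is not huge, a plain value with automatic
    // storage duration is simpler and needs no ownership bookkeeping at all.
    BPF stack_owned;

    return heap_owned->init() + stack_owned.init();
}
```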
@@ -19,8 +19,11 @@ package controller import ( "errors" "fmt" + "strings" "time" + apiutil "github.com/jetstack/cert-manager/pkg/api/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
1
/* Copyright 2020 The cert-manager Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controller import ( "errors" "fmt" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1" ) var ( errNilCertificate = errors.New("the supplied Certificate pointer was nil") errInvalidIngressAnnotation = errors.New("invalid ingress annotation") ) func translateIngressAnnotations(crt *cmapi.Certificate, annotations map[string]string) error { if crt == nil { return errNilCertificate } if commonName, found := annotations[cmapi.CommonNameAnnotationKey]; found { crt.Spec.CommonName = commonName } if duration, found := annotations[cmapi.DurationAnnotationKey]; found { duration, err := time.ParseDuration(duration) if err != nil { return fmt.Errorf("%w %q: %v", errInvalidIngressAnnotation, cmapi.DurationAnnotationKey, err) } crt.Spec.Duration = &metav1.Duration{Duration: duration} } if renewBefore, found := annotations[cmapi.RenewBeforeAnnotationKey]; found { duration, err := time.ParseDuration(renewBefore) if err != nil { return fmt.Errorf("%w %q: %v", errInvalidIngressAnnotation, cmapi.RenewBeforeAnnotationKey, err) } crt.Spec.RenewBefore = &metav1.Duration{Duration: duration} } return nil }
1
24,667
nit: move to last block
jetstack-cert-manager
go
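The "move to last block" nit in this jetstack-cert-manager record is about Go import grouping: the new `apiutil` import should join the final block of `github.com/jetstack/cert-manager` imports rather than sit between the standard-library and k8s groups. A sketch of how the import stanza would look after that change (stanza only, not a compilable file on its own):

```go
import (
	"errors"
	"fmt"
	"strings"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	apiutil "github.com/jetstack/cert-manager/pkg/api/util"
	cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
)
```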
@@ -15,8 +15,8 @@ import ( "syscall" "github.com/hashicorp/hcl" - "github.com/spiffe/spire/helpers" "github.com/spiffe/spire/pkg/agent" + "github.com/spiffe/spire/pkg/common/logger" ) const (
1
package command import ( "crypto/x509" "crypto/x509/pkix" "errors" "flag" "fmt" "io/ioutil" "net" "net/url" "os" "os/signal" "strconv" "syscall" "github.com/hashicorp/hcl" "github.com/spiffe/spire/helpers" "github.com/spiffe/spire/pkg/agent" ) const ( defaultConfigPath = ".conf/default_agent_config.hcl" defaultBindAddress = "127.0.0.1" defaultBindPort = "8081" // TODO: Make my defaults sane defaultDataDir = "." defaultLogLevel = "INFO" defaultPluginDir = "../../plugin/agent/.conf" ) // Struct representing available configurables for file and CLI // options type CmdConfig struct { ServerAddress string ServerPort int TrustDomain string TrustBundlePath string BindAddress string BindPort int DataDir string PluginDir string LogFile string LogLevel string } type RunCommand struct { } func (*RunCommand) Help() string { return setOptsFromCLI(newDefaultConfig(), []string{"-h"}).Error() } func (*RunCommand) Run(args []string) int { config := newDefaultConfig() err := setOptsFromFile(config, defaultConfigPath) if err != nil { fmt.Println(err.Error()) return 1 } err = setOptsFromCLI(config, args) if err != nil { fmt.Println(err.Error()) return 1 } err = validateConfig(config) if err != nil { fmt.Println(err.Error()) } // TODO: Handle graceful shutdown? signalListener(config.ErrorCh) agt := agent.New(config) err = agt.Run() if err != nil { config.Log.Error(err.Error()) return 1 } return 0 } func (*RunCommand) Synopsis() string { return "Runs the agent" } func setOptsFromFile(c *agent.Config, filePath string) error { fileConfig := &CmdConfig{} data, err := ioutil.ReadFile(filePath) if err != nil { return err } hclTree, err := hcl.Parse(string(data)) if err != nil { return err } if err := hcl.DecodeObject(&fileConfig, hclTree); err != nil { return err } return mergeAgentConfig(c, fileConfig) } func setOptsFromCLI(c *agent.Config, args []string) error { flags := flag.NewFlagSet("run", flag.ContinueOnError) cmdConfig := &CmdConfig{} flags.StringVar(&cmdConfig.ServerAddress, "serverAddress", "", "IP address or DNS name of the SPIRE server") flags.IntVar(&cmdConfig.ServerPort, "serverPort", 0, "Port number of the SPIRE server") flags.StringVar(&cmdConfig.TrustDomain, "trustDomain", "", "The trust domain that this agent belongs to") flags.StringVar(&cmdConfig.TrustBundlePath, "trustBundle", "", "Path to the SPIRE server CA bundle") flags.StringVar(&cmdConfig.BindAddress, "bindAddress", "", "Address that the workload API should bind to") flags.IntVar(&cmdConfig.BindPort, "bindPort", 0, "Port number that the workload API should listen on") flags.StringVar(&cmdConfig.DataDir, "dataDir", "", "A directory the agent can use for its runtime data") flags.StringVar(&cmdConfig.PluginDir, "pluginDir", "", "Plugin conf.d configuration directory") flags.StringVar(&cmdConfig.LogFile, "logFile", "", "File to write logs to") flags.StringVar(&cmdConfig.LogLevel, "logLevel", "", "DEBUG, INFO, WARN or ERROR") err := flags.Parse(args) if err != nil { return err } return mergeAgentConfig(c, cmdConfig) } func mergeAgentConfig(orig *agent.Config, cmd *CmdConfig) error { // Parse server address if cmd.ServerAddress != "" { ips, err := net.LookupIP(cmd.ServerAddress) if err != nil { return err } if len(ips) == 0 { return fmt.Errorf("Could not resolve ServerAddress %s", cmd.ServerAddress) } serverAddress := ips[0] orig.ServerAddress.IP = serverAddress } if cmd.ServerPort != 0 { orig.ServerAddress.Port = cmd.ServerPort } if cmd.TrustDomain != "" { trustDomain := url.URL{ Scheme: "spiffe", Host: cmd.TrustDomain, } orig.TrustDomain = 
trustDomain } // Parse trust bundle if cmd.TrustBundlePath != "" { bundle, err := parseTrustBundle(cmd.TrustBundlePath) if err != nil { return fmt.Errorf("Error parsing trust bundle: %s", err) } orig.TrustBundle = bundle } // Parse bind address if cmd.BindAddress != "" { ip := net.ParseIP(cmd.BindAddress) if ip == nil { return fmt.Errorf("BindAddress %s is not a valid IP", cmd.BindAddress) } orig.BindAddress.IP = ip } if cmd.BindPort != 0 { orig.BindAddress.Port = cmd.BindPort } if cmd.DataDir != "" { orig.DataDir = cmd.DataDir } if cmd.PluginDir != "" { orig.PluginDir = cmd.PluginDir } // Handle log file and level if cmd.LogFile != "" || cmd.LogLevel != "" { logLevel := defaultLogLevel if cmd.LogLevel != "" { logLevel = cmd.LogLevel } logger, err := helpers.NewLogger(logLevel, cmd.LogFile) if err != nil { return fmt.Errorf("Could not open log file %s: %s", cmd.LogFile, err) } orig.Log = logger } return nil } func validateConfig(c *agent.Config) error { if c.ServerAddress.IP == nil || c.ServerAddress.Port == 0 { return errors.New("ServerAddress and ServerPort are required") } if c.TrustDomain.String() == "" { return errors.New("TrustDomain is required") } if c.TrustBundle == nil { return errors.New("TrustBundle is required") } return nil } func newDefaultConfig() *agent.Config { addr := net.ParseIP(defaultBindAddress) port, _ := strconv.Atoi(defaultBindPort) bindAddr := &net.TCPAddr{IP: addr, Port: port} certDN := &pkix.Name{ Country: []string{"US"}, Organization: []string{"SPIRE"}, } errCh := make(chan error) shutdownCh := make(chan struct{}) // helpers.NewLogger() cannot return error when using STDOUT logger, _ := helpers.NewLogger(defaultLogLevel, "") serverAddress := &net.TCPAddr{} return &agent.Config{ BindAddress: bindAddr, CertDN: certDN, DataDir: defaultDataDir, PluginDir: defaultPluginDir, ErrorCh: errCh, ShutdownCh: shutdownCh, Log: logger, ServerAddress: serverAddress, } } func parseTrustBundle(path string) (*x509.CertPool, error) { data, err := ioutil.ReadFile(path) if err != nil { return nil, err } certPool := x509.NewCertPool() if ok := certPool.AppendCertsFromPEM(data); !ok { return nil, fmt.Errorf("No valid certificates found at %s", path) } return certPool, nil } func stringDefault(option string, defaultValue string) string { if option == "" { return defaultValue } return option } func signalListener(ch chan error) { go func() { signalCh := make(chan os.Signal, 1) signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM) ch <- fmt.Errorf("%s", <-signalCh) }() return }
1
8,530
nit: I'm under the impression that `log` is the convention for golang, and that the `er` suffix is usually reserved for interfaces
spiffe-spire
go
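The nit in this spiffe-spire record points at a Go naming convention: short names like `log` for the concrete logging value, with the `-er` suffix reserved for interfaces. A minimal, self-contained sketch of that convention (illustrative only, not SPIRE code):

```go
package main

import "fmt"

// Logger is an interface, so the "-er" suffix fits here.
type Logger interface {
	Error(msg string)
}

// stdoutLogger is a concrete implementation; the value it produces is
// conventionally bound to a short name such as "log".
type stdoutLogger struct{}

func (stdoutLogger) Error(msg string) { fmt.Println("ERROR:", msg) }

func main() {
	var log Logger = stdoutLogger{}
	log.Error("something went wrong")
}
```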
@@ -272,6 +272,12 @@ namespace NLog.Targets [ArrayParameter(typeof(DatabaseParameterInfo), "parameter")] public IList<DatabaseParameterInfo> Parameters { get; } = new List<DatabaseParameterInfo>(); + /// <summary> + /// Configures isolated transaction batch writing. If supported by the database, then it will improve insert performance. + /// </summary> + /// <docgen category='Performance Tuning Options' order='10' /> + public System.Data.IsolationLevel? IsolationLevel { get; set; } + #if !NETSTANDARD internal DbProviderFactory ProviderFactory { get; set; }
1
// // Copyright (c) 2004-2019 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ namespace NLog.Targets { using System; using System.Collections.Generic; using System.ComponentModel; using System.Data; using System.Data.Common; #if NETSTANDARD using System.Reflection; #endif using System.Text; #if !NETSTANDARD1_0 using System.Transactions; #endif using NLog.Common; using NLog.Config; using NLog.Internal; using NLog.Layouts; #if !NETSTANDARD using System.Configuration; using ConfigurationManager = System.Configuration.ConfigurationManager; #endif /// <summary> /// Writes log messages to the database using an ADO.NET provider. /// </summary> /// <remarks> /// - NETSTANDARD cannot load connectionstrings from .config /// </remarks> /// <seealso href="https://github.com/nlog/nlog/wiki/Database-target">Documentation on NLog Wiki</seealso> /// <example> /// <para> /// The configuration is dependent on the database type, because /// there are different methods of specifying connection string, SQL /// command and command parameters. 
/// </para> /// <para>MS SQL Server using System.Data.SqlClient:</para> /// <code lang="XML" source="examples/targets/Configuration File/Database/MSSQL/NLog.config" height="450" /> /// <para>Oracle using System.Data.OracleClient:</para> /// <code lang="XML" source="examples/targets/Configuration File/Database/Oracle.Native/NLog.config" height="350" /> /// <para>Oracle using System.Data.OleDBClient:</para> /// <code lang="XML" source="examples/targets/Configuration File/Database/Oracle.OleDB/NLog.config" height="350" /> /// <para>To set up the log target programmatically use code like this (an equivalent of MSSQL configuration):</para> /// <code lang="C#" source="examples/targets/Configuration API/Database/MSSQL/Example.cs" height="630" /> /// </example> [Target("Database")] public class DatabaseTarget : Target, IInstallable { private IDbConnection _activeConnection; private string _activeConnectionString; /// <summary> /// Initializes a new instance of the <see cref="DatabaseTarget" /> class. /// </summary> public DatabaseTarget() { InstallDdlCommands = new List<DatabaseCommandInfo>(); UninstallDdlCommands = new List<DatabaseCommandInfo>(); DBProvider = "sqlserver"; DBHost = "."; #if !NETSTANDARD ConnectionStringsSettings = ConfigurationManager.ConnectionStrings; #endif CommandType = CommandType.Text; OptimizeBufferReuse = GetType() == typeof(DatabaseTarget); // Class not sealed, reduce breaking changes } /// <summary> /// Initializes a new instance of the <see cref="DatabaseTarget" /> class. /// </summary> /// <param name="name">Name of the target.</param> public DatabaseTarget(string name) : this() { Name = name; } /// <summary> /// Gets or sets the name of the database provider. /// </summary> /// <remarks> /// <para> /// The parameter name should be a provider invariant name as registered in machine.config or app.config. 
Common values are: /// </para> /// <ul> /// <li><c>System.Data.SqlClient</c> - <see href="https://msdn.microsoft.com/en-us/library/system.data.sqlclient.aspx">SQL Sever Client</see></li> /// <li><c>System.Data.SqlServerCe.3.5</c> - <see href="https://www.microsoft.com/sqlserver/2005/en/us/compact.aspx">SQL Sever Compact 3.5</see></li> /// <li><c>System.Data.OracleClient</c> - <see href="https://msdn.microsoft.com/en-us/library/system.data.oracleclient.aspx">Oracle Client from Microsoft</see> (deprecated in .NET Framework 4)</li> /// <li><c>Oracle.DataAccess.Client</c> - <see href="https://www.oracle.com/technology/tech/windows/odpnet/index.html">ODP.NET provider from Oracle</see></li> /// <li><c>System.Data.SQLite</c> - <see href="http://sqlite.phxsoftware.com/">System.Data.SQLite driver for SQLite</see></li> /// <li><c>Npgsql</c> - <see href="https://www.npgsql.org/">Npgsql driver for PostgreSQL</see></li> /// <li><c>MySql.Data.MySqlClient</c> - <see href="https://www.mysql.com/downloads/connector/net/">MySQL Connector/Net</see></li> /// </ul> /// <para>(Note that provider invariant names are not supported on .NET Compact Framework).</para> /// <para> /// Alternatively the parameter value can be be a fully qualified name of the provider /// connection type (class implementing <see cref="IDbConnection" />) or one of the following tokens: /// </para> /// <ul> /// <li><c>sqlserver</c>, <c>mssql</c>, <c>microsoft</c> or <c>msde</c> - SQL Server Data Provider</li> /// <li><c>oledb</c> - OLEDB Data Provider</li> /// <li><c>odbc</c> - ODBC Data Provider</li> /// </ul> /// </remarks> /// <docgen category='Connection Options' order='10' /> [RequiredParameter] [DefaultValue("sqlserver")] public string DBProvider { get; set; } #if !NETSTANDARD /// <summary> /// Gets or sets the name of the connection string (as specified in <see href="https://msdn.microsoft.com/en-us/library/bf7sd233.aspx">&lt;connectionStrings&gt; configuration section</see>. /// </summary> /// <docgen category='Connection Options' order='10' /> public string ConnectionStringName { get; set; } #endif /// <summary> /// Gets or sets the connection string. When provided, it overrides the values /// specified in DBHost, DBUserName, DBPassword, DBDatabase. /// </summary> /// <docgen category='Connection Options' order='10' /> public Layout ConnectionString { get; set; } /// <summary> /// Gets or sets the connection string using for installation and uninstallation. If not provided, regular ConnectionString is being used. /// </summary> /// <docgen category='Installation Options' order='10' /> public Layout InstallConnectionString { get; set; } /// <summary> /// Gets the installation DDL commands. /// </summary> /// <docgen category='Installation Options' order='10' /> [ArrayParameter(typeof(DatabaseCommandInfo), "install-command")] public IList<DatabaseCommandInfo> InstallDdlCommands { get; private set; } /// <summary> /// Gets the uninstallation DDL commands. /// </summary> /// <docgen category='Installation Options' order='10' /> [ArrayParameter(typeof(DatabaseCommandInfo), "uninstall-command")] public IList<DatabaseCommandInfo> UninstallDdlCommands { get; private set; } /// <summary> /// Gets or sets a value indicating whether to keep the /// database connection open between the log events. /// </summary> /// <docgen category='Connection Options' order='10' /> [DefaultValue(false)] public bool KeepConnection { get; set; } /// <summary> /// Obsolete - value will be ignored! The logging code always runs outside of transaction. 
/// /// Gets or sets a value indicating whether to use database transactions. /// Some data providers require this. /// </summary> /// <docgen category='Connection Options' order='10' /> /// <remarks> /// This option was removed in NLog 4.0 because the logging code always runs outside of transaction. /// This ensures that the log gets written to the database if you rollback the main transaction because of an error and want to log the error. /// </remarks> [Obsolete("Value will be ignored as logging code always executes outside of a transaction. Marked obsolete on NLog 4.0 and it will be removed in NLog 6.")] public bool? UseTransactions { get; set; } /// <summary> /// Gets or sets the database host name. If the ConnectionString is not provided /// this value will be used to construct the "Server=" part of the /// connection string. /// </summary> /// <docgen category='Connection Options' order='10' /> public Layout DBHost { get; set; } /// <summary> /// Gets or sets the database user name. If the ConnectionString is not provided /// this value will be used to construct the "User ID=" part of the /// connection string. /// </summary> /// <docgen category='Connection Options' order='10' /> public Layout DBUserName { get; set; } /// <summary> /// Gets or sets the database password. If the ConnectionString is not provided /// this value will be used to construct the "Password=" part of the /// connection string. /// </summary> /// <docgen category='Connection Options' order='10' /> public Layout DBPassword { get => _dbPassword?.Layout; set => _dbPassword = TransformedLayout.Create(value, EscapeValueForConnectionString, RenderLogEvent); } /// <summary> /// Gets or sets the database name. If the ConnectionString is not provided /// this value will be used to construct the "Database=" part of the /// connection string. /// </summary> /// <docgen category='Connection Options' order='10' /> public Layout DBDatabase { get; set; } /// <summary> /// Gets or sets the text of the SQL command to be run on each log level. /// </summary> /// <remarks> /// Typically this is a SQL INSERT statement or a stored procedure call. /// It should use the database-specific parameters (marked as <c>@parameter</c> /// for SQL server or <c>:parameter</c> for Oracle, other data providers /// have their own notation) and not the layout renderers, /// because the latter is prone to SQL injection attacks. /// The layout renderers should be specified as &lt;parameter /&gt; elements instead. /// </remarks> /// <docgen category='SQL Statement' order='10' /> [RequiredParameter] public Layout CommandText { get; set; } /// <summary> /// Gets or sets the type of the SQL command to be run on each log level. /// </summary> /// <remarks> /// This specifies how the command text is interpreted, as "Text" (default) or as "StoredProcedure". /// When using the value StoredProcedure, the commandText-property would /// normally be the name of the stored procedure. TableDirect method is not supported in this context. /// </remarks> /// <docgen category='SQL Statement' order='11' /> [DefaultValue(CommandType.Text)] public CommandType CommandType { get; set; } /// <summary> /// Gets the collection of parameters. Each parameter contains a mapping /// between NLog layout and a database named or positional parameter. 
/// </summary> /// <docgen category='SQL Statement' order='14' /> [ArrayParameter(typeof(DatabaseParameterInfo), "parameter")] public IList<DatabaseParameterInfo> Parameters { get; } = new List<DatabaseParameterInfo>(); #if !NETSTANDARD internal DbProviderFactory ProviderFactory { get; set; } // this is so we can mock the connection string without creating sub-processes internal ConnectionStringSettingsCollection ConnectionStringsSettings { get; set; } #endif internal Type ConnectionType { get; set; } private IPropertyTypeConverter PropertyTypeConverter { get => _propertyTypeConverter ?? (_propertyTypeConverter = ConfigurationItemFactory.Default.PropertyTypeConverter); set => _propertyTypeConverter = value; } private IPropertyTypeConverter _propertyTypeConverter; SortHelpers.KeySelector<AsyncLogEventInfo, string> _buildConnectionStringDelegate; private TransformedLayout _dbPassword; /// <summary> /// Performs installation which requires administrative permissions. /// </summary> /// <param name="installationContext">The installation context.</param> public void Install(InstallationContext installationContext) { RunInstallCommands(installationContext, InstallDdlCommands); } /// <summary> /// Performs uninstallation which requires administrative permissions. /// </summary> /// <param name="installationContext">The installation context.</param> public void Uninstall(InstallationContext installationContext) { RunInstallCommands(installationContext, UninstallDdlCommands); } /// <summary> /// Determines whether the item is installed. /// </summary> /// <param name="installationContext">The installation context.</param> /// <returns> /// Value indicating whether the item is installed or null if it is not possible to determine. /// </returns> public bool? IsInstalled(InstallationContext installationContext) { return null; } internal IDbConnection OpenConnection(string connectionString) { IDbConnection connection; #if !NETSTANDARD if (ProviderFactory != null) { connection = ProviderFactory.CreateConnection(); } else #endif { connection = (IDbConnection)Activator.CreateInstance(ConnectionType); } if (connection == null) { throw new NLogRuntimeException("Creation of connection failed"); } connection.ConnectionString = connectionString; connection.Open(); return connection; } /// <summary> /// Initializes the target. Can be used by inheriting classes /// to initialize logging. /// </summary> [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Naming", "CA2204:Literals should be spelled correctly", MessageId = "connectionStrings", Justification = "Name of the config file section.")] protected override void InitializeTarget() { base.InitializeTarget(); #pragma warning disable 618 if (UseTransactions.HasValue) #pragma warning restore 618 { InternalLogger.Warn("DatabaseTarget(Name={0}): UseTransactions property is obsolete and will not be used - will be removed in NLog 6", Name); } bool foundProvider = false; string providerName = string.Empty; #if !NETSTANDARD if (!string.IsNullOrEmpty(ConnectionStringName)) { // read connection string and provider factory from the configuration file var cs = ConnectionStringsSettings[ConnectionStringName]; if (cs == null) { throw new NLogConfigurationException($"Connection string '{ConnectionStringName}' is not declared in <connectionStrings /> section."); } if (!string.IsNullOrEmpty(cs.ConnectionString?.Trim())) { ConnectionString = SimpleLayout.Escape(cs.ConnectionString.Trim()); } providerName = cs.ProviderName?.Trim() ?? 
string.Empty; } #endif if (ConnectionString != null) { providerName = InitConnectionString(providerName); } #if !NETSTANDARD if (string.IsNullOrEmpty(providerName)) { providerName = GetProviderNameFromDbProviderFactories(providerName); } if (!string.IsNullOrEmpty(providerName)) { foundProvider = InitProviderFactory(providerName); } #endif if (!foundProvider) { try { SetConnectionType(); if (ConnectionType == null) { InternalLogger.Warn("DatabaseTarget(Name={0}): No ConnectionType created from DBProvider={1}", Name, DBProvider); } } catch (Exception ex) { InternalLogger.Error(ex, "DatabaseTarget(Name={0}): Failed to create ConnectionType from DBProvider={1}", Name, DBProvider); throw; } } } private string InitConnectionString(string providerName) { try { var connectionString = BuildConnectionString(LogEventInfo.CreateNullEvent()); var dbConnectionStringBuilder = new DbConnectionStringBuilder { ConnectionString = connectionString }; if (dbConnectionStringBuilder.TryGetValue("provider connection string", out var connectionStringValue)) { // Special Entity Framework Connection String if (dbConnectionStringBuilder.TryGetValue("provider", out var providerValue)) { // Provider was overriden by ConnectionString providerName = providerValue.ToString()?.Trim() ?? string.Empty; } // ConnectionString was overriden by ConnectionString :) ConnectionString = SimpleLayout.Escape(connectionStringValue.ToString()); } } catch (Exception ex) { #if !NETSTANDARD if (!string.IsNullOrEmpty(ConnectionStringName)) InternalLogger.Warn(ex, "DatabaseTarget(Name={0}): DbConnectionStringBuilder failed to parse '{1}' ConnectionString", Name, ConnectionStringName); else #endif InternalLogger.Warn(ex, "DatabaseTarget(Name={0}): DbConnectionStringBuilder failed to parse ConnectionString", Name); } return providerName; } #if !NETSTANDARD private bool InitProviderFactory(string providerName) { bool foundProvider; try { ProviderFactory = DbProviderFactories.GetFactory(providerName); foundProvider = true; } catch (Exception ex) { InternalLogger.Error(ex, "DatabaseTarget(Name={0}): DbProviderFactories failed to get factory from ProviderName={1}", Name, providerName); throw; } return foundProvider; } private string GetProviderNameFromDbProviderFactories(string providerName) { string dbProvider = DBProvider?.Trim() ?? string.Empty; if (!string.IsNullOrEmpty(dbProvider)) { foreach (DataRow row in DbProviderFactories.GetFactoryClasses().Rows) { var invariantname = (string)row["InvariantName"]; if (string.Equals(invariantname, dbProvider, StringComparison.OrdinalIgnoreCase)) { providerName = invariantname; break; } } } return providerName; } #endif /// <summary> /// Set the <see cref="ConnectionType"/> to use it for opening connections to the database. 
/// </summary> private void SetConnectionType() { switch (DBProvider.ToUpperInvariant()) { case "SQLSERVER": case "MSSQL": case "MICROSOFT": case "MSDE": case "SYSTEM.DATA.SQLCLIENT": { #if NETSTANDARD var assembly = Assembly.Load(new AssemblyName("System.Data.SqlClient")); #else var assembly = typeof(IDbConnection).GetAssembly(); #endif ConnectionType = assembly.GetType("System.Data.SqlClient.SqlConnection", true, true); break; } #if !NETSTANDARD case "OLEDB": { var assembly = typeof(IDbConnection).GetAssembly(); ConnectionType = assembly.GetType("System.Data.OleDb.OleDbConnection", true, true); break; } #endif case "ODBC": case "SYSTEM.DATA.ODBC": { #if NETSTANDARD var assembly = Assembly.Load(new AssemblyName("System.Data.Odbc")); #else var assembly = typeof(IDbConnection).GetAssembly(); #endif ConnectionType = assembly.GetType("System.Data.Odbc.OdbcConnection", true, true); break; } default: ConnectionType = Type.GetType(DBProvider, true, true); break; } } /// <summary> /// Closes the target and releases any unmanaged resources. /// </summary> protected override void CloseTarget() { PropertyTypeConverter = null; base.CloseTarget(); InternalLogger.Trace("DatabaseTarget(Name={0}): Close connection because of CloseTarget", Name); CloseConnection(); } /// <summary> /// Writes the specified logging event to the database. It creates /// a new database command, prepares parameters for it by calculating /// layouts and executes the command. /// </summary> /// <param name="logEvent">The logging event.</param> protected override void Write(LogEventInfo logEvent) { try { WriteEventToDatabase(logEvent, BuildConnectionString(logEvent)); } catch (Exception exception) { InternalLogger.Error(exception, "DatabaseTarget(Name={0}): Error when writing to database.", Name); if (exception.MustBeRethrownImmediately()) { throw; } InternalLogger.Trace("DatabaseTarget(Name={0}): Close connection because of error", Name); CloseConnection(); throw; } finally { if (!KeepConnection) { InternalLogger.Trace("DatabaseTarget(Name={0}): Close connection (KeepConnection = false).", Name); CloseConnection(); } } } /// <summary> /// NOTE! Obsolete, instead override Write(IList{AsyncLogEventInfo} logEvents) /// /// Writes an array of logging events to the log target. By default it iterates on all /// events and passes them to "Write" method. Inheriting classes can use this method to /// optimize batch writes. /// </summary> /// <param name="logEvents">Logging events to be written out.</param> [Obsolete("Instead override Write(IList<AsyncLogEventInfo> logEvents. Marked obsolete on NLog 4.5")] protected override void Write(AsyncLogEventInfo[] logEvents) { Write((IList<AsyncLogEventInfo>)logEvents); } /// <summary> /// Writes an array of logging events to the log target. By default it iterates on all /// events and passes them to "Write" method. Inheriting classes can use this method to /// optimize batch writes. 
/// </summary> /// <param name="logEvents">Logging events to be written out.</param> protected override void Write(IList<AsyncLogEventInfo> logEvents) { if (_buildConnectionStringDelegate == null) _buildConnectionStringDelegate = (l) => BuildConnectionString(l.LogEvent); var buckets = logEvents.BucketSort(_buildConnectionStringDelegate); try { foreach (var kvp in buckets) { for (int i = 0; i < kvp.Value.Count; i++) { AsyncLogEventInfo ev = kvp.Value[i]; try { WriteEventToDatabase(ev.LogEvent, kvp.Key); ev.Continuation(null); } catch (Exception exception) { // in case of exception, close the connection and report it InternalLogger.Error(exception, "DatabaseTarget(Name={0}): Error when writing to database.", Name); if (exception.MustBeRethrownImmediately()) { throw; } InternalLogger.Trace("DatabaseTarget(Name={0}): Close connection because of exception", Name); CloseConnection(); ev.Continuation(exception); if (exception.MustBeRethrown()) { throw; } } } } } finally { if (!KeepConnection) { InternalLogger.Trace("DatabaseTarget(Name={0}): Close connection because of KeepConnection=false", Name); CloseConnection(); } } } /// <summary> /// Write logEvent to database /// </summary> [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Security", "CA2100:Review SQL queries for security vulnerabilities", Justification = "It's up to the user to ensure proper quoting.")] private void WriteEventToDatabase(LogEventInfo logEvent, string connectionString) { var commandText = RenderLogEvent(CommandText, logEvent); InternalLogger.Trace("DatabaseTarget(Name={0}): Executing {1}: {2}", Name, CommandType, commandText); //Always suppress transaction so that the caller does not rollback logging if they are rolling back their transaction. using (TransactionScope transactionScope = new TransactionScope(TransactionScopeOption.Suppress)) { EnsureConnectionOpen(connectionString); using (IDbCommand command = CreateDbCommandWithParameters(logEvent, CommandType, commandText, Parameters)) { int result = command.ExecuteNonQuery(); InternalLogger.Trace("DatabaseTarget(Name={0}): Finished execution, result = {1}", Name, result); } //not really needed as there is no transaction at all. transactionScope.Complete(); } } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Security", "CA2100:Review SQL queries for security vulnerabilities", Justification = "It's up to the user to ensure proper quoting.")] private IDbCommand CreateDbCommandWithParameters(LogEventInfo logEvent, CommandType commandType, string dbCommandText, IList<DatabaseParameterInfo> databaseParameterInfos) { var dbCommand = _activeConnection.CreateCommand(); dbCommand.CommandType = commandType; dbCommand.CommandText = dbCommandText; for (int i = 0; i < databaseParameterInfos.Count; ++i) { var parameterInfo = databaseParameterInfos[i]; var dbParameter = CreateDatabaseParameter(dbCommand, parameterInfo); var dbParameterValue = GetDatabaseParameterValue(logEvent, parameterInfo); dbParameter.Value = dbParameterValue; dbCommand.Parameters.Add(dbParameter); InternalLogger.Trace(" DatabaseTarget: Parameter: '{0}' = '{1}' ({2})", dbParameter.ParameterName, dbParameter.Value, dbParameter.DbType); } return dbCommand; } /// <summary> /// Build the connectionstring from the properties. 
/// </summary> /// <remarks> /// Using <see cref="ConnectionString"/> at first, and falls back to the properties <see cref="DBHost"/>, /// <see cref="DBUserName"/>, <see cref="DBPassword"/> and <see cref="DBDatabase"/> /// </remarks> /// <param name="logEvent">Event to render the layout inside the properties.</param> /// <returns></returns> protected string BuildConnectionString(LogEventInfo logEvent) { if (ConnectionString != null) { return RenderLogEvent(ConnectionString, logEvent); } var sb = new StringBuilder(); sb.Append("Server="); sb.Append(RenderLogEvent(DBHost, logEvent)); sb.Append(";"); var dbUserName = RenderLogEvent(DBUserName, logEvent); if (string.IsNullOrEmpty(dbUserName)) { sb.Append("Trusted_Connection=SSPI;"); } else { sb.Append("User id="); sb.Append(dbUserName); sb.Append(";Password="); var password = _dbPassword.Render(logEvent); sb.Append(password); sb.Append(";"); } var dbDatabase = RenderLogEvent(DBDatabase, logEvent); if (!string.IsNullOrEmpty(dbDatabase)) { sb.Append("Database="); sb.Append(dbDatabase); } return sb.ToString(); } /// <summary> /// Escape quotes and semicolons. /// See https://docs.microsoft.com/en-us/previous-versions/windows/desktop/ms722656(v=vs.85)#setting-values-that-use-reserved-characters /// </summary> private static string EscapeValueForConnectionString(string value) { const string singleQuote = "'"; if (value.StartsWith(singleQuote) && value.EndsWith(singleQuote)) { // already escaped return value; } const string doubleQuote = "\""; if (value.StartsWith(doubleQuote) && value.EndsWith(doubleQuote)) { // already escaped return value; } var containsSingle = value.Contains(singleQuote); var containsDouble = value.Contains(doubleQuote); if (value.Contains(";") || containsSingle || containsDouble) { if (!containsSingle) { return string.Concat(singleQuote, value, singleQuote); } if (!containsDouble) { return string.Concat(doubleQuote, value, doubleQuote); } // both single and double var escapedValue = value.Replace(doubleQuote, doubleQuote + doubleQuote); return string.Concat(doubleQuote, escapedValue, doubleQuote); } return value; } private void EnsureConnectionOpen(string connectionString) { if (_activeConnection != null && _activeConnectionString != connectionString) { InternalLogger.Trace("DatabaseTarget(Name={0}): Close connection because of opening new.", Name); CloseConnection(); } if (_activeConnection != null) { return; } InternalLogger.Trace("DatabaseTarget(Name={0}): Open connection.", Name); _activeConnection = OpenConnection(connectionString); _activeConnectionString = connectionString; } private void CloseConnection() { _activeConnectionString = null; if (_activeConnection != null) { _activeConnection.Close(); _activeConnection.Dispose(); _activeConnection = null; } } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Security", "CA2100:Review SQL queries for security vulnerabilities", Justification = "It's up to the user to ensure proper quoting.")] private void RunInstallCommands(InstallationContext installationContext, IEnumerable<DatabaseCommandInfo> commands) { // create log event that will be used to render all layouts LogEventInfo logEvent = installationContext.CreateLogEvent(); try { foreach (var commandInfo in commands) { var connectionString = GetConnectionStringFromCommand(commandInfo, logEvent); // Set ConnectionType if it has not been initialized already if (ConnectionType == null) { SetConnectionType(); } EnsureConnectionOpen(connectionString); string commandText = RenderLogEvent(commandInfo.Text, logEvent); 
installationContext.Trace("DatabaseTarget(Name={0}) - Executing {1} '{2}'", Name, commandInfo.CommandType, commandText); using (IDbCommand command = CreateDbCommandWithParameters(logEvent, commandInfo.CommandType, commandText, commandInfo.Parameters)) { try { command.ExecuteNonQuery(); } catch (Exception exception) { if (exception.MustBeRethrownImmediately()) { throw; } if (commandInfo.IgnoreFailures || installationContext.IgnoreFailures) { installationContext.Warning(exception.Message); } else { installationContext.Error(exception.Message); throw; } } } } } finally { InternalLogger.Trace("DatabaseTarget(Name={0}): Close connection after install.", Name); CloseConnection(); } } private string GetConnectionStringFromCommand(DatabaseCommandInfo commandInfo, LogEventInfo logEvent) { string connectionString; if (commandInfo.ConnectionString != null) { // if there is connection string specified on the command info, use it connectionString = RenderLogEvent(commandInfo.ConnectionString, logEvent); } else if (InstallConnectionString != null) { // next, try InstallConnectionString connectionString = RenderLogEvent(InstallConnectionString, logEvent); } else { // if it's not defined, fall back to regular connection string connectionString = BuildConnectionString(logEvent); } return connectionString; } /// <summary> /// Create database parameter /// </summary> /// <param name="command">Current command.</param> /// <param name="parameterInfo">Parameter configuration info.</param> protected virtual IDbDataParameter CreateDatabaseParameter(IDbCommand command, DatabaseParameterInfo parameterInfo) { IDbDataParameter dbParameter = command.CreateParameter(); dbParameter.Direction = ParameterDirection.Input; if (parameterInfo.Name != null) { dbParameter.ParameterName = parameterInfo.Name; } if (parameterInfo.Size != 0) { dbParameter.Size = parameterInfo.Size; } if (parameterInfo.Precision != 0) { dbParameter.Precision = parameterInfo.Precision; } if (parameterInfo.Scale != 0) { dbParameter.Scale = parameterInfo.Scale; } try { if (!parameterInfo.SetDbType(dbParameter)) { InternalLogger.Warn(" DatabaseTarget: Parameter: '{0}' - Failed to assign DbType={1}", parameterInfo.Name, parameterInfo.DbType); } } catch (Exception ex) { if (ex.MustBeRethrownImmediately()) throw; InternalLogger.Error(ex, " DatabaseTarget: Parameter: '{0}' - Failed to assign DbType={1}", parameterInfo.Name, parameterInfo.DbType); if (ex.MustBeRethrown()) throw; } return dbParameter; } /// <summary> /// Extract parameter value from the logevent /// </summary> /// <param name="logEvent">Current logevent.</param> /// <param name="parameterInfo">Parameter configuration info.</param> protected internal virtual object GetDatabaseParameterValue(LogEventInfo logEvent, DatabaseParameterInfo parameterInfo) { Type dbParameterType = parameterInfo.ParameterType; if (string.IsNullOrEmpty(parameterInfo.Format) && dbParameterType == typeof(string)) { return RenderLogEvent(parameterInfo.Layout, logEvent) ?? string.Empty; } IFormatProvider dbParameterCulture = GetDbParameterCulture(logEvent, parameterInfo); if (TryGetConvertedRawValue(logEvent, parameterInfo, dbParameterType, dbParameterCulture, out var value)) { return value ?? 
CreateDefaultValue(dbParameterType); } try { InternalLogger.Trace(" DatabaseTarget: Attempt to convert layout value for '{0}' into {1}", parameterInfo.Name, dbParameterType?.Name); string parameterValue = RenderLogEvent(parameterInfo.Layout, logEvent); if (string.IsNullOrEmpty(parameterValue)) { return CreateDefaultValue(dbParameterType); } return PropertyTypeConverter.Convert(parameterValue, dbParameterType, parameterInfo.Format, dbParameterCulture) ?? DBNull.Value; } catch (Exception ex) { if (ex.MustBeRethrownImmediately()) throw; InternalLogger.Warn(ex, " DatabaseTarget: Failed to convert layout value for '{0}' into {1}", parameterInfo.Name, dbParameterType?.Name); if (ex.MustBeRethrown()) throw; return CreateDefaultValue(dbParameterType); } } private bool TryGetConvertedRawValue(LogEventInfo logEvent, DatabaseParameterInfo parameterInfo, Type dbParameterType, IFormatProvider dbParameterCulture, out object value) { if (parameterInfo.Layout.TryGetRawValue(logEvent, out var rawValue)) { try { InternalLogger.Trace(" DatabaseTarget: Attempt to convert raw value for '{0}' into {1}", parameterInfo.Name, dbParameterType?.Name); if (ReferenceEquals(rawValue, DBNull.Value)) { value = rawValue; return true; } value = PropertyTypeConverter.Convert(rawValue, dbParameterType, parameterInfo.Format, dbParameterCulture); return true; } catch (Exception ex) { if (ex.MustBeRethrownImmediately()) throw; InternalLogger.Warn(ex, " DatabaseTarget: Failed to convert raw value for '{0}' into {1}", parameterInfo.Name, dbParameterType?.Name); if (ex.MustBeRethrown()) throw; } } value = null; return false; } /// <summary> /// Create Default Value of Type /// </summary> /// <param name="dbParameterType"></param> /// <returns></returns> private static object CreateDefaultValue(Type dbParameterType) { if (dbParameterType == typeof(string)) return string.Empty; else if (dbParameterType.IsValueType()) return Activator.CreateInstance(dbParameterType); else return DBNull.Value; } private IFormatProvider GetDbParameterCulture(LogEventInfo logEvent, DatabaseParameterInfo parameterInfo) { return parameterInfo.Culture ?? logEvent.FormatProvider ?? LoggingConfiguration?.DefaultCultureInfo; } #if NETSTANDARD1_0 /// <summary> /// Fake transaction /// /// Transactions aren't in .NET Core: https://github.com/dotnet/corefx/issues/2949 /// </summary> private class TransactionScope : IDisposable { private readonly TransactionScopeOption suppress; public TransactionScope(TransactionScopeOption suppress) { this.suppress = suppress; } public void Complete() { } /// <summary> /// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. /// </summary> public void Dispose() { } } /// <summary> /// Fake option /// </summary> private enum TransactionScopeOption { Required, RequiresNew, Suppress, } #endif } } #endif
1
20,713
Why is `IsolationLevel` fully qualified? `System.Data` is already in scope, isn't it? What am I missing? (see the sketch after this record)
NLog-NLog
.cs
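
A likely answer to the reviewer's question above, sketched by the editor (the class and alias names here are hypothetical, not part of NLog): DatabaseTarget.cs also imports System.Transactions outside of NETSTANDARD1_0, and that namespace defines its own IsolationLevel enum, so an unqualified IsolationLevel would be ambiguous in those builds. Fully qualifying the type, or introducing a using alias, avoids the ambiguity.

using System.Data;            // defines System.Data.IsolationLevel
using System.Transactions;    // also defines System.Transactions.IsolationLevel
using DataIsolationLevel = System.Data.IsolationLevel;

namespace IsolationLevelSketch
{
    internal class DatabaseTargetLike
    {
        // With both namespaces imported, the unqualified form fails with
        // CS0104: 'IsolationLevel' is an ambiguous reference between
        // 'System.Data.IsolationLevel' and 'System.Transactions.IsolationLevel'.
        // public IsolationLevel? IsolationLevel { get; set; }

        // Either of the following compiles:
        public System.Data.IsolationLevel? IsolationLevel { get; set; }
        public DataIsolationLevel? AliasedIsolationLevel { get; set; }
    }
}
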
@@ -24,15 +24,8 @@ var ( // Action is the action can be Executed in protocols. The method is added to avoid mistakenly used empty interface as action. type Action interface { - SetEnvelopeContext(SealedEnvelope) - SanityCheck() error -} - -type actionPayload interface { - Serialize() []byte Cost() (*big.Int, error) IntrinsicGas() (uint64, error) - SetEnvelopeContext(SealedEnvelope) SanityCheck() error }
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package action import ( "math" "math/big" "github.com/pkg/errors" "github.com/iotexproject/go-pkgs/crypto" ) var ( // ErrAction indicates error for an action ErrAction = errors.New("action error") // ErrAddress indicates error of address ErrAddress = errors.New("address error") ) // Action is the action can be Executed in protocols. The method is added to avoid mistakenly used empty interface as action. type Action interface { SetEnvelopeContext(SealedEnvelope) SanityCheck() error } type actionPayload interface { Serialize() []byte Cost() (*big.Int, error) IntrinsicGas() (uint64, error) SetEnvelopeContext(SealedEnvelope) SanityCheck() error } type hasDestination interface { Destination() string } // Sign signs the action using sender's private key func Sign(act Envelope, sk crypto.PrivateKey) (SealedEnvelope, error) { sealed := SealedEnvelope{Envelope: act} sealed.srcPubkey = sk.PublicKey() hash := act.Hash() sig, err := sk.Sign(hash[:]) if err != nil { return sealed, errors.Wrapf(ErrAction, "failed to sign action hash = %x", hash) } sealed.signature = sig sealed.payload.SetEnvelopeContext(sealed) return sealed, nil } // FakeSeal creates a SealedActionEnvelope without signature. // This method should be only used in tests. func FakeSeal(act Envelope, pubk crypto.PublicKey) SealedEnvelope { sealed := SealedEnvelope{ Envelope: act, srcPubkey: pubk, } sealed.payload.SetEnvelopeContext(sealed) return sealed } // AssembleSealedEnvelope assembles a SealedEnvelope use Envelope, Sender Address and Signature. // This method should be only used in tests. func AssembleSealedEnvelope(act Envelope, pk crypto.PublicKey, sig []byte) SealedEnvelope { sealed := SealedEnvelope{ Envelope: act, srcPubkey: pk, signature: sig, } sealed.payload.SetEnvelopeContext(sealed) return sealed } // Verify verifies the action using sender's public key func Verify(sealed SealedEnvelope) error { if sealed.SrcPubkey() == nil { return errors.New("empty public key") } // Reject action with insufficient gas limit intrinsicGas, err := sealed.IntrinsicGas() if intrinsicGas > sealed.GasLimit() || err != nil { return errors.Wrap(ErrInsufficientBalanceForGas, "insufficient gas") } hash := sealed.Envelope.Hash() if sealed.SrcPubkey().Verify(hash[:], sealed.Signature()) { return nil } return errors.Wrapf( ErrAction, "failed to verify action hash = %x and signature = %x", hash, sealed.Signature(), ) } // ClassifyActions classfies actions func ClassifyActions(actions []SealedEnvelope) ([]*Transfer, []*Execution) { tsfs := make([]*Transfer, 0) exes := make([]*Execution, 0) for _, elp := range actions { act := elp.Action() switch act := act.(type) { case *Transfer: tsfs = append(tsfs, act) case *Execution: exes = append(exes, act) } } return tsfs, exes } func calculateIntrinsicGas(baseIntrinsicGas uint64, payloadGas uint64, payloadSize uint64) (uint64, error) { if (math.MaxUint64-baseIntrinsicGas)/payloadGas < payloadSize { return 0, ErrOutOfGas } return payloadSize*payloadGas + baseIntrinsicGas, nil }
1
21,667
Do we still need `SanityCheck()`? It seems all implementations just return nil now. (see the sketch after this record)
iotexproject-iotex-core
go
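
To make the question above concrete, a small editor's sketch in Go (the transferLike type and its check are hypothetical, not from iotex-core) using the post-patch shape of the Action interface. If every real implementation's SanityCheck() is equivalent to `return nil`, the method can simply be dropped from the interface; keeping it only pays off if at least one action performs a check like the one below.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// Post-patch shape of the Action interface, as shown in the diff above.
type Action interface {
	Cost() (*big.Int, error)
	IntrinsicGas() (uint64, error)
	SanityCheck() error
}

// transferLike is a hypothetical action used only for this sketch.
type transferLike struct {
	amount *big.Int
}

func (t *transferLike) Cost() (*big.Int, error)       { return new(big.Int).Set(t.amount), nil }
func (t *transferLike) IntrinsicGas() (uint64, error) { return 10000, nil }

// A SanityCheck that actually validates something; without checks like this,
// the method adds nothing over returning nil and could be removed.
func (t *transferLike) SanityCheck() error {
	if t.amount == nil || t.amount.Sign() < 0 {
		return errors.New("negative or missing amount")
	}
	return nil
}

func main() {
	var a Action = &transferLike{amount: big.NewInt(42)}
	fmt.Println(a.SanityCheck()) // <nil>
}
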
@@ -94,7 +94,6 @@ export default function Core(rootElement, userSettings, rootInstanceSymbol = fal * @type {HTMLElement} */ this.rootElement = rootElement; - /* eslint-enable jsdoc/require-description-complete-sentence */ /** * The nearest document over container. *
1
import { addClass, empty, removeClass } from './helpers/dom/element'; import { isFunction } from './helpers/function'; import { isDefined, isUndefined, isRegExp, _injectProductInfo, isEmpty } from './helpers/mixed'; import { isMobileBrowser, isIpadOS } from './helpers/browser'; import EditorManager from './editorManager'; import EventManager from './eventManager'; import { deepClone, duckSchema, isObjectEqual, isObject, deepObjectSize, hasOwnProperty, createObjectPropListener, objectEach } from './helpers/object'; import { arrayMap, arrayEach, arrayReduce, getDifferenceOfArrays, stringToArray, pivot } from './helpers/array'; import { instanceToHTML } from './utils/parseTable'; import { getPlugin, getPluginsNames } from './plugins/registry'; import { getRenderer } from './renderers/registry'; import { getValidator } from './validators/registry'; import { randomString, toUpperCaseFirst } from './helpers/string'; import { rangeEach, rangeEachReverse, isNumericLike } from './helpers/number'; import TableView from './tableView'; import DataSource from './dataSource'; import { cellMethodLookupFactory, spreadsheetColumnLabel } from './helpers/data'; import { IndexMapper } from './translations'; import { registerAsRootInstance, hasValidParameter, isRootInstance } from './utils/rootInstance'; import { CellCoords, ViewportColumnsCalculator } from './3rdparty/walkontable/src'; import Hooks from './pluginHooks'; import { hasLanguageDictionary, getValidLanguageCode, getTranslatedPhrase } from './i18n/registry'; import { warnUserAboutLanguageRegistration, normalizeLanguageCode } from './i18n/utils'; import { startObserving as keyStateStartObserving, stopObserving as keyStateStopObserving } from './utils/keyStateObserver'; import { Selection } from './selection'; import { MetaManager, DynamicCellMetaMod, replaceData } from './dataMap'; import { createUniqueMap } from './utils/dataStructures/uniqueMap'; let activeGuid = null; /* eslint-disable jsdoc/require-description-complete-sentence */ /** * Handsontable constructor. * * @core * @class Core * @description * * The `Handsontable` class to which we refer as to `Core`, allows you to modify the grid's behavior by using one of the available public methods. * * ## How to call a method * * ```js * // First, let's contruct Handsontable * const hot = new Handsontable(document.getElementById('example'), options); * * // Then, let's use the setDataAtCell method * hot.setDataAtCell(0, 0, 'new value'); * ``` * * @param {HTMLElement} rootElement The element to which the Handsontable instance is injected. * @param {object} userSettings The user defined options. * @param {boolean} [rootInstanceSymbol=false] Indicates if the instance is root of all later instances created. */ export default function Core(rootElement, userSettings, rootInstanceSymbol = false) { let preventScrollingToCell = false; let instance = this; const eventManager = new EventManager(instance); let datamap; let dataSource; let grid; let editorManager; let firstRun = true; userSettings.language = getValidLanguageCode(userSettings.language); const metaManager = new MetaManager(instance, userSettings, [DynamicCellMetaMod]); const tableMeta = metaManager.getTableMeta(); const globalMeta = metaManager.getGlobalMeta(); const pluginsRegistry = createUniqueMap(); if (hasValidParameter(rootInstanceSymbol)) { registerAsRootInstance(this); } // TODO: check if references to DOM elements should be move to UI layer (Walkontable) /** * Reference to the container element. 
* * @private * @type {HTMLElement} */ this.rootElement = rootElement; /* eslint-enable jsdoc/require-description-complete-sentence */ /** * The nearest document over container. * * @private * @type {Document} */ this.rootDocument = rootElement.ownerDocument; /** * Window object over container's document. * * @private * @type {Window} */ this.rootWindow = this.rootDocument.defaultView; /** * A boolean to tell if the Handsontable has been fully destroyed. This is set to `true` * after `afterDestroy` hook is called. * * @memberof Core# * @member isDestroyed * @type {boolean} */ this.isDestroyed = false; /** * The counter determines how many times the render suspending was called. It allows * tracking the nested suspending calls. For each render suspend resuming call the * counter is decremented. The value equal to 0 means the render suspending feature * is disabled. * * @private * @type {number} */ this.renderSuspendedCounter = 0; /** * The counter determines how many times the execution suspending was called. It allows * tracking the nested suspending calls. For each execution suspend resuming call the * counter is decremented. The value equal to 0 means the execution suspending feature * is disabled. * * @private * @type {number} */ this.executionSuspendedCounter = 0; keyStateStartObserving(this.rootDocument); this.container = this.rootDocument.createElement('div'); this.renderCall = false; rootElement.insertBefore(this.container, rootElement.firstChild); if (isRootInstance(this)) { _injectProductInfo(userSettings.licenseKey, rootElement); } this.guid = `ht_${randomString()}`; // this is the namespace for global events /** * Instance of index mapper which is responsible for managing the column indexes. * * @memberof Core# * @member columnIndexMapper * @type {IndexMapper} */ this.columnIndexMapper = new IndexMapper(); /** * Instance of index mapper which is responsible for managing the row indexes. * * @memberof Core# * @member rowIndexMapper * @type {IndexMapper} */ this.rowIndexMapper = new IndexMapper(); dataSource = new DataSource(instance); if (!this.rootElement.id || this.rootElement.id.substring(0, 3) === 'ht_') { this.rootElement.id = this.guid; // if root element does not have an id, assign a random id } const visualToRenderableCoords = (coords) => { const { row: visualRow, col: visualColumn } = coords; return new CellCoords( // We just store indexes for rows and columns without headers. visualRow >= 0 ? instance.rowIndexMapper.getRenderableFromVisualIndex(visualRow) : visualRow, visualColumn >= 0 ? instance.columnIndexMapper.getRenderableFromVisualIndex(visualColumn) : visualColumn ); }; const renderableToVisualCoords = (coords) => { const { row: renderableRow, col: renderableColumn } = coords; return new CellCoords( // We just store indexes for rows and columns without headers. renderableRow >= 0 ? instance.rowIndexMapper.getVisualFromRenderableIndex(renderableRow) : renderableRow, renderableColumn >= 0 ? instance.columnIndexMapper.getVisualFromRenderableIndex(renderableColumn) : renderableColumn // eslint-disable-line max-len ); }; let selection = new Selection(tableMeta, { countCols: () => instance.countCols(), countRows: () => instance.countRows(), propToCol: prop => datamap.propToCol(prop), isEditorOpened: () => (instance.getActiveEditor() ? 
instance.getActiveEditor().isOpened() : false), countColsTranslated: () => this.view.countRenderableColumns(), countRowsTranslated: () => this.view.countRenderableRows(), visualToRenderableCoords, renderableToVisualCoords, isDisabledCellSelection: (visualRow, visualColumn) => instance.getCellMeta(visualRow, visualColumn).disableVisualSelection }); this.selection = selection; const onIndexMapperCacheUpdate = ({ hiddenIndexesChanged }) => { if (hiddenIndexesChanged) { this.selection.refresh(); } }; this.columnIndexMapper.addLocalHook('cacheUpdated', onIndexMapperCacheUpdate); this.rowIndexMapper.addLocalHook('cacheUpdated', onIndexMapperCacheUpdate); this.selection.addLocalHook('beforeSetRangeStart', (cellCoords) => { this.runHooks('beforeSetRangeStart', cellCoords); }); this.selection.addLocalHook('beforeSetRangeStartOnly', (cellCoords) => { this.runHooks('beforeSetRangeStartOnly', cellCoords); }); this.selection.addLocalHook('beforeSetRangeEnd', (cellCoords) => { this.runHooks('beforeSetRangeEnd', cellCoords); if (cellCoords.row < 0) { cellCoords.row = this.view.wt.wtTable.getFirstVisibleRow(); } if (cellCoords.col < 0) { cellCoords.col = this.view.wt.wtTable.getFirstVisibleColumn(); } }); this.selection.addLocalHook('afterSetRangeEnd', (cellCoords) => { const preventScrolling = createObjectPropListener(false); const selectionRange = this.selection.getSelectedRange(); const { from, to } = selectionRange.current(); const selectionLayerLevel = selectionRange.size() - 1; this.runHooks('afterSelection', from.row, from.col, to.row, to.col, preventScrolling, selectionLayerLevel); this.runHooks('afterSelectionByProp', from.row, instance.colToProp(from.col), to.row, instance.colToProp(to.col), preventScrolling, selectionLayerLevel); // eslint-disable-line max-len const isSelectedByAnyHeader = this.selection.isSelectedByAnyHeader(); const currentSelectedRange = this.selection.selectedRange.current(); let scrollToCell = true; if (preventScrollingToCell) { scrollToCell = false; } if (preventScrolling.isTouched()) { scrollToCell = !preventScrolling.value; } const isSelectedByRowHeader = this.selection.isSelectedByRowHeader(); const isSelectedByColumnHeader = this.selection.isSelectedByColumnHeader(); if (scrollToCell !== false) { if (!isSelectedByAnyHeader) { if (currentSelectedRange && !this.selection.isMultiple()) { this.view.scrollViewport(visualToRenderableCoords(currentSelectedRange.from)); } else { this.view.scrollViewport(visualToRenderableCoords(cellCoords)); } } else if (isSelectedByRowHeader) { this.view.scrollViewportVertically(instance.rowIndexMapper.getRenderableFromVisualIndex(cellCoords.row)); } else if (isSelectedByColumnHeader) { this.view.scrollViewportHorizontally(instance.columnIndexMapper.getRenderableFromVisualIndex(cellCoords.col)); } } // @TODO: These CSS classes are no longer needed anymore. They are used only as a indicator of the selected // rows/columns in the MergedCells plugin (via border.js#L520 in the walkontable module). After fixing // the Border class this should be removed. 
if (isSelectedByRowHeader && isSelectedByColumnHeader) { addClass(this.rootElement, ['ht__selection--rows', 'ht__selection--columns']); } else if (isSelectedByRowHeader) { removeClass(this.rootElement, 'ht__selection--columns'); addClass(this.rootElement, 'ht__selection--rows'); } else if (isSelectedByColumnHeader) { removeClass(this.rootElement, 'ht__selection--rows'); addClass(this.rootElement, 'ht__selection--columns'); } else { removeClass(this.rootElement, ['ht__selection--rows', 'ht__selection--columns']); } this._refreshBorders(null); }); this.selection.addLocalHook('afterSelectionFinished', (cellRanges) => { const selectionLayerLevel = cellRanges.length - 1; const { from, to } = cellRanges[selectionLayerLevel]; this.runHooks('afterSelectionEnd', from.row, from.col, to.row, to.col, selectionLayerLevel); this.runHooks('afterSelectionEndByProp', from.row, instance.colToProp(from.col), to.row, instance.colToProp(to.col), selectionLayerLevel); }); this.selection.addLocalHook('afterIsMultipleSelection', (isMultiple) => { const changedIsMultiple = this.runHooks('afterIsMultipleSelection', isMultiple.value); if (isMultiple.value) { isMultiple.value = changedIsMultiple; } }); this.selection.addLocalHook('beforeModifyTransformStart', (cellCoordsDelta) => { this.runHooks('modifyTransformStart', cellCoordsDelta); }); this.selection.addLocalHook('afterModifyTransformStart', (coords, rowTransformDir, colTransformDir) => { this.runHooks('afterModifyTransformStart', coords, rowTransformDir, colTransformDir); }); this.selection.addLocalHook('beforeModifyTransformEnd', (cellCoordsDelta) => { this.runHooks('modifyTransformEnd', cellCoordsDelta); }); this.selection.addLocalHook('afterModifyTransformEnd', (coords, rowTransformDir, colTransformDir) => { this.runHooks('afterModifyTransformEnd', coords, rowTransformDir, colTransformDir); }); this.selection.addLocalHook('afterDeselect', () => { editorManager.destroyEditor(); this._refreshBorders(); removeClass(this.rootElement, ['ht__selection--rows', 'ht__selection--columns']); this.runHooks('afterDeselect'); }); this.selection.addLocalHook('insertRowRequire', (totalRows) => { this.alter('insert_row', totalRows, 1, 'auto'); }); this.selection.addLocalHook('insertColRequire', (totalCols) => { this.alter('insert_col', totalCols, 1, 'auto'); }); grid = { /** * Inserts or removes rows and columns. * * @private * @param {string} action Possible values: "insert_row", "insert_col", "remove_row", "remove_col". * @param {number|Array} index Row or column visual index which from the alter action will be triggered. * Alter actions such as "remove_row" and "remove_col" support array indexes in the * format `[[index, amount], [index, amount]...]` this can be used to remove * non-consecutive columns or rows in one call. * @param {number} [amount=1] Ammount rows or columns to remove. * @param {string} [source] Optional. Source of hook runner. * @param {boolean} [keepEmptyRows] Optional. Flag for preventing deletion of empty rows. */ alter(action, index, amount = 1, source, keepEmptyRows) { let delta; const normalizeIndexesGroup = (indexes) => { if (indexes.length === 0) { return []; } const sortedIndexes = [...indexes]; // Sort the indexes in ascending order. sortedIndexes.sort(([indexA], [indexB]) => { if (indexA === indexB) { return 0; } return indexA > indexB ? 1 : -1; }); // Normalize the {index, amount} groups into bigger groups. 
const normalizedIndexes = arrayReduce(sortedIndexes, (acc, [groupIndex, groupAmount]) => { const previousItem = acc[acc.length - 1]; const [prevIndex, prevAmount] = previousItem; const prevLastIndex = prevIndex + prevAmount; if (groupIndex <= prevLastIndex) { const amountToAdd = Math.max(groupAmount - (prevLastIndex - groupIndex), 0); previousItem[1] += amountToAdd; } else { acc.push([groupIndex, groupAmount]); } return acc; }, [sortedIndexes[0]]); return normalizedIndexes; }; /* eslint-disable no-case-declarations */ switch (action) { case 'insert_row': const numberOfSourceRows = instance.countSourceRows(); if (tableMeta.maxRows === numberOfSourceRows) { return; } // eslint-disable-next-line no-param-reassign index = (isDefined(index)) ? index : numberOfSourceRows; delta = datamap.createRow(index, amount, source); if (delta) { metaManager.createRow(instance.toPhysicalRow(index), amount); const currentSelectedRange = selection.selectedRange.current(); const currentFromRange = currentSelectedRange?.from; const currentFromRow = currentFromRange?.row; // Moving down the selection (when it exist). It should be present on the "old" row. // TODO: The logic here should be handled by selection module. if (isDefined(currentFromRow) && currentFromRow >= index) { const { row: currentToRow, col: currentToColumn } = currentSelectedRange.to; let currentFromColumn = currentFromRange.col; // Workaround: headers are not stored inside selection. if (selection.isSelectedByRowHeader()) { currentFromColumn = -1; } // Remove from the stack the last added selection as that selection below will be // replaced by new transformed selection. selection.getSelectedRange().pop(); // I can't use transforms as they don't work in negative indexes. selection.setRangeStartOnly(new CellCoords(currentFromRow + delta, currentFromColumn), true); selection.setRangeEnd(new CellCoords(currentToRow + delta, currentToColumn)); // will call render() internally } else { instance._refreshBorders(); // it will call render and prepare methods } } break; case 'insert_col': delta = datamap.createCol(index, amount, source); if (delta) { metaManager.createColumn(instance.toPhysicalColumn(index), amount); if (Array.isArray(tableMeta.colHeaders)) { const spliceArray = [index, 0]; spliceArray.length += delta; // inserts empty (undefined) elements at the end of an array Array.prototype.splice.apply(tableMeta.colHeaders, spliceArray); // inserts empty (undefined) elements into the colHeader array } const currentSelectedRange = selection.selectedRange.current(); const currentFromRange = currentSelectedRange?.from; const currentFromColumn = currentFromRange?.col; // Moving right the selection (when it exist). It should be present on the "old" row. // TODO: The logic here should be handled by selection module. if (isDefined(currentFromColumn) && currentFromColumn >= index) { const { row: currentToRow, col: currentToColumn } = currentSelectedRange.to; let currentFromRow = currentFromRange.row; // Workaround: headers are not stored inside selection. if (selection.isSelectedByColumnHeader()) { currentFromRow = -1; } // Remove from the stack the last added selection as that selection below will be // replaced by new transformed selection. selection.getSelectedRange().pop(); // I can't use transforms as they don't work in negative indexes. 
selection.setRangeStartOnly(new CellCoords(currentFromRow, currentFromColumn + delta), true); selection.setRangeEnd(new CellCoords(currentToRow, currentToColumn + delta)); // will call render() internally } else { instance._refreshBorders(); // it will call render and prepare methods } } break; case 'remove_row': const removeRow = (indexes) => { let offset = 0; // Normalize the {index, amount} groups into bigger groups. arrayEach(indexes, ([groupIndex, groupAmount]) => { const calcIndex = isEmpty(groupIndex) ? instance.countRows() - 1 : Math.max(groupIndex - offset, 0); // If the 'index' is an integer decrease it by 'offset' otherwise pass it through to make the value // compatible with datamap.removeCol method. if (Number.isInteger(groupIndex)) { // eslint-disable-next-line no-param-reassign groupIndex = Math.max(groupIndex - offset, 0); } // TODO: for datamap.removeRow index should be passed as it is (with undefined and null values). If not, the logic // inside the datamap.removeRow breaks the removing functionality. const wasRemoved = datamap.removeRow(groupIndex, groupAmount, source); if (!wasRemoved) { return; } metaManager.removeRow(instance.toPhysicalRow(calcIndex), groupAmount); const totalRows = instance.countRows(); const fixedRowsTop = tableMeta.fixedRowsTop; if (fixedRowsTop >= calcIndex + 1) { tableMeta.fixedRowsTop -= Math.min(groupAmount, fixedRowsTop - calcIndex); } const fixedRowsBottom = tableMeta.fixedRowsBottom; if (fixedRowsBottom && calcIndex >= totalRows - fixedRowsBottom) { tableMeta.fixedRowsBottom -= Math.min(groupAmount, fixedRowsBottom); } offset += groupAmount; }); }; if (Array.isArray(index)) { removeRow(normalizeIndexesGroup(index)); } else { removeRow([[index, amount]]); } grid.adjustRowsAndCols(); instance._refreshBorders(); // it will call render and prepare methods break; case 'remove_col': const removeCol = (indexes) => { let offset = 0; // Normalize the {index, amount} groups into bigger groups. arrayEach(indexes, ([groupIndex, groupAmount]) => { const calcIndex = isEmpty(groupIndex) ? instance.countCols() - 1 : Math.max(groupIndex - offset, 0); let physicalColumnIndex = instance.toPhysicalColumn(calcIndex); // If the 'index' is an integer decrease it by 'offset' otherwise pass it through to make the value // compatible with datamap.removeCol method. if (Number.isInteger(groupIndex)) { // eslint-disable-next-line no-param-reassign groupIndex = Math.max(groupIndex - offset, 0); } // TODO: for datamap.removeCol index should be passed as it is (with undefined and null values). If not, the logic // inside the datamap.removeCol breaks the removing functionality. 
const wasRemoved = datamap.removeCol(groupIndex, groupAmount, source); if (!wasRemoved) { return; } metaManager.removeColumn(physicalColumnIndex, groupAmount); const fixedColumnsLeft = tableMeta.fixedColumnsLeft; if (fixedColumnsLeft >= calcIndex + 1) { tableMeta.fixedColumnsLeft -= Math.min(groupAmount, fixedColumnsLeft - calcIndex); } if (Array.isArray(tableMeta.colHeaders)) { if (typeof physicalColumnIndex === 'undefined') { physicalColumnIndex = -1; } tableMeta.colHeaders.splice(physicalColumnIndex, groupAmount); } offset += groupAmount; }); }; if (Array.isArray(index)) { removeCol(normalizeIndexesGroup(index)); } else { removeCol([[index, amount]]); } grid.adjustRowsAndCols(); instance._refreshBorders(); // it will call render and prepare methods break; default: throw new Error(`There is no such action "${action}"`); } if (!keepEmptyRows) { grid.adjustRowsAndCols(); // makes sure that we did not add rows that will be removed in next refresh } }, /** * Makes sure there are empty rows at the bottom of the table. * * @private */ adjustRowsAndCols() { const minRows = tableMeta.minRows; const minSpareRows = tableMeta.minSpareRows; const minCols = tableMeta.minCols; const minSpareCols = tableMeta.minSpareCols; if (minRows) { // should I add empty rows to data source to meet minRows? const nrOfRows = instance.countRows(); if (nrOfRows < minRows) { // The synchronization with cell meta is not desired here. For `minRows` option, // we don't want to touch/shift cell meta objects. datamap.createRow(nrOfRows, minRows - nrOfRows, 'auto'); } } if (minSpareRows) { const emptyRows = instance.countEmptyRows(true); // should I add empty rows to meet minSpareRows? if (emptyRows < minSpareRows) { const emptyRowsMissing = minSpareRows - emptyRows; const rowsToCreate = Math.min(emptyRowsMissing, tableMeta.maxRows - instance.countSourceRows()); // The synchronization with cell meta is not desired here. For `minSpareRows` option, // we don't want to touch/shift cell meta objects. datamap.createRow(instance.countRows(), rowsToCreate, 'auto'); } } { let emptyCols; // count currently empty cols if (minCols || minSpareCols) { emptyCols = instance.countEmptyCols(true); } let nrOfColumns = instance.countCols(); // should I add empty cols to meet minCols? if (minCols && !tableMeta.columns && nrOfColumns < minCols) { // The synchronization with cell meta is not desired here. For `minSpareRows` option, // we don't want to touch/shift cell meta objects. const colsToCreate = minCols - nrOfColumns; emptyCols += colsToCreate; datamap.createCol(nrOfColumns, colsToCreate, 'auto'); } // should I add empty cols to meet minSpareCols? if (minSpareCols && !tableMeta.columns && instance.dataType === 'array' && emptyCols < minSpareCols) { nrOfColumns = instance.countCols(); const emptyColsMissing = minSpareCols - emptyCols; const colsToCreate = Math.min(emptyColsMissing, tableMeta.maxCols - nrOfColumns); // The synchronization with cell meta is not desired here. For `minSpareRows` option, // we don't want to touch/shift cell meta objects. 
datamap.createCol(nrOfColumns, colsToCreate, 'auto'); } } const rowCount = instance.countRows(); const colCount = instance.countCols(); if (rowCount === 0 || colCount === 0) { selection.deselect(); } if (selection.isSelected()) { arrayEach(selection.selectedRange, (range) => { let selectionChanged = false; let fromRow = range.from.row; let fromCol = range.from.col; let toRow = range.to.row; let toCol = range.to.col; // if selection is outside, move selection to last row if (fromRow > rowCount - 1) { fromRow = rowCount - 1; selectionChanged = true; if (toRow > fromRow) { toRow = fromRow; } } else if (toRow > rowCount - 1) { toRow = rowCount - 1; selectionChanged = true; if (fromRow > toRow) { fromRow = toRow; } } // if selection is outside, move selection to last row if (fromCol > colCount - 1) { fromCol = colCount - 1; selectionChanged = true; if (toCol > fromCol) { toCol = fromCol; } } else if (toCol > colCount - 1) { toCol = colCount - 1; selectionChanged = true; if (fromCol > toCol) { fromCol = toCol; } } if (selectionChanged) { instance.selectCell(fromRow, fromCol, toRow, toCol); } }); } if (instance.view) { instance.view.adjustElementsSize(); } }, /** * Populate the data from the provided 2d array from the given cell coordinates. * * @private * @param {object} start Start selection position. Visual indexes. * @param {Array} input 2d data array. * @param {object} [end] End selection position (only for drag-down mode). Visual indexes. * @param {string} [source="populateFromArray"] Source information string. * @param {string} [method="overwrite"] Populate method. Possible options: `shift_down`, `shift_right`, `overwrite`. * @param {string} direction (left|right|up|down) String specifying the direction. * @param {Array} deltas The deltas array. A difference between values of adjacent cells. * Useful **only** when the type of handled cells is `numeric`. * @returns {object|undefined} Ending td in pasted area (only if any cell was changed). */ populateFromArray(start, input, end, source, method, direction, deltas) { // TODO: either remove or implement the `direction` argument. Currently it's not working at all. 
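      // Population modes handled below:
      //   'shift_down'  - rows from `start.row` down are pushed down and the input is written above them,
      //   'shift_right' - cells from `start.col` right are pushed right and the input is written before them,
      //   'overwrite'   - (default) the input overwrites current values, repeating itself until the
      //                   optional `end` coordinates are reached.
      // Illustration (a sketch, values assumed): populateFromArray({row: 0, col: 0}, [[1, 2]], {row: 0, col: 3})
      // in 'overwrite' mode fills row 0, columns 0..3 with 1, 2, 1, 2.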
let r; let rlen; let c; let clen; const setData = []; const current = {}; const newDataByColumns = []; const startRow = start.row; const startColumn = start.col; rlen = input.length; if (rlen === 0) { return false; } let columnsPopulationEnd = 0; let rowsPopulationEnd = 0; if (isObject(end)) { columnsPopulationEnd = end.col - startColumn + 1; rowsPopulationEnd = end.row - startRow + 1; } // insert data with specified pasteMode method switch (method) { case 'shift_down': // translate data from a list of rows to a list of columns const populatedDataByColumns = pivot(input); const numberOfDataColumns = populatedDataByColumns.length; // method's argument can extend the range of data population (data would be repeated) const numberOfColumnsToPopulate = Math.max(numberOfDataColumns, columnsPopulationEnd); const pushedDownDataByRows = instance.getData().slice(startRow); // translate data from a list of rows to a list of columns const pushedDownDataByColumns = pivot(pushedDownDataByRows) .slice(startColumn, startColumn + numberOfColumnsToPopulate); for (c = 0; c < numberOfColumnsToPopulate; c += 1) { if (c < numberOfDataColumns) { for (r = 0, rlen = populatedDataByColumns[c].length; r < rowsPopulationEnd - rlen; r += 1) { // repeating data for rows populatedDataByColumns[c].push(populatedDataByColumns[c][r % rlen]); } if (c < pushedDownDataByColumns.length) { newDataByColumns.push(populatedDataByColumns[c].concat(pushedDownDataByColumns[c])); } else { // if before data population, there was no data in the column // we fill the required rows' newly-created cells with `null` values newDataByColumns.push(populatedDataByColumns[c].concat( new Array(pushedDownDataByRows.length).fill(null))); } } else { // Repeating data for columns. newDataByColumns.push(populatedDataByColumns[c % numberOfDataColumns].concat(pushedDownDataByColumns[c])); } } instance.populateFromArray(startRow, startColumn, pivot(newDataByColumns)); break; case 'shift_right': const numberOfDataRows = input.length; // method's argument can extend the range of data population (data would be repeated) const numberOfRowsToPopulate = Math.max(numberOfDataRows, rowsPopulationEnd); const pushedRightDataByRows = instance.getData().slice(startRow).map(rowData => rowData.slice(startColumn)); for (r = 0; r < numberOfRowsToPopulate; r += 1) { if (r < numberOfDataRows) { for (c = 0, clen = input[r].length; c < columnsPopulationEnd - clen; c += 1) { // repeating data for rows input[r].push(input[r][c % clen]); } if (r < pushedRightDataByRows.length) { for (let i = 0; i < pushedRightDataByRows[r].length; i += 1) { input[r].push(pushedRightDataByRows[r][i]); } } else { // if before data population, there was no data in the row // we fill the required columns' newly-created cells with `null` values input[r].push(...new Array(pushedRightDataByRows[0].length).fill(null)); } } else { // Repeating data for columns. input.push(input[r % rlen].slice(0, numberOfRowsToPopulate).concat(pushedRightDataByRows[r])); } } instance.populateFromArray(startRow, startColumn, input); break; case 'overwrite': default: // overwrite and other not specified options current.row = start.row; current.col = start.col; const selected = { // selected range row: (end && start) ? (end.row - start.row + 1) : 1, col: (end && start) ? 
(end.col - start.col + 1) : 1 }; let skippedRow = 0; let skippedColumn = 0; let pushData = true; let cellMeta; const getInputValue = function getInputValue(row, col = null) { const rowValue = input[row % input.length]; if (col !== null) { return rowValue[col % rowValue.length]; } return rowValue; }; const rowInputLength = input.length; const rowSelectionLength = end ? end.row - start.row + 1 : 0; if (end) { rlen = rowSelectionLength; } else { rlen = Math.max(rowInputLength, rowSelectionLength); } for (r = 0; r < rlen; r++) { if ((end && current.row > end.row && rowSelectionLength > rowInputLength) || (!tableMeta.allowInsertRow && current.row > instance.countRows() - 1) || (current.row >= tableMeta.maxRows)) { break; } const visualRow = r - skippedRow; const colInputLength = getInputValue(visualRow).length; const colSelectionLength = end ? end.col - start.col + 1 : 0; if (end) { clen = colSelectionLength; } else { clen = Math.max(colInputLength, colSelectionLength); } current.col = start.col; cellMeta = instance.getCellMeta(current.row, current.col); if ((source === 'CopyPaste.paste' || source === 'Autofill.fill') && cellMeta.skipRowOnPaste) { skippedRow += 1; current.row += 1; rlen += 1; /* eslint-disable no-continue */ continue; } skippedColumn = 0; for (c = 0; c < clen; c++) { if ((end && current.col > end.col && colSelectionLength > colInputLength) || (!tableMeta.allowInsertColumn && current.col > instance.countCols() - 1) || (current.col >= tableMeta.maxCols)) { break; } cellMeta = instance.getCellMeta(current.row, current.col); if ((source === 'CopyPaste.paste' || source === 'Autofill.fill') && cellMeta.skipColumnOnPaste) { skippedColumn += 1; current.col += 1; clen += 1; continue; } if (cellMeta.readOnly && source !== 'UndoRedo.undo') { current.col += 1; /* eslint-disable no-continue */ continue; } const visualColumn = c - skippedColumn; let value = getInputValue(visualRow, visualColumn); let orgValue = instance.getDataAtCell(current.row, current.col); const index = { row: visualRow, col: visualColumn }; if (source === 'Autofill.fill') { const result = instance .runHooks('beforeAutofillInsidePopulate', index, direction, input, deltas, {}, selected); if (result) { value = isUndefined(result.value) ? value : result.value; } } if (value !== null && typeof value === 'object') { // when 'value' is array and 'orgValue' is null, set 'orgValue' to // an empty array so that the null value can be compared to 'value' // as an empty value for the array context if (Array.isArray(value) && orgValue === null) orgValue = []; if (orgValue === null || typeof orgValue !== 'object') { pushData = false; } else { const orgValueSchema = duckSchema(Array.isArray(orgValue) ? orgValue : (orgValue[0] || orgValue)); const valueSchema = duckSchema(Array.isArray(value) ? value : (value[0] || value)); /* eslint-disable max-depth */ if (isObjectEqual(orgValueSchema, valueSchema)) { value = deepClone(value); } else { pushData = false; } } } else if (orgValue !== null && typeof orgValue === 'object') { pushData = false; } if (pushData) { setData.push([current.row, current.col, value]); } pushData = true; current.col += 1; } current.row += 1; } instance.setDataAtCell(setData, null, null, source || 'populateFromArray'); break; } }, }; /** * Internal function to set `language` key of settings. * * @private * @param {string} languageCode Language code for specific language i.e. 'en-US', 'pt-BR', 'de-DE'. 
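 * For illustration (a sketch; assumes the matching language pack is registered), this is normally
 * reached through `updateSettings`, e.g. `hot.updateSettings({ language: 'de-DE' })`.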
* @fires Hooks#afterLanguageChange */ function setLanguage(languageCode) { const normalizedLanguageCode = normalizeLanguageCode(languageCode); if (hasLanguageDictionary(normalizedLanguageCode)) { instance.runHooks('beforeLanguageChange', normalizedLanguageCode); globalMeta.language = normalizedLanguageCode; instance.runHooks('afterLanguageChange', normalizedLanguageCode); } else { warnUserAboutLanguageRegistration(languageCode); } } /** * Internal function to set `className` or `tableClassName`, depending on the key from the settings object. * * @private * @param {string} className `className` or `tableClassName` from the key in the settings object. * @param {string|string[]} classSettings String or array of strings. Contains class name(s) from settings object. */ function setClassName(className, classSettings) { const element = className === 'className' ? instance.rootElement : instance.table; if (firstRun) { addClass(element, classSettings); } else { let globalMetaSettingsArray = []; let settingsArray = []; if (globalMeta[className]) { globalMetaSettingsArray = Array.isArray(globalMeta[className]) ? globalMeta[className] : stringToArray(globalMeta[className]); } if (classSettings) { settingsArray = Array.isArray(classSettings) ? classSettings : stringToArray(classSettings); } const classNameToRemove = getDifferenceOfArrays(globalMetaSettingsArray, settingsArray); const classNameToAdd = getDifferenceOfArrays(settingsArray, globalMetaSettingsArray); if (classNameToRemove.length) { removeClass(element, classNameToRemove); } if (classNameToAdd.length) { addClass(element, classNameToAdd); } } globalMeta[className] = classSettings; } this.init = function() { dataSource.setData(tableMeta.data); instance.runHooks('beforeInit'); if (isMobileBrowser() || isIpadOS()) { addClass(instance.rootElement, 'mobile'); } this.updateSettings(tableMeta, true); this.view = new TableView(this); editorManager = EditorManager.getInstance(instance, tableMeta, selection); instance.runHooks('init'); this.forceFullRender = true; // used when data was changed this.view.render(); if (typeof firstRun === 'object') { instance.runHooks('afterChange', firstRun[0], firstRun[1]); firstRun = false; } instance.runHooks('afterInit'); }; /** * @ignore * @returns {object} */ function ValidatorsQueue() { // moved this one level up so it can be used in any function here. Probably this should be moved to a separate file let resolved = false; return { validatorsInQueue: 0, valid: true, addValidatorToQueue() { this.validatorsInQueue += 1; resolved = false; }, removeValidatorFormQueue() { this.validatorsInQueue = this.validatorsInQueue - 1 < 0 ? 0 : this.validatorsInQueue - 1; this.checkIfQueueIsEmpty(); }, onQueueEmpty() { }, checkIfQueueIsEmpty() { if (this.validatorsInQueue === 0 && resolved === false) { resolved = true; this.onQueueEmpty(this.valid); } } }; } /** * Get parsed number from numeric string. * * @private * @param {string} numericData Float (separated by a dot or a comma) or integer. * @returns {number} Number if we get data in parsable format, not changed value otherwise. */ function getParsedNumber(numericData) { // Unifying "float like" string. Change from value with comma determiner to value with dot determiner, // for example from `450,65` to `450.65`. 
const unifiedNumericData = numericData.replace(',', '.'); if (isNaN(parseFloat(unifiedNumericData)) === false) { return parseFloat(unifiedNumericData); } return numericData; } /** * @ignore * @param {Array} changes The 2D array containing information about each of the edited cells. * @param {string} source The string that identifies source of validation. * @param {Function} callback The callback function fot async validation. */ function validateChanges(changes, source, callback) { if (!changes.length) { return; } const activeEditor = instance.getActiveEditor(); const beforeChangeResult = instance.runHooks('beforeChange', changes, source || 'edit'); let shouldBeCanceled = true; if (beforeChangeResult === false) { if (activeEditor) { activeEditor.cancelChanges(); } return; } const waitingForValidator = new ValidatorsQueue(); waitingForValidator.onQueueEmpty = (isValid) => { if (activeEditor && shouldBeCanceled) { activeEditor.cancelChanges(); } callback(isValid); // called when async validators are resolved and beforeChange was not async }; for (let i = changes.length - 1; i >= 0; i--) { if (changes[i] === null) { changes.splice(i, 1); } else { const [row, prop, , newValue] = changes[i]; const col = datamap.propToCol(prop); const cellProperties = instance.getCellMeta(row, col); if (cellProperties.type === 'numeric' && typeof newValue === 'string' && isNumericLike(newValue)) { changes[i][3] = getParsedNumber(newValue); } /* eslint-disable no-loop-func */ if (instance.getCellValidator(cellProperties)) { waitingForValidator.addValidatorToQueue(); instance.validateCell(changes[i][3], cellProperties, (function(index, cellPropertiesReference) { return function(result) { if (typeof result !== 'boolean') { throw new Error('Validation error: result is not boolean'); } if (result === false && cellPropertiesReference.allowInvalid === false) { shouldBeCanceled = false; changes.splice(index, 1); // cancel the change cellPropertiesReference.valid = true; // we cancelled the change, so cell value is still valid const cell = instance.getCell(cellPropertiesReference.visualRow, cellPropertiesReference.visualCol); if (cell !== null) { removeClass(cell, tableMeta.invalidCellClassName); } // index -= 1; } waitingForValidator.removeValidatorFormQueue(); }; }(i, cellProperties)), source); } } } waitingForValidator.checkIfQueueIsEmpty(); } /** * Internal function to apply changes. Called after validateChanges. * * @private * @param {Array} changes Array in form of [row, prop, oldValue, newValue]. * @param {string} source String that identifies how this change will be described in changes array (useful in onChange callback). 
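 * For illustration, a single entry of `changes` (values assumed) could look like `[0, 'name', 'Ann', 'John']`.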
* @fires Hooks#beforeChangeRender * @fires Hooks#afterChange */ function applyChanges(changes, source) { let i = changes.length - 1; if (i < 0) { return; } for (; i >= 0; i--) { let skipThisChange = false; if (changes[i] === null) { changes.splice(i, 1); /* eslint-disable no-continue */ continue; } if ((changes[i][2] === null || changes[i][2] === void 0) && (changes[i][3] === null || changes[i][3] === void 0)) { /* eslint-disable no-continue */ continue; } if (tableMeta.allowInsertRow) { while (changes[i][0] > instance.countRows() - 1) { const numberOfCreatedRows = datamap.createRow(void 0, void 0, source); if (numberOfCreatedRows >= 1) { metaManager.createRow(null, numberOfCreatedRows); } else { skipThisChange = true; break; } } } if (instance.dataType === 'array' && (!tableMeta.columns || tableMeta.columns.length === 0) && tableMeta.allowInsertColumn) { while (datamap.propToCol(changes[i][1]) > instance.countCols() - 1) { const numberOfCreatedColumns = datamap.createCol(void 0, void 0, source); if (numberOfCreatedColumns >= 1) { metaManager.createColumn(null, numberOfCreatedColumns); } else { skipThisChange = true; break; } } } if (skipThisChange) { /* eslint-disable no-continue */ continue; } datamap.set(changes[i][0], changes[i][1], changes[i][3]); } instance.forceFullRender = true; // used when data was changed grid.adjustRowsAndCols(); instance.runHooks('beforeChangeRender', changes, source); editorManager.lockEditor(); instance._refreshBorders(null); editorManager.unlockEditor(); instance.view.adjustElementsSize(); instance.runHooks('afterChange', changes, source || 'edit'); const activeEditor = instance.getActiveEditor(); if (activeEditor && isDefined(activeEditor.refreshValue)) { activeEditor.refreshValue(); } } /** * Validate a single cell. * * @memberof Core# * @function validateCell * @param {string|number} value The value to validate. * @param {object} cellProperties The cell meta which corresponds with the value. * @param {Function} callback The callback function. * @param {string} source The string that identifies source of the validation. */ this.validateCell = function(value, cellProperties, callback, source) { let validator = instance.getCellValidator(cellProperties); // the `canBeValidated = false` argument suggests, that the cell passes validation by default. /** * @private * @function done * @param {boolean} valid Indicates if the validation was successful. * @param {boolean} [canBeValidated=true] Flag which controls the validation process. 
*/ function done(valid, canBeValidated = true) { // Fixes GH#3903 if (!canBeValidated || cellProperties.hidden === true) { callback(valid); return; } const col = cellProperties.visualCol; const row = cellProperties.visualRow; const td = instance.getCell(row, col, true); if (td && td.nodeName !== 'TH') { const renderableRow = instance.rowIndexMapper.getRenderableFromVisualIndex(row); const renderableColumn = instance.columnIndexMapper.getRenderableFromVisualIndex(col); instance.view.wt.wtSettings.settings.cellRenderer(renderableRow, renderableColumn, td); } callback(valid); } if (isRegExp(validator)) { validator = (function(expression) { return function(cellValue, validatorCallback) { validatorCallback(expression.test(cellValue)); }; }(validator)); } if (isFunction(validator)) { // eslint-disable-next-line no-param-reassign value = instance.runHooks('beforeValidate', value, cellProperties.visualRow, cellProperties.prop, source); // To provide consistent behaviour, validation should be always asynchronous instance._registerImmediate(() => { validator.call(cellProperties, value, (valid) => { if (!instance) { return; } // eslint-disable-next-line no-param-reassign valid = instance .runHooks('afterValidate', valid, value, cellProperties.visualRow, cellProperties.prop, source); cellProperties.valid = valid; done(valid); instance.runHooks('postAfterValidate', valid, value, cellProperties.visualRow, cellProperties.prop, source); }); }); } else { // resolve callback even if validator function was not found instance._registerImmediate(() => { cellProperties.valid = true; done(cellProperties.valid, false); }); } }; /** * @ignore * @param {number} row The visual row index. * @param {string|number} propOrCol The visual prop or column index. * @param {*} value The cell value. * @returns {Array} */ function setDataInputToArray(row, propOrCol, value) { if (Array.isArray(row)) { // it's an array of changes return row; } return [[row, propOrCol, value]]; } /** * @description * Set new value to a cell. To change many cells at once (recommended way), pass an array of `changes` in format * `[[row, col, value],...]` as the first argument. * * @memberof Core# * @function setDataAtCell * @param {number|Array} row Visual row index or array of changes in format `[[row, col, value],...]`. * @param {number} [column] Visual column index. * @param {string} [value] New value. * @param {string} [source] String that identifies how this change will be described in the changes array (useful in afterChange or beforeChange callback). Set to 'edit' if left empty. */ this.setDataAtCell = function(row, column, value, source) { const input = setDataInputToArray(row, column, value); const changes = []; let changeSource = source; let i; let ilen; let prop; for (i = 0, ilen = input.length; i < ilen; i++) { if (typeof input[i] !== 'object') { throw new Error('Method `setDataAtCell` accepts row number or changes array of arrays as its first parameter'); } if (typeof input[i][1] !== 'number') { throw new Error('Method `setDataAtCell` accepts row and column number as its parameters. 
If you want to use object property name, use method `setDataAtRowProp`'); // eslint-disable-line max-len } if (input[i][1] >= this.countCols()) { prop = input[i][1]; } else { prop = datamap.colToProp(input[i][1]); } changes.push([ input[i][0], prop, dataSource.getAtCell(this.toPhysicalRow(input[i][0]), input[i][1]), input[i][2], ]); } if (!changeSource && typeof row === 'object') { changeSource = column; } instance.runHooks('afterSetDataAtCell', changes, changeSource); validateChanges(changes, changeSource, () => { applyChanges(changes, changeSource); }); }; /** * @description * Set new value to a cell. To change many cells at once (recommended way), pass an array of `changes` in format * `[[row, prop, value],...]` as the first argument. * * @memberof Core# * @function setDataAtRowProp * @param {number|Array} row Visual row index or array of changes in format `[[row, prop, value], ...]`. * @param {string} prop Property name or the source string (e.g. `'first.name'` or `'0'`). * @param {string} value Value to be set. * @param {string} [source] String that identifies how this change will be described in changes array (useful in onChange callback). */ this.setDataAtRowProp = function(row, prop, value, source) { const input = setDataInputToArray(row, prop, value); const changes = []; let changeSource = source; let i; let ilen; for (i = 0, ilen = input.length; i < ilen; i++) { changes.push([ input[i][0], input[i][1], dataSource.getAtCell(this.toPhysicalRow(input[i][0]), input[i][1]), input[i][2], ]); } if (!changeSource && typeof row === 'object') { changeSource = prop; } instance.runHooks('afterSetDataAtRowProp', changes, changeSource); validateChanges(changes, changeSource, () => { applyChanges(changes, changeSource); }); }; /** * Listen to the keyboard input on document body. This allows Handsontable to capture keyboard events and respond * in the right way. * * @memberof Core# * @function listen * @fires Hooks#afterListen */ this.listen = function() { if (instance && !instance.isListening()) { activeGuid = instance.guid; instance.runHooks('afterListen'); } }; /** * Stop listening to keyboard input on the document body. Calling this method makes the Handsontable inactive for * any keyboard events. * * @memberof Core# * @function unlisten */ this.unlisten = function() { if (this.isListening()) { activeGuid = null; instance.runHooks('afterUnlisten'); } }; /** * Returns `true` if the current Handsontable instance is listening to keyboard input on document body. * * @memberof Core# * @function isListening * @returns {boolean} `true` if the instance is listening, `false` otherwise. */ this.isListening = function() { return activeGuid === instance.guid; }; /** * Destroys the current editor, render the table and prepares the editor of the newly selected cell. * * @memberof Core# * @function destroyEditor * @param {boolean} [revertOriginal=false] If `true`, the previous value will be restored. Otherwise, the edited value will be saved. * @param {boolean} [prepareEditorIfNeeded=true] If `true` the editor under the selected cell will be prepared to open. */ this.destroyEditor = function(revertOriginal = false, prepareEditorIfNeeded = true) { instance._refreshBorders(revertOriginal, prepareEditorIfNeeded); }; /** * Populate cells at position with 2D input array (e.g. `[[1, 2], [3, 4]]`). Use `endRow`, `endCol` when you * want to cut input when a certain row is reached. * * Optional `method` argument has the same effect as pasteMode option (see {@link Options#pasteMode}). 
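 *
 * A minimal sketch (cell values assumed): paste a 2x2 block at visual cell (1, 1), repeating the
 * input until row 3 / column 3 is reached:
 * ```js
 * hot.populateFromArray(1, 1, [['a', 'b'], ['c', 'd']], 3, 3);
 * ```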
* * @memberof Core# * @function populateFromArray * @param {number} row Start visual row index. * @param {number} column Start visual column index. * @param {Array} input 2d array. * @param {number} [endRow] End visual row index (use when you want to cut input when certain row is reached). * @param {number} [endCol] End visual column index (use when you want to cut input when certain column is reached). * @param {string} [source=populateFromArray] Used to identify this call in the resulting events (beforeChange, afterChange). * @param {string} [method=overwrite] Populate method, possible values: `'shift_down'`, `'shift_right'`, `'overwrite'`. * @param {string} direction Populate direction, possible values: `'left'`, `'right'`, `'up'`, `'down'`. * @param {Array} deltas The deltas array. A difference between values of adjacent cells. * Useful **only** when the type of handled cells is `numeric`. * @returns {object|undefined} Ending td in pasted area (only if any cell was changed). */ this.populateFromArray = function(row, column, input, endRow, endCol, source, method, direction, deltas) { if (!(typeof input === 'object' && typeof input[0] === 'object')) { throw new Error('populateFromArray parameter `input` must be an array of arrays'); // API changed in 0.9-beta2, let's check if you use it correctly } const c = typeof endRow === 'number' ? new CellCoords(endRow, endCol) : null; return grid.populateFromArray(new CellCoords(row, column), input, c, source, method, direction, deltas); }; /** * Adds/removes data from the column. This method works the same as Array.splice for arrays. * * @memberof Core# * @function spliceCol * @param {number} column Index of the column in which do you want to do splice. * @param {number} index Index at which to start changing the array. If negative, will begin that many elements from the end. * @param {number} amount An integer indicating the number of old array elements to remove. If amount is 0, no elements are removed. * @param {...number} [elements] The elements to add to the array. If you don't specify any elements, spliceCol simply removes elements from the array. * @returns {Array} Returns removed portion of columns. */ this.spliceCol = function(column, index, amount, ...elements) { return datamap.spliceCol(column, index, amount, ...elements); }; /** * Adds/removes data from the row. This method works the same as Array.splice for arrays. * * @memberof Core# * @function spliceRow * @param {number} row Index of column in which do you want to do splice. * @param {number} index Index at which to start changing the array. If negative, will begin that many elements from the end. * @param {number} amount An integer indicating the number of old array elements to remove. If amount is 0, no elements are removed. * @param {...number} [elements] The elements to add to the array. If you don't specify any elements, spliceCol simply removes elements from the array. * @returns {Array} Returns removed portion of rows. */ this.spliceRow = function(row, index, amount, ...elements) { return datamap.spliceRow(row, index, amount, ...elements); }; /** * Returns indexes of the currently selected cells as an array of arrays `[[startRow, startCol, endRow, endCol],...]`. * * Start row and start column are the coordinates of the active cell (where the selection was started). * * The version 0.36.0 adds a non-consecutive selection feature. Since this version, the method returns an array of arrays. 
* Additionally to collect the coordinates of the currently selected area (as it was previously done by the method) * you need to use `getSelectedLast` method. * * @memberof Core# * @function getSelected * @returns {Array[]|undefined} An array of arrays of the selection's coordinates. */ this.getSelected = function() { // https://github.com/handsontable/handsontable/issues/44 //cjl if (selection.isSelected()) { return arrayMap(selection.getSelectedRange(), ({ from, to }) => [from.row, from.col, to.row, to.col]); } }; /** * Returns the last coordinates applied to the table as a an array `[startRow, startCol, endRow, endCol]`. * * @since 0.36.0 * @memberof Core# * @function getSelectedLast * @returns {Array|undefined} An array of the selection's coordinates. */ this.getSelectedLast = function() { const selected = this.getSelected(); let result; if (selected && selected.length > 0) { result = selected[selected.length - 1]; } return result; }; /** * Returns the current selection as an array of CellRange objects. * * The version 0.36.0 adds a non-consecutive selection feature. Since this version, the method returns an array of arrays. * Additionally to collect the coordinates of the currently selected area (as it was previously done by the method) * you need to use `getSelectedRangeLast` method. * * @memberof Core# * @function getSelectedRange * @returns {CellRange[]|undefined} Selected range object or undefined if there is no selection. */ this.getSelectedRange = function() { // https://github.com/handsontable/handsontable/issues/44 //cjl if (selection.isSelected()) { return Array.from(selection.getSelectedRange()); } }; /** * Returns the last coordinates applied to the table as a CellRange object. * * @memberof Core# * @function getSelectedRangeLast * @since 0.36.0 * @returns {CellRange|undefined} Selected range object or undefined` if there is no selection. */ this.getSelectedRangeLast = function() { const selectedRange = this.getSelectedRange(); let result; if (selectedRange && selectedRange.length > 0) { result = selectedRange[selectedRange.length - 1]; } return result; }; /** * Erases content from cells that have been selected in the table. * * @memberof Core# * @function emptySelectedCells * @param {string} [source] String that identifies how this change will be described in the changes array (useful in afterChange or beforeChange callback). Set to 'edit' if left empty. * @since 0.36.0 */ this.emptySelectedCells = function(source) { if (!selection.isSelected() || this.countRows() === 0 || this.countCols() === 0) { return; } const changes = []; arrayEach(selection.getSelectedRange(), (cellRange) => { const topLeft = cellRange.getTopLeftCorner(); const bottomRight = cellRange.getBottomRightCorner(); rangeEach(topLeft.row, bottomRight.row, (row) => { rangeEach(topLeft.col, bottomRight.col, (column) => { if (!this.getCellMeta(row, column).readOnly) { changes.push([row, column, null]); } }); }); }); if (changes.length > 0) { this.setDataAtCell(changes, source); } }; /** * Checks if the table rendering process was suspended. See explanation in {@link Core#suspendRender}. * * @memberof Core# * @function isRenderSuspended * @since 8.3.0 * @returns {boolean} */ this.isRenderSuspended = function() { return this.renderSuspendedCounter > 0; }; /** * Suspends the rendering process. It's helpful to wrap the table render * cycles triggered by API calls or UI actions (or both) and call the "render" * once in the end. As a result, it improves the performance of wrapped operations. 
* When the table is in the suspend state, most operations will have no visual * effect until the rendering state is resumed. Resuming the state automatically * invokes the table rendering. To make sure that after executing all operations, * the table will be rendered, it's highly recommended to use the {@link Core#batchRender} * method or {@link Core#batch}, which additionally aggregates the logic execution * that happens behind the table. * * The method is intended to be used by advanced users. Suspending the rendering * process could cause visual glitches when wrongly implemented. * * @memberof Core# * @function suspendRender * @since 8.3.0 * @example * ```js * hot.suspendRender(); * hot.alter('insert_row', 5, 45); * hot.alter('insert_col', 10, 40); * hot.setDataAtCell(1, 1, 'John'); * hot.setDataAtCell(2, 2, 'Mark'); * hot.setDataAtCell(3, 3, 'Ann'); * hot.setDataAtCell(4, 4, 'Sophia'); * hot.setDataAtCell(5, 5, 'Mia'); * hot.selectCell(0, 0); * hot.resumeRender(); // It re-renders the table internally * ``` */ this.suspendRender = function() { this.renderSuspendedCounter += 1; }; /** * Resumes the rendering process. In combination with the {@link Core#suspendRender} * method it allows aggregating the table render cycles triggered by API calls or UI * actions (or both) and calls the "render" once in the end. When the table is in * the suspend state, most operations will have no visual effect until the rendering * state is resumed. Resuming the state automatically invokes the table rendering. * * The method is intended to be used by advanced users. Suspending the rendering * process could cause visual glitches when wrongly implemented. * * @memberof Core# * @function resumeRender * @since 8.3.0 * @example * ```js * hot.suspendRender(); * hot.alter('insert_row', 5, 45); * hot.alter('insert_col', 10, 40); * hot.setDataAtCell(1, 1, 'John'); * hot.setDataAtCell(2, 2, 'Mark'); * hot.setDataAtCell(3, 3, 'Ann'); * hot.setDataAtCell(4, 4, 'Sophia'); * hot.setDataAtCell(5, 5, 'Mia'); * hot.selectCell(0, 0); * hot.resumeRender(); // It re-renders the table internally * ``` */ this.resumeRender = function() { const nextValue = this.renderSuspendedCounter - 1; this.renderSuspendedCounter = Math.max(nextValue, 0); if (!this.isRenderSuspended() && nextValue === this.renderSuspendedCounter) { if (this.renderCall) { this.render(); } else { this._refreshBorders(null); } } }; /** * Rerender the table. Calling this method starts the process of recalculating, redrawing and applying the changes * to the DOM. While rendering the table all cell renderers are recalled. * * Calling this method manually is not recommended. Handsontable tries to render itself by choosing the most * optimal moments in its lifecycle. * * @memberof Core# * @function render */ this.render = function() { if (this.view) { this.renderCall = true; this.forceFullRender = true; // used when data was changed if (!this.isRenderSuspended()) { editorManager.lockEditor(); this._refreshBorders(null); editorManager.unlockEditor(); } } }; /** * The method aggregates multi-line API calls into a callback and postpones the * table rendering process. After the execution of the operations, the table is * rendered once. As a result, it improves the performance of wrapped operations. * Without batching, a similar case could trigger multiple table render calls. * * @memberof Core# * @function batchRender * @param {Function} wrappedOperations Batched operations wrapped in a function. * @returns {*} Returns result from the wrappedOperations callback. 
* @since 8.3.0 * @example * ```js * hot.batchRender(() => { * hot.alter('insert_row', 5, 45); * hot.alter('insert_col', 10, 40); * hot.setDataAtCell(1, 1, 'John'); * hot.setDataAtCell(2, 2, 'Mark'); * hot.setDataAtCell(3, 3, 'Ann'); * hot.setDataAtCell(4, 4, 'Sophia'); * hot.setDataAtCell(5, 5, 'Mia'); * hot.selectCell(0, 0); * // The table will be rendered once after executing the callback * }); * ``` */ this.batchRender = function(wrappedOperations) { this.suspendRender(); const result = wrappedOperations(); this.resumeRender(); return result; }; /** * Checks if the table indexes recalculation process was suspended. See explanation * in {@link Core#suspendExecution}. * * @memberof Core# * @function isExecutionSuspended * @since 8.3.0 * @returns {boolean} */ this.isExecutionSuspended = function() { return this.executionSuspendedCounter > 0; }; /** * Suspends the execution process. It's helpful to wrap the table logic changes * such as index changes into one call after which the cache is updated. As a result, * it improves the performance of wrapped operations. * * The method is intended to be used by advanced users. Suspending the execution * process could cause visual glitches caused by not updated the internal table cache. * * @memberof Core# * @function suspendExecution * @since 8.3.0 * @example * ```js * hot.suspendExecution(); * const filters = hot.getPlugin('filters'); * * filters.addCondition(2, 'contains', ['3']); * filters.filter(); * hot.getPlugin('columnSorting').sort({ column: 1, sortOrder: 'desc' }); * hot.resumeExecution(); // It updates the cache internally * ``` */ this.suspendExecution = function() { this.executionSuspendedCounter += 1; this.columnIndexMapper.suspendOperations(); this.rowIndexMapper.suspendOperations(); }; /** * Resumes the execution process. In combination with the {@link Core#suspendExecution} * method it allows aggregating the table logic changes after which the cache is * updated. Resuming the state automatically invokes the table cache updating process. * * The method is intended to be used by advanced users. Suspending the execution * process could cause visual glitches caused by not updated the internal table cache. * * @memberof Core# * @function resumeExecution * @param {boolean} [forceFlushChanges=false] If `true`, the table internal data cache * is recalculated after the execution of the batched operations. For nested * {@link Core#batchExecution} calls, it can be desire to recalculate the table * after each batch. * @since 8.3.0 * @example * ```js * hot.suspendExecution(); * const filters = hot.getPlugin('filters'); * * filters.addCondition(2, 'contains', ['3']); * filters.filter(); * hot.getPlugin('columnSorting').sort({ column: 1, sortOrder: 'desc' }); * hot.resumeExecution(); // It updates the cache internally * ``` */ this.resumeExecution = function(forceFlushChanges = false) { const nextValue = this.executionSuspendedCounter - 1; this.executionSuspendedCounter = Math.max(nextValue, 0); if ((!this.isExecutionSuspended() && nextValue === this.executionSuspendedCounter) || forceFlushChanges) { this.columnIndexMapper.resumeOperations(); this.rowIndexMapper.resumeOperations(); } }; /** * The method aggregates multi-line API calls into a callback and postpones the * table execution process. After the execution of the operations, the internal table * cache is recalculated once. As a result, it improves the performance of wrapped * operations. Without batching, a similar case could trigger multiple table cache rebuilds. 
* * @memberof Core# * @function batchExecution * @param {Function} wrappedOperations Batched operations wrapped in a function. * @param {boolean} [forceFlushChanges=false] If `true`, the table internal data cache * is recalculated after the execution of the batched operations. For nested calls, * it can be a desire to recalculate the table after each batch. * @returns {*} Returns result from the wrappedOperations callback. * @since 8.3.0 * @example * ```js * hot.batchExecution(() => { * const filters = hot.getPlugin('filters'); * * filters.addCondition(2, 'contains', ['3']); * filters.filter(); * hot.getPlugin('columnSorting').sort({ column: 1, sortOrder: 'desc' }); * // The table cache will be recalculated once after executing the callback * }); * ``` */ this.batchExecution = function(wrappedOperations, forceFlushChanges = false) { this.suspendExecution(); const result = wrappedOperations(); this.resumeExecution(forceFlushChanges); return result; }; /** * It batches the rendering process and index recalculations. The method aggregates * multi-line API calls into a callback and postpones the table rendering process * as well aggregates the table logic changes such as index changes into one call * after which the cache is updated. After the execution of the operations, the * table is rendered, and the cache is updated once. As a result, it improves the * performance of wrapped operations. * * @memberof Core# * @function batch * @param {Function} wrappedOperations Batched operations wrapped in a function. * @returns {*} Returns result from the wrappedOperations callback. * @since 8.3.0 * @example * ```js * hot.batch(() => { * hot.alter('insert_row', 5, 45); * hot.alter('insert_col', 10, 40); * hot.setDataAtCell(1, 1, 'x'); * hot.setDataAtCell(2, 2, 'c'); * hot.setDataAtCell(3, 3, 'v'); * hot.setDataAtCell(4, 4, 'b'); * hot.setDataAtCell(5, 5, 'n'); * hot.selectCell(0, 0); * * const filters = hot.getPlugin('filters'); * * filters.addCondition(2, 'contains', ['3']); * filters.filter(); * hot.getPlugin('columnSorting').sort({ column: 1, sortOrder: 'desc' }); * // The table will be re-rendered and cache will be recalculated once after executing the callback * }); * ``` */ this.batch = function(wrappedOperations) { this.suspendRender(); this.suspendExecution(); const result = wrappedOperations(); this.resumeExecution(); this.resumeRender(); return result; }; /** * Updates dimensions of the table. The method compares previous dimensions with the current ones and updates accordingly. * * @memberof Core# * @function refreshDimensions * @fires Hooks#beforeRefreshDimensions * @fires Hooks#afterRefreshDimensions */ this.refreshDimensions = function() { if (!instance.view) { return; } const { width: lastWidth, height: lastHeight } = instance.view.getLastSize(); const { width, height } = instance.rootElement.getBoundingClientRect(); const isSizeChanged = width !== lastWidth || height !== lastHeight; const isResizeBlocked = instance.runHooks( 'beforeRefreshDimensions', { width: lastWidth, height: lastHeight }, { width, height }, isSizeChanged ) === false; if (isResizeBlocked) { return; } if (isSizeChanged || instance.view.wt.wtOverlays.scrollableElement === instance.rootWindow) { instance.view.setLastSize(width, height); instance.render(); } instance.runHooks( 'afterRefreshDimensions', { width: lastWidth, height: lastHeight }, { width, height }, isSizeChanged ); }; /** * Loads new data to Handsontable. Loading new data resets the cell meta. 
* Since 8.0.0 loading new data also resets states corresponding to rows and columns * (for example, row/column sequence, column width, row height, frozen columns etc.). * * @memberof Core# * @function setData * @since 11.1.0 * @param {Array} data Array of arrays or array of objects containing data. * @param {string} [source] Source of the `setData` call. * @fires Hooks#beforeLoadData * @fires Hooks#beforeSetData * @fires Hooks#beforeUpdateData * @fires Hooks#afterLoadData * @fires Hooks#afterSetData * @fires Hooks#afterUpdateData * @fires Hooks#afterChange */ this.setData = function(data, source) { replaceData( data, (newDataMap) => { datamap = newDataMap; }, () => { metaManager.clearCellsCache(); instance.initIndexMappers(); grid.adjustRowsAndCols(); if (firstRun) { firstRun = [null, 'loadData']; } }, { hotInstance: instance, dataMap: datamap, dataSource, internalSource: 'setData', source, firstRun }); }; /** * Replaces the dataset with a new one. Unlike `setData` and the `loadData` methods, it doesn't reset the * state of the table. * * @memberof Core# * @function updateData * @since 11.1.0 * @param {Array} data Array of arrays or array of objects containing data. * @param {string} [source] Source of the `updateData` call. * @fires Hooks#beforeUpdateData * @fires Hooks#afterUpdateData * @fires Hooks#afterChange */ this.updateData = function(data, source) { replaceData( data, (newDataMap) => { datamap = newDataMap; }, (newDataMap) => { datamap = newDataMap; instance.columnIndexMapper.fitToLength(this.getInitialColumnCount()); instance.rowIndexMapper.fitToLength(this.countSourceRows()); grid.adjustRowsAndCols(); }, { hotInstance: instance, dataMap: datamap, dataSource, internalSource: 'updateData', source, firstRun }); }; /** * Loads new data to Handsontable. Loading new data resets the cell meta. * Since 8.0.0 loading new data also resets states corresponding to rows and columns * (for example, row/column sequence, column width, row height, frozen columns etc.). * * @memberof Core# * @function loadData * @param {Array} data Array of arrays or array of objects containing data. * @param {string} [source] Source of the loadData call. * @fires Hooks#beforeLoadData * @fires Hooks#beforeSetData * @fires Hooks#afterLoadData * @fires Hooks#afterSetData * @fires Hooks#afterChange */ this.loadData = function(data, source) { // Legacy alias for `setData` - these two should be kept in sync. replaceData( data, (newDataMap) => { datamap = newDataMap; }, () => { metaManager.clearCellsCache(); instance.initIndexMappers(); grid.adjustRowsAndCols(); if (firstRun) { firstRun = [null, 'loadData']; } }, { hotInstance: instance, dataMap: datamap, dataSource, internalSource: 'loadData', source, firstRun }); }; /** * Gets the initial column count, calculated based on the `columns` setting. * * @private * @returns {number} The calculated number of columns. */ this.getInitialColumnCount = function() { const columnsSettings = tableMeta.columns; let finalNrOfColumns = 0; // We will check number of columns when the `columns` property was defined as an array. Columns option may // narrow down or expand displayed dataset in that case. 
if (Array.isArray(columnsSettings)) { finalNrOfColumns = columnsSettings.length; } else if (isFunction(columnsSettings)) { if (instance.dataType === 'array') { const nrOfSourceColumns = this.countSourceCols(); for (let columnIndex = 0; columnIndex < nrOfSourceColumns; columnIndex += 1) { if (columnsSettings(columnIndex)) { finalNrOfColumns += 1; } } // Extended dataset by the `columns` property? Moved code right from the refactored `countCols` method. } else if (instance.dataType === 'object' || instance.dataType === 'function') { finalNrOfColumns = datamap.colToPropCache.length; } // In some cases we need to check columns length from the schema, i.e. `data` may be empty. } else if (isDefined(tableMeta.dataSchema)) { const schema = datamap.getSchema(); // Schema may be defined as an array of objects. Each object will define column. finalNrOfColumns = Array.isArray(schema) ? schema.length : deepObjectSize(schema); } else { // We init index mappers by length of source data to provide indexes also for skipped indexes. finalNrOfColumns = this.countSourceCols(); } return finalNrOfColumns; }; /** * Init index mapper which manage indexes assigned to the data. * * @private */ this.initIndexMappers = function() { this.columnIndexMapper.initToLength(this.getInitialColumnCount()); this.rowIndexMapper.initToLength(this.countSourceRows()); }; /** * Returns the current data object (the same one that was passed by `data` configuration option or `loadData` method, * unless some modifications have been applied (i.e. Sequence of rows/columns was changed, some row/column was skipped). * If that's the case - use the {@link Core#getSourceData} method.). * * Optionally you can provide cell range by defining `row`, `column`, `row2`, `column2` to get only a fragment of table data. * * @memberof Core# * @function getData * @param {number} [row] From visual row index. * @param {number} [column] From visual column index. * @param {number} [row2] To visual row index. * @param {number} [column2] To visual column index. * @returns {Array[]} Array with the data. * @example * ```js * // Get all data (in order how it is rendered in the table). * hot.getData(); * // Get data fragment (from top-left 0, 0 to bottom-right 3, 3). * hot.getData(3, 3); * // Get data fragment (from top-left 2, 1 to bottom-right 3, 3). * hot.getData(2, 1, 3, 3); * ``` */ this.getData = function(row, column, row2, column2) { if (isUndefined(row)) { return datamap.getAll(); } return datamap.getRange(new CellCoords(row, column), new CellCoords(row2, column2), datamap.DESTINATION_RENDERER); }; /** * Returns a string value of the selected range. Each column is separated by tab, each row is separated by a new * line character. * * @memberof Core# * @function getCopyableText * @param {number} startRow From visual row index. * @param {number} startCol From visual column index. * @param {number} endRow To visual row index. * @param {number} endCol To visual column index. * @returns {string} */ this.getCopyableText = function(startRow, startCol, endRow, endCol) { return datamap.getCopyableText(new CellCoords(startRow, startCol), new CellCoords(endRow, endCol)); }; /** * Returns the data's copyable value at specified `row` and `column` index. * * @memberof Core# * @function getCopyableData * @param {number} row Visual row index. * @param {number} column Visual column index. * @returns {string} */ this.getCopyableData = function(row, column) { return datamap.getCopyable(row, datamap.colToProp(column)); }; /** * Returns schema provided by constructor settings. 
If it doesn't exist then it returns the schema based on the data * structure in the first row. * * @memberof Core# * @function getSchema * @returns {object} Schema object. */ this.getSchema = function() { return datamap.getSchema(); }; /** * Use it if you need to change configuration after initialization. The `settings` argument is an object containing the new * settings, declared the same way as in the initial settings object. * * __Note__, that although the `updateSettings` method doesn't overwrite the previously declared settings, it might reset * the settings made post-initialization. (for example - ignore changes made using the columnResize feature). * * Since 8.0.0 passing `columns` or `data` inside `settings` objects will result in resetting states corresponding to rows and columns * (for example, row/column sequence, column width, row height, frozen columns etc.). * * @memberof Core# * @function updateSettings * @param {object} settings New settings object (see {@link Options}). * @param {boolean} [init=false] Internally used for in initialization mode. * @example * ```js * hot.updateSettings({ * contextMenu: true, * colHeaders: true, * fixedRowsTop: 2 * }); * ``` * @fires Hooks#afterCellMetaReset * @fires Hooks#afterUpdateSettings */ this.updateSettings = function(settings, init = false) { // TODO: uncomment the next line with the next major version update // Do not forget to re-enable the pending tests that cover the change: // * https://github.com/handsontable/handsontable/blob/9f62c282a1c951b27cd8406aa27105bd32b05bb6/handsontable/test/e2e/core/toPhysicalColumn.spec.js#L70 // * https://github.com/handsontable/handsontable/blob/9f62c282a1c951b27cd8406aa27105bd32b05bb6/handsontable/test/e2e/core/toVisualColumn.spec.js#L70 // const dataUpdateFunction = (firstRun ? instance.setData : instance.updateData).bind(this); const dataUpdateFunction = instance.setData.bind(this); let columnsAsFunc = false; let i; let j; if (isDefined(settings.rows)) { throw new Error('The "rows" setting is no longer supported. Do you mean startRows, minRows or maxRows?'); } if (isDefined(settings.cols)) { throw new Error('The "cols" setting is no longer supported. 
Do you mean startCols, minCols or maxCols?'); } if (isDefined(settings.ganttChart)) { throw new Error('Since 8.0.0 the "ganttChart" setting is no longer supported.'); } // eslint-disable-next-line no-restricted-syntax for (i in settings) { if (i === 'data') { /* eslint-disable-next-line no-continue */ continue; // loadData will be triggered later } else if (i === 'language') { setLanguage(settings.language); /* eslint-disable-next-line no-continue */ continue; } else if (i === 'className') { setClassName('className', settings.className); } else if (i === 'tableClassName' && instance.table) { setClassName('tableClassName', settings.tableClassName); instance.view.wt.wtOverlays.syncOverlayTableClassNames(); } else if (Hooks.getSingleton().isRegistered(i) || Hooks.getSingleton().isDeprecated(i)) { if (isFunction(settings[i]) || Array.isArray(settings[i])) { settings[i].initialHook = true; instance.addHook(i, settings[i]); } } else if (!init && hasOwnProperty(settings, i)) { // Update settings globalMeta[i] = settings[i]; } } // Load data or create data map if (settings.data === void 0 && tableMeta.data === void 0) { dataUpdateFunction(null, 'updateSettings'); // data source created just now } else if (settings.data !== void 0) { dataUpdateFunction(settings.data, 'updateSettings'); // data source given as option } else if (settings.columns !== void 0) { datamap.createMap(); // The `column` property has changed - dataset may be expanded or narrowed down. The `loadData` do the same. instance.initIndexMappers(); } const clen = instance.countCols(); const columnSetting = tableMeta.columns; // Init columns constructors configuration if (columnSetting && isFunction(columnSetting)) { columnsAsFunc = true; } // Clear cell meta cache if (settings.cell !== void 0 || settings.cells !== void 0 || settings.columns !== void 0) { metaManager.clearCache(); } if (clen > 0) { for (i = 0, j = 0; i < clen; i++) { // Use settings provided by user if (columnSetting) { const column = columnsAsFunc ? columnSetting(i) : columnSetting[j]; if (column) { metaManager.updateColumnMeta(j, column); } } j += 1; } } if (isDefined(settings.cell)) { objectEach(settings.cell, (cell) => { instance.setCellMetaObject(cell.row, cell.col, cell); }); } instance.runHooks('afterCellMetaReset'); let currentHeight = instance.rootElement.style.height; if (currentHeight !== '') { currentHeight = parseInt(instance.rootElement.style.height, 10); } let height = settings.height; if (isFunction(height)) { height = height(); } if (init) { const initialStyle = instance.rootElement.getAttribute('style'); if (initialStyle) { instance.rootElement.setAttribute('data-initialstyle', instance.rootElement.getAttribute('style')); } } if (height === null) { const initialStyle = instance.rootElement.getAttribute('data-initialstyle'); if (initialStyle && (initialStyle.indexOf('height') > -1 || initialStyle.indexOf('overflow') > -1)) { instance.rootElement.setAttribute('style', initialStyle); } else { instance.rootElement.style.height = ''; instance.rootElement.style.overflow = ''; } } else if (height !== void 0) { instance.rootElement.style.height = isNaN(height) ? `${height}` : `${height}px`; instance.rootElement.style.overflow = 'hidden'; } if (typeof settings.width !== 'undefined') { let width = settings.width; if (isFunction(width)) { width = width(); } instance.rootElement.style.width = isNaN(width) ? 
`${width}` : `${width}px`; } if (!init) { if (instance.view) { instance.view.wt.wtViewport.resetHasOversizedColumnHeadersMarked(); instance.view.wt.exportSettingsAsClassNames(); } instance.runHooks('afterUpdateSettings', settings); } grid.adjustRowsAndCols(); if (instance.view && !firstRun) { instance.forceFullRender = true; // used when data was changed editorManager.lockEditor(); instance._refreshBorders(null); instance.view.wt.wtOverlays.adjustElementsSize(); editorManager.unlockEditor(); } if (!init && instance.view && (currentHeight === '' || height === '' || height === void 0) && currentHeight !== height) { instance.view.wt.wtOverlays.updateMainScrollableElements(); } }; /** * Get value from the selected cell. * * @memberof Core# * @function getValue * @returns {*} Value of selected cell. */ this.getValue = function() { const sel = instance.getSelectedLast(); if (tableMeta.getValue) { if (isFunction(tableMeta.getValue)) { return tableMeta.getValue.call(instance); } else if (sel) { return instance.getData()[sel[0][0]][tableMeta.getValue]; } } else if (sel) { return instance.getDataAtCell(sel[0], sel[1]); } }; /** * Returns the object settings. * * @memberof Core# * @function getSettings * @returns {object} Object containing the current table settings. */ this.getSettings = function() { return tableMeta; }; /** * Clears the data from the table (the table settings remain intact). * * @memberof Core# * @function clear */ this.clear = function() { this.selectAll(); this.emptySelectedCells(); }; /** * Allows altering the table structure by either inserting/removing rows or columns. * This method works with an array data structure only. * * @memberof Core# * @function alter * @param {string} action Possible alter operations: * <ul> * <li> `'insert_row'` </li> * <li> `'insert_col'` </li> * <li> `'remove_row'` </li> * <li> `'remove_col'` </li> * </ul>. * @param {number|number[]} index Visual index of the row/column before which the new row/column will be * inserted/removed or an array of arrays in format `[[index, amount],...]`. * @param {number} [amount=1] Amount of rows/columns to be inserted or removed. * @param {string} [source] Source indicator. * @param {boolean} [keepEmptyRows] Flag for preventing deletion of empty rows. * @example * ```js * // Insert new row above the row at given visual index. * hot.alter('insert_row', 10); * // Insert 3 new columns before 10th column. * hot.alter('insert_col', 10, 3); * // Remove 2 rows starting from 10th row. * hot.alter('remove_row', 10, 2); * // Remove 5 non-contiquous rows (it removes 3 rows from visual index 1 and 2 rows from visual index 5). * hot.alter('remove_row', [[1, 3], [5, 2]]); * ``` */ this.alter = function(action, index, amount, source, keepEmptyRows) { grid.alter(action, index, amount, source, keepEmptyRows); }; /** * Returns a TD element for the given `row` and `column` arguments, if it is rendered on screen. * Returns `null` if the TD is not rendered on screen (probably because that part of the table is not visible). * * @memberof Core# * @function getCell * @param {number} row Visual row index. * @param {number} column Visual column index. * @param {boolean} [topmost=false] If set to `true`, it returns the TD element from the topmost overlay. For example, * if the wanted cell is in the range of fixed rows, it will return a TD element from the `top` overlay. * @returns {HTMLTableCellElement|null} The cell's TD element. 
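 * @example
 * ```js
 * // A minimal sketch: the TD of visual cell (1, 1), or null when that cell is not rendered.
 * const td = hot.getCell(1, 1);
 * ```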
*/ this.getCell = function(row, column, topmost = false) { let renderableColumnIndex = column; // Handling also column headers. let renderableRowIndex = row; // Handling also row headers. if (column >= 0) { if (this.columnIndexMapper.isHidden(this.toPhysicalColumn(column))) { return null; } renderableColumnIndex = this.columnIndexMapper.getRenderableFromVisualIndex(column); } if (row >= 0) { if (this.rowIndexMapper.isHidden(this.toPhysicalRow(row))) { return null; } renderableRowIndex = this.rowIndexMapper.getRenderableFromVisualIndex(row); } if (renderableRowIndex === null || renderableColumnIndex === null) { return null; } return instance.view.getCellAtCoords(new CellCoords(renderableRowIndex, renderableColumnIndex), topmost); }; /** * Returns the coordinates of the cell, provided as a HTML table cell element. * * @memberof Core# * @function getCoords * @param {HTMLTableCellElement} element The HTML Element representing the cell. * @returns {CellCoords|null} Visual coordinates object. * @example * ```js * hot.getCoords(hot.getCell(1, 1)); * // it returns CellCoords object instance with props row: 1 and col: 1. * ``` */ this.getCoords = function(element) { const renderableCoords = this.view.wt.wtTable.getCoords(element); if (renderableCoords === null) { return null; } const { row: renderableRow, col: renderableColumn } = renderableCoords; let visualRow = renderableRow; let visualColumn = renderableColumn; if (renderableRow >= 0) { visualRow = this.rowIndexMapper.getVisualFromRenderableIndex(renderableRow); } if (renderableColumn >= 0) { visualColumn = this.columnIndexMapper.getVisualFromRenderableIndex(renderableColumn); } return new CellCoords(visualRow, visualColumn); }; /** * Returns the property name that corresponds with the given column index. * If the data source is an array of arrays, it returns the columns index. * * @memberof Core# * @function colToProp * @param {number} column Visual column index. * @returns {string|number} Column property or physical column index. */ this.colToProp = function(column) { return datamap.colToProp(column); }; /** * Returns column index that corresponds with the given property. * * @memberof Core# * @function propToCol * @param {string|number} prop Property name or physical column index. * @returns {number} Visual column index. */ this.propToCol = function(prop) { return datamap.propToCol(prop); }; /** * Translate physical row index into visual. * * This method is useful when you want to retrieve visual row index which can be reordered, moved or trimmed * based on a physical index. * * @memberof Core# * @function toVisualRow * @param {number} row Physical row index. * @returns {number} Returns visual row index. */ this.toVisualRow = row => this.rowIndexMapper.getVisualFromPhysicalIndex(row); /** * Translate physical column index into visual. * * This method is useful when you want to retrieve visual column index which can be reordered, moved or trimmed * based on a physical index. * * @memberof Core# * @function toVisualColumn * @param {number} column Physical column index. * @returns {number} Returns visual column index. */ this.toVisualColumn = column => this.columnIndexMapper.getVisualFromPhysicalIndex(column); /** * Translate visual row index into physical. * * This method is useful when you want to retrieve physical row index based on a visual index which can be * reordered, moved or trimmed. * * @memberof Core# * @function toPhysicalRow * @param {number} row Visual row index. * @returns {number} Returns physical row index. 
*/ this.toPhysicalRow = row => this.rowIndexMapper.getPhysicalFromVisualIndex(row); /** * Translate visual column index into physical. * * This method is useful when you want to retrieve physical column index based on a visual index which can be * reordered, moved or trimmed. * * @memberof Core# * @function toPhysicalColumn * @param {number} column Visual column index. * @returns {number} Returns physical column index. */ this.toPhysicalColumn = column => this.columnIndexMapper.getPhysicalFromVisualIndex(column); /** * @description * Returns the cell value at `row`, `column`. * * __Note__: If data is reordered, sorted or trimmed, the currently visible order will be used. * * @memberof Core# * @function getDataAtCell * @param {number} row Visual row index. * @param {number} column Visual column index. * @returns {*} Data at cell. */ this.getDataAtCell = function(row, column) { return datamap.get(row, datamap.colToProp(column)); }; /** * Returns value at visual `row` and `prop` indexes. * * __Note__: If data is reordered, sorted or trimmed, the currently visible order will be used. * * @memberof Core# * @function getDataAtRowProp * @param {number} row Visual row index. * @param {string} prop Property name. * @returns {*} Cell value. */ this.getDataAtRowProp = function(row, prop) { return datamap.get(row, prop); }; /** * @description * Returns array of column values from the data source. * * __Note__: If columns were reordered or sorted, the currently visible order will be used. * * @memberof Core# * @function getDataAtCol * @param {number} column Visual column index. * @returns {Array} Array of cell values. */ this.getDataAtCol = function(column) { return [].concat(...datamap.getRange( new CellCoords(0, column), new CellCoords(tableMeta.data.length - 1, column), datamap.DESTINATION_RENDERER )); }; /** * Given the object property name (e.g. `'first.name'` or `'0'`), returns an array of column's values from the table data. * You can also provide a column index as the first argument. * * @memberof Core# * @function getDataAtProp * @param {string|number} prop Property name or physical column index. * @returns {Array} Array of cell values. */ // TODO: Getting data from `datamap` should work on visual indexes. this.getDataAtProp = function(prop) { const range = datamap.getRange( new CellCoords(0, datamap.propToCol(prop)), new CellCoords(tableMeta.data.length - 1, datamap.propToCol(prop)), datamap.DESTINATION_RENDERER); return [].concat(...range); }; /** * Returns a clone of the source data object. * Optionally you can provide a cell range by using the `row`, `column`, `row2`, `column2` arguments, to get only a * fragment of the table data. * * __Note__: This method does not participate in data transformation. If the visual data of the table is reordered, * sorted or trimmed only physical indexes are correct. * * @memberof Core# * @function getSourceData * @param {number} [row] From physical row index. * @param {number} [column] From physical column index (or visual index, if data type is an array of objects). * @param {number} [row2] To physical row index. * @param {number} [column2] To physical column index (or visual index, if data type is an array of objects). * @returns {Array[]|object[]} The table data. 
*/ this.getSourceData = function(row, column, row2, column2) { let data; if (row === void 0) { data = dataSource.getData(); } else { data = dataSource.getByRange(new CellCoords(row, column), new CellCoords(row2, column2)); } return data; }; /** * Returns the source data object as an arrays of arrays format even when source data was provided in another format. * Optionally you can provide a cell range by using the `row`, `column`, `row2`, `column2` arguments, to get only a * fragment of the table data. * * __Note__: This method does not participate in data transformation. If the visual data of the table is reordered, * sorted or trimmed only physical indexes are correct. * * @memberof Core# * @function getSourceDataArray * @param {number} [row] From physical row index. * @param {number} [column] From physical column index (or visual index, if data type is an array of objects). * @param {number} [row2] To physical row index. * @param {number} [column2] To physical column index (or visual index, if data type is an array of objects). * @returns {Array} An array of arrays. */ this.getSourceDataArray = function(row, column, row2, column2) { let data; if (row === void 0) { data = dataSource.getData(true); } else { data = dataSource.getByRange(new CellCoords(row, column), new CellCoords(row2, column2), true); } return data; }; /** * Returns an array of column values from the data source. * * @memberof Core# * @function getSourceDataAtCol * @param {number} column Visual column index. * @returns {Array} Array of the column's cell values. */ // TODO: Getting data from `sourceData` should work always on physical indexes. this.getSourceDataAtCol = function(column) { return dataSource.getAtColumn(column); }; /* eslint-disable jsdoc/require-param */ /** * Set the provided value in the source data set at the provided coordinates. * * @memberof Core# * @function setSourceDataAtCell * @param {number|Array} row Physical row index or array of changes in format `[[row, prop, value], ...]`. * @param {number|string} column Physical column index / prop name. * @param {*} value The value to be set at the provided coordinates. * @param {string} [source] Source of the change as a string. */ /* eslint-enable jsdoc/require-param */ this.setSourceDataAtCell = function(row, column, value, source) { const input = setDataInputToArray(row, column, value); const isThereAnySetSourceListener = this.hasHook('afterSetSourceDataAtCell'); const changesForHook = []; if (isThereAnySetSourceListener) { arrayEach(input, ([changeRow, changeProp, changeValue]) => { changesForHook.push([ changeRow, changeProp, dataSource.getAtCell(changeRow, changeProp), // The previous value. changeValue, ]); }); } arrayEach(input, ([changeRow, changeProp, changeValue]) => { dataSource.setAtCell(changeRow, changeProp, changeValue); }); if (isThereAnySetSourceListener) { this.runHooks('afterSetSourceDataAtCell', changesForHook, source); } this.render(); const activeEditor = instance.getActiveEditor(); if (activeEditor && isDefined(activeEditor.refreshValue)) { activeEditor.refreshValue(); } }; /** * Returns a single row of the data (array or object, depending on what data format you use). * * __Note__: This method does not participate in data transformation. If the visual data of the table is reordered, * sorted or trimmed only physical indexes are correct. * * @memberof Core# * @function getSourceDataAtRow * @param {number} row Physical row index. * @returns {Array|object} Single row of data. 
*/ this.getSourceDataAtRow = function(row) { return dataSource.getAtRow(row); }; /** * Returns a single value from the data source. * * @memberof Core# * @function getSourceDataAtCell * @param {number} row Physical row index. * @param {number} column Visual column index. * @returns {*} Cell data. */ // TODO: Getting data from `sourceData` should work always on physical indexes. this.getSourceDataAtCell = function(row, column) { return dataSource.getAtCell(row, column); }; /** * @description * Returns a single row of the data. * * __Note__: If rows were reordered, sorted or trimmed, the currently visible order will be used. * * @memberof Core# * @function getDataAtRow * @param {number} row Visual row index. * @returns {Array} Array of row's cell data. */ this.getDataAtRow = function(row) { const data = datamap.getRange( new CellCoords(row, 0), new CellCoords(row, this.countCols() - 1), datamap.DESTINATION_RENDERER ); return data[0] || []; }; /** * @description * Returns a data type defined in the Handsontable settings under the `type` key ({@link Options#type}). * If there are cells with different types in the selected range, it returns `'mixed'`. * * __Note__: If data is reordered, sorted or trimmed, the currently visible order will be used. * * @memberof Core# * @function getDataType * @param {number} rowFrom From visual row index. * @param {number} columnFrom From visual column index. * @param {number} rowTo To visual row index. * @param {number} columnTo To visual column index. * @returns {string} Cell type (e.q: `'mixed'`, `'text'`, `'numeric'`, `'autocomplete'`). */ this.getDataType = function(rowFrom, columnFrom, rowTo, columnTo) { const coords = rowFrom === void 0 ? [0, 0, this.countRows(), this.countCols()] : [rowFrom, columnFrom, rowTo, columnTo]; const [rowStart, columnStart] = coords; let [,, rowEnd, columnEnd] = coords; let previousType = null; let currentType = null; if (rowEnd === void 0) { rowEnd = rowStart; } if (columnEnd === void 0) { columnEnd = columnStart; } let type = 'mixed'; rangeEach(Math.max(Math.min(rowStart, rowEnd), 0), Math.max(rowStart, rowEnd), (row) => { let isTypeEqual = true; rangeEach(Math.max(Math.min(columnStart, columnEnd), 0), Math.max(columnStart, columnEnd), (column) => { const cellType = this.getCellMeta(row, column); currentType = cellType.type; if (previousType) { isTypeEqual = previousType === currentType; } else { previousType = currentType; } return isTypeEqual; }); type = isTypeEqual ? currentType : 'mixed'; return isTypeEqual; }); return type; }; /** * Remove a property defined by the `key` argument from the cell meta object for the provided `row` and `column` coordinates. * * @memberof Core# * @function removeCellMeta * @param {number} row Visual row index. * @param {number} column Visual column index. * @param {string} key Property name. * @fires Hooks#beforeRemoveCellMeta * @fires Hooks#afterRemoveCellMeta */ this.removeCellMeta = function(row, column, key) { const [physicalRow, physicalColumn] = [this.toPhysicalRow(row), this.toPhysicalColumn(column)]; let cachedValue = metaManager.getCellMetaKeyValue(physicalRow, physicalColumn, key); const hookResult = instance.runHooks('beforeRemoveCellMeta', row, column, key, cachedValue); if (hookResult !== false) { metaManager.removeCellMeta(physicalRow, physicalColumn, key); instance.runHooks('afterRemoveCellMeta', row, column, key, cachedValue); } cachedValue = null; }; /** * Removes or adds one or more rows of the cell meta objects to the cell meta collections. 
* * @since 0.30.0 * @memberof Core# * @function spliceCellsMeta * @param {number} visualIndex A visual index that specifies at what position to add/remove items. * @param {number} [deleteAmount=0] The number of items to be removed. If set to 0, no cell meta objects will be removed. * @param {...object} [cellMetaRows] The new cell meta row objects to be added to the cell meta collection. */ this.spliceCellsMeta = function(visualIndex, deleteAmount = 0, ...cellMetaRows) { if (cellMetaRows.length > 0 && !Array.isArray(cellMetaRows[0])) { throw new Error('The 3rd argument (cellMetaRows) has to be passed as an array of cell meta objects array.'); } if (deleteAmount > 0) { metaManager.removeRow(this.toPhysicalRow(visualIndex), deleteAmount); } if (cellMetaRows.length > 0) { arrayEach(cellMetaRows.reverse(), (cellMetaRow) => { metaManager.createRow(this.toPhysicalRow(visualIndex)); arrayEach(cellMetaRow, (cellMeta, columnIndex) => this.setCellMetaObject(visualIndex, columnIndex, cellMeta)); }); } instance.render(); }; /** * Set cell meta data object defined by `prop` to the corresponding params `row` and `column`. * * @memberof Core# * @function setCellMetaObject * @param {number} row Visual row index. * @param {number} column Visual column index. * @param {object} prop Meta object. */ this.setCellMetaObject = function(row, column, prop) { if (typeof prop === 'object') { objectEach(prop, (value, key) => { this.setCellMeta(row, column, key, value); }); } }; /** * Sets a property defined by the `key` property to the meta object of a cell corresponding to params `row` and `column`. * * @memberof Core# * @function setCellMeta * @param {number} row Visual row index. * @param {number} column Visual column index. * @param {string} key Property name. * @param {string} value Property value. * @fires Hooks#beforeSetCellMeta * @fires Hooks#afterSetCellMeta */ this.setCellMeta = function(row, column, key, value) { const allowSetCellMeta = instance.runHooks('beforeSetCellMeta', row, column, key, value); if (allowSetCellMeta === false) { return; } let physicalRow = row; let physicalColumn = column; if (row < this.countRows()) { physicalRow = this.toPhysicalRow(row); } if (column < this.countCols()) { physicalColumn = this.toPhysicalColumn(column); } metaManager.setCellMeta(physicalRow, physicalColumn, key, value); instance.runHooks('afterSetCellMeta', row, column, key, value); }; /** * Get all the cells meta settings at least once generated in the table (in order of cell initialization). * * @memberof Core# * @function getCellsMeta * @returns {Array} Returns an array of ColumnSettings object instances. */ this.getCellsMeta = function() { return metaManager.getCellsMeta(); }; /** * Returns the cell properties object for the given `row` and `column` coordinates. * * @memberof Core# * @function getCellMeta * @param {number} row Visual row index. * @param {number} column Visual column index. * @returns {object} The cell properties object. * @fires Hooks#beforeGetCellMeta * @fires Hooks#afterGetCellMeta */ this.getCellMeta = function(row, column) { let physicalRow = this.toPhysicalRow(row); let physicalColumn = this.toPhysicalColumn(column); if (physicalRow === null) { physicalRow = row; } if (physicalColumn === null) { physicalColumn = column; } return metaManager.getCellMeta(physicalRow, physicalColumn, { visualRow: row, visualColumn: column, }); }; /** * Returns an array of cell meta objects for specified physical row index. 
* * @memberof Core# * @function getCellMetaAtRow * @param {number} row Physical row index. * @returns {Array} */ this.getCellMetaAtRow = function(row) { return metaManager.getCellsMetaAtRow(row); }; /** * Checks if the data format and config allows user to modify the column structure. * * @memberof Core# * @function isColumnModificationAllowed * @returns {boolean} */ this.isColumnModificationAllowed = function() { return !(instance.dataType === 'object' || tableMeta.columns); }; const rendererLookup = cellMethodLookupFactory('renderer'); /** * Returns the cell renderer function by given `row` and `column` arguments. * * @memberof Core# * @function getCellRenderer * @param {number|object} row Visual row index or cell meta object (see {@link Core#getCellMeta}). * @param {number} column Visual column index. * @returns {Function} The renderer function. * @example * ```js * // Get cell renderer using `row` and `column` coordinates. * hot.getCellRenderer(1, 1); * // Get cell renderer using cell meta object. * hot.getCellRenderer(hot.getCellMeta(1, 1)); * ``` */ this.getCellRenderer = function(row, column) { return getRenderer(rendererLookup.call(this, row, column)); }; /** * Returns the cell editor class by the provided `row` and `column` arguments. * * @memberof Core# * @function getCellEditor * @param {number} row Visual row index or cell meta object (see {@link Core#getCellMeta}). * @param {number} column Visual column index. * @returns {Function} The editor class. * @example * ```js * // Get cell editor class using `row` and `column` coordinates. * hot.getCellEditor(1, 1); * // Get cell editor class using cell meta object. * hot.getCellEditor(hot.getCellMeta(1, 1)); * ``` */ this.getCellEditor = cellMethodLookupFactory('editor'); const validatorLookup = cellMethodLookupFactory('validator'); /** * Returns the cell validator by `row` and `column`. * * @memberof Core# * @function getCellValidator * @param {number|object} row Visual row index or cell meta object (see {@link Core#getCellMeta}). * @param {number} column Visual column index. * @returns {Function|RegExp|undefined} The validator function. * @example * ```js * // Get cell valiator using `row` and `column` coordinates. * hot.getCellValidator(1, 1); * // Get cell valiator using cell meta object. * hot.getCellValidator(hot.getCellMeta(1, 1)); * ``` */ this.getCellValidator = function(row, column) { let validator = validatorLookup.call(this, row, column); if (typeof validator === 'string') { validator = getValidator(validator); } return validator; }; /** * Validates all cells using their validator functions and calls callback when finished. * * If one of the cells is invalid, the callback will be fired with `'valid'` arguments as `false` - otherwise it * would equal `true`. * * @memberof Core# * @function validateCells * @param {Function} [callback] The callback function. * @example * ```js * hot.validateCells((valid) => { * if (valid) { * // ... code for validated cells * } * }) * ``` */ this.validateCells = function(callback) { this._validateCells(callback); }; /** * Validates rows using their validator functions and calls callback when finished. * * If one of the cells is invalid, the callback will be fired with `'valid'` arguments as `false` - otherwise it * would equal `true`. * * @memberof Core# * @function validateRows * @param {Array} [rows] Array of validation target visual row indexes. * @param {Function} [callback] The callback function. * @example * ```js * hot.validateRows([3, 4, 5], (valid) => { * if (valid) { * // ... 
code for validated rows * } * }) * ``` */ this.validateRows = function(rows, callback) { if (!Array.isArray(rows)) { throw new Error('validateRows parameter `rows` must be an array'); } this._validateCells(callback, rows); }; /** * Validates columns using their validator functions and calls callback when finished. * * If one of the cells is invalid, the callback will be fired with `'valid'` arguments as `false` - otherwise it * would equal `true`. * * @memberof Core# * @function validateColumns * @param {Array} [columns] Array of validation target visual columns indexes. * @param {Function} [callback] The callback function. * @example * ```js * hot.validateColumns([3, 4, 5], (valid) => { * if (valid) { * // ... code for validated columns * } * }) * ``` */ this.validateColumns = function(columns, callback) { if (!Array.isArray(columns)) { throw new Error('validateColumns parameter `columns` must be an array'); } this._validateCells(callback, undefined, columns); }; /** * Validates all cells using their validator functions and calls callback when finished. * * If one of the cells is invalid, the callback will be fired with `'valid'` arguments as `false` - otherwise it would equal `true`. * * Private use intended. * * @private * @memberof Core# * @function _validateCells * @param {Function} [callback] The callback function. * @param {Array} [rows] An array of validation target visual row indexes. * @param {Array} [columns] An array of validation target visual column indexes. */ this._validateCells = function(callback, rows, columns) { const waitingForValidator = new ValidatorsQueue(); if (callback) { waitingForValidator.onQueueEmpty = callback; } let i = instance.countRows() - 1; while (i >= 0) { if (rows !== undefined && rows.indexOf(i) === -1) { i -= 1; continue; } let j = instance.countCols() - 1; while (j >= 0) { if (columns !== undefined && columns.indexOf(j) === -1) { j -= 1; continue; } waitingForValidator.addValidatorToQueue(); instance.validateCell(instance.getDataAtCell(i, j), instance.getCellMeta(i, j), (result) => { if (typeof result !== 'boolean') { throw new Error('Validation error: result is not boolean'); } if (result === false) { waitingForValidator.valid = false; } waitingForValidator.removeValidatorFormQueue(); }, 'validateCells'); j -= 1; } i -= 1; } waitingForValidator.checkIfQueueIsEmpty(); }; /** * Returns an array of row headers' values (if they are enabled). If param `row` was given, it returns the header of the given row as a string. * * @memberof Core# * @function getRowHeader * @param {number} [row] Visual row index. * @fires Hooks#modifyRowHeader * @returns {Array|string|number} Array of header values / single header value. */ this.getRowHeader = function(row) { let rowHeader = tableMeta.rowHeaders; let physicalRow = row; if (physicalRow !== void 0) { physicalRow = instance.runHooks('modifyRowHeader', physicalRow); } if (physicalRow === void 0) { rowHeader = []; rangeEach(instance.countRows() - 1, (i) => { rowHeader.push(instance.getRowHeader(i)); }); } else if (Array.isArray(rowHeader) && rowHeader[physicalRow] !== void 0) { rowHeader = rowHeader[physicalRow]; } else if (isFunction(rowHeader)) { rowHeader = rowHeader(physicalRow); } else if (rowHeader && typeof rowHeader !== 'string' && typeof rowHeader !== 'number') { rowHeader = physicalRow + 1; } return rowHeader; }; /** * Returns information about if this table is configured to display row headers. 
* * @memberof Core# * @function hasRowHeaders * @returns {boolean} `true` if the instance has the row headers enabled, `false` otherwise. */ this.hasRowHeaders = function() { return !!tableMeta.rowHeaders; }; /** * Returns information about if this table is configured to display column headers. * * @memberof Core# * @function hasColHeaders * @returns {boolean} `true` if the instance has the column headers enabled, `false` otherwise. */ this.hasColHeaders = function() { if (tableMeta.colHeaders !== void 0 && tableMeta.colHeaders !== null) { // Polymer has empty value = null return !!tableMeta.colHeaders; } for (let i = 0, ilen = instance.countCols(); i < ilen; i++) { if (instance.getColHeader(i)) { return true; } } return false; }; /** * Returns an array of column headers (in string format, if they are enabled). If param `column` is given, it * returns the header at the given column. * * @memberof Core# * @function getColHeader * @param {number} [column] Visual column index. * @fires Hooks#modifyColHeader * @returns {Array|string|number} The column header(s). */ this.getColHeader = function(column) { const columnIndex = instance.runHooks('modifyColHeader', column); let result = tableMeta.colHeaders; if (columnIndex === void 0) { const out = []; const ilen = instance.countCols(); for (let i = 0; i < ilen; i++) { out.push(instance.getColHeader(i)); } result = out; } else { const translateVisualIndexToColumns = function(visualColumnIndex) { const arr = []; const columnsLen = instance.countCols(); let index = 0; for (; index < columnsLen; index++) { if (isFunction(tableMeta.columns) && tableMeta.columns(index)) { arr.push(index); } } return arr[visualColumnIndex]; }; const physicalColumn = instance.toPhysicalColumn(columnIndex); const prop = translateVisualIndexToColumns(physicalColumn); if (tableMeta.colHeaders === false) { result = null; } else if (tableMeta.columns && isFunction(tableMeta.columns) && tableMeta.columns(prop) && tableMeta.columns(prop).title) { result = tableMeta.columns(prop).title; } else if (tableMeta.columns && tableMeta.columns[physicalColumn] && tableMeta.columns[physicalColumn].title) { result = tableMeta.columns[physicalColumn].title; } else if (Array.isArray(tableMeta.colHeaders) && tableMeta.colHeaders[physicalColumn] !== void 0) { result = tableMeta.colHeaders[physicalColumn]; } else if (isFunction(tableMeta.colHeaders)) { result = tableMeta.colHeaders(physicalColumn); } else if (tableMeta.colHeaders && typeof tableMeta.colHeaders !== 'string' && typeof tableMeta.colHeaders !== 'number') { result = spreadsheetColumnLabel(columnIndex); // see #1458 } } return result; }; /** * Return column width from settings (no guessing). Private use intended. * * @private * @memberof Core# * @function _getColWidthFromSettings * @param {number} col Visual col index. * @returns {number} */ this._getColWidthFromSettings = function(col) { let width; // We currently don't support cell meta objects for headers (negative values) if (col >= 0) { const cellProperties = instance.getCellMeta(0, col); width = cellProperties.width; } if (width === void 0 || width === tableMeta.width) { width = tableMeta.colWidths; } if (width !== void 0 && width !== null) { switch (typeof width) { case 'object': // array width = width[col]; break; case 'function': width = width(col); break; default: break; } if (typeof width === 'string') { width = parseInt(width, 10); } } return width; }; /** * Returns the width of the requested column. 
* * @memberof Core# * @function getColWidth * @param {number} column Visual column index. * @returns {number} Column width. * @fires Hooks#modifyColWidth */ this.getColWidth = function(column) { let width = instance._getColWidthFromSettings(column); width = instance.runHooks('modifyColWidth', width, column); if (width === void 0) { width = ViewportColumnsCalculator.DEFAULT_WIDTH; } return width; }; /** * Return row height from settings (no guessing). Private use intended. * * @private * @memberof Core# * @function _getRowHeightFromSettings * @param {number} row Visual row index. * @returns {number} */ this._getRowHeightFromSettings = function(row) { // let cellProperties = instance.getCellMeta(row, 0); // let height = cellProperties.height; // // if (height === void 0 || height === tableMeta.height) { // height = cellProperties.rowHeights; // } let height = tableMeta.rowHeights; if (height !== void 0 && height !== null) { switch (typeof height) { case 'object': // array height = height[row]; break; case 'function': height = height(row); break; default: break; } if (typeof height === 'string') { height = parseInt(height, 10); } } return height; }; /** * Returns the row height. * * Mind that this method is different from the [AutoRowSize](@/api/autoRowSize.md) plugin's [`getRowHeight()`](@/api/autoRowSize.md#getrowheight) method. * * @memberof Core# * @function getRowHeight * @param {number} row Visual row index. * @returns {number} The given row's height. * @fires Hooks#modifyRowHeight */ this.getRowHeight = function(row) { let height = instance._getRowHeightFromSettings(row); height = instance.runHooks('modifyRowHeight', height, row); return height; }; /** * Returns the total number of rows in the data source. * * @memberof Core# * @function countSourceRows * @returns {number} Total number of rows. */ this.countSourceRows = function() { return dataSource.countRows(); }; /** * Returns the total number of columns in the data source. * * @memberof Core# * @function countSourceCols * @returns {number} Total number of columns. */ this.countSourceCols = function() { return dataSource.countFirstRowKeys(); }; /** * Returns the total number of visual rows in the table. * * @memberof Core# * @function countRows * @returns {number} Total number of rows. */ this.countRows = function() { return datamap.getLength(); }; /** * Returns the total number of visible columns in the table. * * @memberof Core# * @function countCols * @returns {number} Total number of columns. */ this.countCols = function() { const maxCols = tableMeta.maxCols; const dataLen = this.columnIndexMapper.getNotTrimmedIndexesLength(); return Math.min(maxCols, dataLen); }; /** * Returns the number of rendered rows (including rows partially or fully rendered outside viewport). * * @memberof Core# * @function countRenderedRows * @returns {number} Returns -1 if table is not visible. */ this.countRenderedRows = function() { return instance.view.wt.drawn ? instance.view.wt.wtTable.getRenderedRowsCount() : -1; }; /** * Returns the number of visible rows (rendered rows that fully fit inside viewport). * * @memberof Core# * @function countVisibleRows * @returns {number} Number of visible rows or -1. */ this.countVisibleRows = function() { return instance.view.wt.drawn ? instance.view.wt.wtTable.getVisibleRowsCount() : -1; }; /** * Returns the number of rendered columns (including columns partially or fully rendered outside viewport). * * @memberof Core# * @function countRenderedCols * @returns {number} Returns -1 if table is not visible. 
*/ this.countRenderedCols = function() { return instance.view.wt.drawn ? instance.view.wt.wtTable.getRenderedColumnsCount() : -1; }; /** * Returns the number of visible columns. Returns -1 if table is not visible. * * @memberof Core# * @function countVisibleCols * @returns {number} Number of visible columns or -1. */ this.countVisibleCols = function() { return instance.view.wt.drawn ? instance.view.wt.wtTable.getVisibleColumnsCount() : -1; }; /** * Returns the number of empty rows. If the optional ending parameter is `true`, returns the * number of empty rows at the bottom of the table. * * @memberof Core# * @function countEmptyRows * @param {boolean} [ending=false] If `true`, will only count empty rows at the end of the data source. * @returns {number} Count empty rows. */ this.countEmptyRows = function(ending = false) { let emptyRows = 0; rangeEachReverse(instance.countRows() - 1, (visualIndex) => { if (instance.isEmptyRow(visualIndex)) { emptyRows += 1; } else if (ending === true) { return false; } }); return emptyRows; }; /** * Returns the number of empty columns. If the optional ending parameter is `true`, returns the number of empty * columns at right hand edge of the table. * * @memberof Core# * @function countEmptyCols * @param {boolean} [ending=false] If `true`, will only count empty columns at the end of the data source row. * @returns {number} Count empty cols. */ this.countEmptyCols = function(ending = false) { if (instance.countRows() < 1) { return 0; } let emptyColumns = 0; rangeEachReverse(instance.countCols() - 1, (visualIndex) => { if (instance.isEmptyCol(visualIndex)) { emptyColumns += 1; } else if (ending === true) { return false; } }); return emptyColumns; }; /** * Check if all cells in the row declared by the `row` argument are empty. * * @memberof Core# * @function isEmptyRow * @param {number} row Visual row index. * @returns {boolean} `true` if the row at the given `row` is empty, `false` otherwise. */ this.isEmptyRow = function(row) { return tableMeta.isEmptyRow.call(instance, row); }; /** * Check if all cells in the the column declared by the `column` argument are empty. * * @memberof Core# * @function isEmptyCol * @param {number} column Column index. * @returns {boolean} `true` if the column at the given `col` is empty, `false` otherwise. */ this.isEmptyCol = function(column) { return tableMeta.isEmptyCol.call(instance, column); }; /** * Select cell specified by `row` and `column` values or a range of cells finishing at `endRow`, `endCol`. If the table * was configured to support data column properties that properties can be used to making a selection. * * By default, viewport will be scrolled to the selection. After the `selectCell` method had finished, the instance * will be listening to keyboard input on the document. * * @example * ```js * // select a single cell * hot.selectCell(2, 4); * // select a single cell using column property * hot.selectCell(2, 'address'); * // select a range of cells * hot.selectCell(2, 4, 3, 5); * // select a range of cells using column properties * hot.selectCell(2, 'address', 3, 'phone_number'); * // select a range of cells without scrolling to them * hot.selectCell(2, 'address', 3, 'phone_number', false); * ``` * * @memberof Core# * @function selectCell * @param {number} row Visual row index. * @param {number|string} column Visual column index or column property. * @param {number} [endRow] Visual end row index (if selecting a range). 
* @param {number|string} [endColumn] Visual end column index or column property (if selecting a range). * @param {boolean} [scrollToCell=true] If `true`, the viewport will be scrolled to the selection. * @param {boolean} [changeListener=true] If `false`, Handsontable will not change keyboard events listener to himself. * @returns {boolean} `true` if selection was successful, `false` otherwise. */ this.selectCell = function(row, column, endRow, endColumn, scrollToCell = true, changeListener = true) { if (isUndefined(row) || isUndefined(column)) { return false; } return this.selectCells([[row, column, endRow, endColumn]], scrollToCell, changeListener); }; /** * Make multiple, non-contiguous selection specified by `row` and `column` values or a range of cells * finishing at `endRow`, `endColumn`. The method supports two input formats which are the same as that * produces by `getSelected` and `getSelectedRange` methods. * * By default, viewport will be scrolled to selection. After the `selectCells` method had finished, the instance * will be listening to keyboard input on the document. * * @example * ```js * // Using an array of arrays. * hot.selectCells([[1, 1, 2, 2], [3, 3], [6, 2, 0, 2]]); * // Using an array of arrays with defined columns as props. * hot.selectCells([[1, 'id', 2, 'first_name'], [3, 'full_name'], [6, 'last_name', 0, 'first_name']]); * // Using an array of CellRange objects (produced by `.getSelectedRange()` method). * const selected = hot.getSelectedRange(); * * selected[0].from.row = 0; * selected[0].from.col = 0; * * hot.selectCells(selected); * ``` * * @memberof Core# * @since 0.38.0 * @function selectCells * @param {Array[]|CellRange[]} coords Visual coords passed as an array of array (`[[rowStart, columnStart, rowEnd, columnEnd], ...]`) * the same format as `getSelected` method returns or as an CellRange objects * which is the same format what `getSelectedRange` method returns. * @param {boolean} [scrollToCell=true] If `true`, the viewport will be scrolled to the selection. * @param {boolean} [changeListener=true] If `false`, Handsontable will not change keyboard events listener to himself. * @returns {boolean} `true` if selection was successful, `false` otherwise. */ this.selectCells = function(coords = [[]], scrollToCell = true, changeListener = true) { if (scrollToCell === false) { preventScrollingToCell = true; } const wasSelected = selection.selectCells(coords); if (wasSelected && changeListener) { instance.listen(); } preventScrollingToCell = false; return wasSelected; }; /** * Select column specified by `startColumn` visual index, column property or a range of columns finishing at `endColumn`. * * @example * ```js * // Select column using visual index. * hot.selectColumns(1); * // Select column using column property. * hot.selectColumns('id'); * // Select range of columns using visual indexes. * hot.selectColumns(1, 4); * // Select range of columns using column properties. * hot.selectColumns('id', 'last_name'); * ``` * * @memberof Core# * @since 0.38.0 * @function selectColumns * @param {number} startColumn The visual column index from which the selection starts. * @param {number} [endColumn=startColumn] The visual column index to which the selection finishes. If `endColumn` * is not defined the column defined by `startColumn` will be selected. * @returns {boolean} `true` if selection was successful, `false` otherwise. 
*/ this.selectColumns = function(startColumn, endColumn = startColumn) { return selection.selectColumns(startColumn, endColumn); }; /** * Select row specified by `startRow` visual index or a range of rows finishing at `endRow`. * * @example * ```js * // Select row using visual index. * hot.selectRows(1); * // Select range of rows using visual indexes. * hot.selectRows(1, 4); * ``` * * @memberof Core# * @since 0.38.0 * @function selectRows * @param {number} startRow The visual row index from which the selection starts. * @param {number} [endRow=startRow] The visual row index to which the selection finishes. If `endRow` * is not defined the row defined by `startRow` will be selected. * @returns {boolean} `true` if selection was successful, `false` otherwise. */ this.selectRows = function(startRow, endRow = startRow) { return selection.selectRows(startRow, endRow); }; /** * Deselects the current cell selection on the table. * * @memberof Core# * @function deselectCell */ this.deselectCell = function() { selection.deselect(); }; /** * Select the whole table. The previous selection will be overwritten. * * @since 0.38.2 * @memberof Core# * @function selectAll * @param {boolean} [includeHeaders=true] `true` If the selection should include the row, column and corner headers, * `false` otherwise. */ this.selectAll = function(includeHeaders = true) { const includeRowHeaders = includeHeaders && this.hasRowHeaders(); const includeColumnHeaders = includeHeaders && this.hasColHeaders(); preventScrollingToCell = true; selection.selectAll(includeRowHeaders, includeColumnHeaders); preventScrollingToCell = false; }; const getIndexToScroll = (indexMapper, visualIndex) => { // Looking for a visual index on the right and then (when not found) on the left. return indexMapper.getFirstNotHiddenIndex(visualIndex, 1, true); }; /** * Scroll viewport to coordinates specified by the `row` and `column` arguments. * * @memberof Core# * @function scrollViewportTo * @param {number} [row] Row index. If the last argument isn't defined we treat the index as a visual row index. Otherwise, * we are using the index for numbering only this rows which may be rendered (we don't consider hidden rows). * @param {number} [column] Column index. If the last argument isn't defined we treat the index as a visual column index. * Otherwise, we are using the index for numbering only this columns which may be rendered (we don't consider hidden columns). * @param {boolean} [snapToBottom=false] If `true`, viewport is scrolled to show the cell on the bottom of the table. * @param {boolean} [snapToRight=false] If `true`, viewport is scrolled to show the cell on the right side of the table. * @param {boolean} [considerHiddenIndexes=true] If `true`, we handle visual indexes, otherwise we handle only indexes which * may be rendered when they are in the viewport (we don't consider hidden indexes as they aren't rendered). * @returns {boolean} `true` if scroll was successful, `false` otherwise. */ this.scrollViewportTo = function(row, column, snapToBottom = false, snapToRight = false, considerHiddenIndexes = true) { const snapToTop = !snapToBottom; const snapToLeft = !snapToRight; let renderableRow = row; let renderableColumn = column; if (considerHiddenIndexes) { const isRowInteger = Number.isInteger(row); const isColumnInteger = Number.isInteger(column); const visualRowToScroll = isRowInteger ? getIndexToScroll(this.rowIndexMapper, row) : void 0; const visualColumnToScroll = isColumnInteger ? 
getIndexToScroll(this.columnIndexMapper, column) : void 0; if (visualRowToScroll === null || visualColumnToScroll === null) { return false; } renderableRow = isRowInteger ? instance.rowIndexMapper.getRenderableFromVisualIndex(visualRowToScroll) : void 0; renderableColumn = isColumnInteger ? instance.columnIndexMapper.getRenderableFromVisualIndex(visualColumnToScroll) : void 0; } const isRowInteger = Number.isInteger(renderableRow); const isColumnInteger = Number.isInteger(renderableColumn); if (isRowInteger && isColumnInteger) { return instance.view.scrollViewport( new CellCoords(renderableRow, renderableColumn), snapToTop, snapToRight, snapToBottom, snapToLeft ); } if (isRowInteger && isColumnInteger === false) { return instance.view.scrollViewportVertically(renderableRow, snapToTop, snapToBottom); } if (isColumnInteger && isRowInteger === false) { return instance.view.scrollViewportHorizontally(renderableColumn, snapToRight, snapToLeft); } return false; }; /** * Removes the table from the DOM and destroys the instance of the Handsontable. * * @memberof Core# * @function destroy * @fires Hooks#afterDestroy */ this.destroy = function() { instance._clearTimeouts(); instance._clearImmediates(); if (instance.view) { // in case HT is destroyed before initialization has finished instance.view.destroy(); } if (dataSource) { dataSource.destroy(); } dataSource = null; metaManager.clearCache(); keyStateStopObserving(); if (isRootInstance(instance)) { const licenseInfo = this.rootDocument.querySelector('#hot-display-license-info'); if (licenseInfo) { licenseInfo.parentNode.removeChild(licenseInfo); } } empty(instance.rootElement); eventManager.destroy(); if (editorManager) { editorManager.destroy(); } // The plugin's `destroy` method is called as a consequence and it should handle // unregistration of plugin's maps. Some unregistered maps reset the cache. instance.batchExecution(() => { instance.rowIndexMapper.unregisterAll(); instance.columnIndexMapper.unregisterAll(); pluginsRegistry .getItems() .forEach(([, plugin]) => { plugin.destroy(); }); pluginsRegistry.clear(); instance.runHooks('afterDestroy'); }, true); Hooks.getSingleton().destroy(instance); objectEach(instance, (property, key, obj) => { // replace instance methods with post mortem if (isFunction(property)) { obj[key] = postMortem(key); } else if (key !== 'guid') { // replace instance properties with null (restores memory) // it should not be necessary but this prevents a memory leak side effects that show itself in Jasmine tests obj[key] = null; } }); instance.isDestroyed = true; // replace private properties with null (restores memory) // it should not be necessary but this prevents a memory leak side effects that show itself in Jasmine tests if (datamap) { datamap.destroy(); } instance.rowIndexMapper = null; instance.columnIndexMapper = null; datamap = null; grid = null; selection = null; editorManager = null; instance = null; }; /** * Replacement for all methods after the Handsontable was destroyed. * * @private * @param {string} method The method name. * @returns {Function} */ function postMortem(method) { return () => { throw new Error(`The "${method}" method cannot be called because this Handsontable instance has been destroyed`); }; } /** * Returns the active editor class instance. * * @memberof Core# * @function getActiveEditor * @returns {BaseEditor} The active editor instance. */ this.getActiveEditor = function() { return editorManager.getActiveEditor(); }; /** * Returns plugin instance by provided its name. 
* * @memberof Core# * @function getPlugin * @param {string} pluginName The plugin name. * @returns {BasePlugin|undefined} The plugin instance or undefined if there is no plugin. */ this.getPlugin = function(pluginName) { const unifiedPluginName = toUpperCaseFirst(pluginName); // Workaround for the UndoRedo plugin which, currently doesn't follow the plugin architecture. if (unifiedPluginName === 'UndoRedo') { return this.undoRedo; } return pluginsRegistry.getItem(unifiedPluginName); }; /** * Returns name of the passed plugin. * * @private * @memberof Core# * @param {BasePlugin} plugin The plugin instance. * @returns {string} */ this.getPluginName = function(plugin) { // Workaround for the UndoRedo plugin which, currently doesn't follow the plugin architecture. if (plugin === this.undoRedo) { return this.undoRedo.constructor.PLUGIN_KEY; } return pluginsRegistry.getId(plugin); }; /** * Returns the Handsontable instance. * * @memberof Core# * @function getInstance * @returns {Handsontable} The Handsontable instance. */ this.getInstance = function() { return instance; }; /** * Adds listener to the specified hook name (only for this Handsontable instance). * * @memberof Core# * @function addHook * @see Hooks#add * @param {string} key Hook name (see {@link Hooks}). * @param {Function|Array} callback Function or array of functions. * @example * ```js * hot.addHook('beforeInit', myCallback); * ``` */ this.addHook = function(key, callback) { Hooks.getSingleton().add(key, callback, instance); }; /** * Check if for a specified hook name there are added listeners (only for this Handsontable instance). All available * hooks you will find {@link Hooks}. * * @memberof Core# * @function hasHook * @see Hooks#has * @param {string} key Hook name. * @returns {boolean} * * @example * ```js * const hasBeforeInitListeners = hot.hasHook('beforeInit'); * ``` */ this.hasHook = function(key) { return Hooks.getSingleton().has(key, instance); }; /** * Adds listener to specified hook name (only for this Handsontable instance). After the listener is triggered, * it will be automatically removed. * * @memberof Core# * @function addHookOnce * @see Hooks#once * @param {string} key Hook name (see {@link Hooks}). * @param {Function|Array} callback Function or array of functions. * @example * ```js * hot.addHookOnce('beforeInit', myCallback); * ``` */ this.addHookOnce = function(key, callback) { Hooks.getSingleton().once(key, callback, instance); }; /** * Removes the hook listener previously registered with {@link Core#addHook}. * * @memberof Core# * @function removeHook * @see Hooks#remove * @param {string} key Hook name. * @param {Function} callback Reference to the function which has been registered using {@link Core#addHook}. * * @example * ```js * hot.removeHook('beforeInit', myCallback); * ``` */ this.removeHook = function(key, callback) { Hooks.getSingleton().remove(key, callback, instance); }; /** * Run the callbacks for the hook provided in the `key` argument using the parameters given in the other arguments. * * @memberof Core# * @function runHooks * @see Hooks#run * @param {string} key Hook name. * @param {*} [p1] Argument passed to the callback. * @param {*} [p2] Argument passed to the callback. * @param {*} [p3] Argument passed to the callback. * @param {*} [p4] Argument passed to the callback. * @param {*} [p5] Argument passed to the callback. * @param {*} [p6] Argument passed to the callback. 
* @returns {*} * * @example * ```js * // Run built-in hook * hot.runHooks('beforeInit'); * // Run custom hook * hot.runHooks('customAction', 10, 'foo'); * ``` */ this.runHooks = function(key, p1, p2, p3, p4, p5, p6) { return Hooks.getSingleton().run(instance, key, p1, p2, p3, p4, p5, p6); }; /** * Get language phrase for specified dictionary key. * * @memberof Core# * @function getTranslatedPhrase * @since 0.35.0 * @param {string} dictionaryKey Constant which is dictionary key. * @param {*} extraArguments Arguments which will be handled by formatters. * @returns {string} */ this.getTranslatedPhrase = function(dictionaryKey, extraArguments) { return getTranslatedPhrase(tableMeta.language, dictionaryKey, extraArguments); }; /** * Converts instance into outerHTML of HTMLTableElement. * * @memberof Core# * @function toHTML * @since 7.1.0 * @returns {string} */ this.toHTML = () => instanceToHTML(this); /** * Converts instance into HTMLTableElement. * * @memberof Core# * @function toTableElement * @since 7.1.0 * @returns {HTMLTableElement} */ this.toTableElement = () => { const tempElement = this.rootDocument.createElement('div'); tempElement.insertAdjacentHTML('afterbegin', instanceToHTML(this)); return tempElement.firstElementChild; }; this.timeouts = []; /** * Sets timeout. Purpose of this method is to clear all known timeouts when `destroy` method is called. * * @param {number|Function} handle Handler returned from setTimeout or function to execute (it will be automatically wraped * by setTimeout function). * @param {number} [delay=0] If first argument is passed as a function this argument set delay of the execution of that function. * @private */ this._registerTimeout = function(handle, delay = 0) { let handleFunc = handle; if (typeof handleFunc === 'function') { handleFunc = setTimeout(handleFunc, delay); } this.timeouts.push(handleFunc); }; /** * Clears all known timeouts. * * @private */ this._clearTimeouts = function() { arrayEach(this.timeouts, (handler) => { clearTimeout(handler); }); }; this.immediates = []; /** * Execute function execution to the next event loop cycle. Purpose of this method is to clear all known timeouts when `destroy` method is called. * * @param {Function} callback Function to be delayed in execution. * @private */ this._registerImmediate = function(callback) { this.immediates.push(setImmediate(callback)); }; /** * Clears all known timeouts. * * @private */ this._clearImmediates = function() { arrayEach(this.immediates, (handler) => { clearImmediate(handler); }); }; /** * Refresh selection borders. This is temporary method relic after selection rewrite. * * @private * @param {boolean} [revertOriginal=false] If `true`, the previous value will be restored. Otherwise, the edited value will be saved. * @param {boolean} [prepareEditorIfNeeded=true] If `true` the editor under the selected cell will be prepared to open. */ this._refreshBorders = function(revertOriginal = false, prepareEditorIfNeeded = true) { editorManager.destroyEditor(revertOriginal); instance.view.render(); if (prepareEditorIfNeeded && selection.isSelected()) { editorManager.prepareEditor(); } }; /** * Check if currently it is RTL direction. * * @private * @memberof Core# * @function isRtl * @returns {boolean} True if RTL. */ this.isRtl = function() { return instance.rootWindow.getComputedStyle(instance.rootElement).direction === 'rtl'; }; /** * Check if currently it is LTR direction. * * @private * @memberof Core# * @function isLtr * @returns {boolean} True if LTR. 
*/ this.isLtr = function() { return !instance.isRtl(); }; /** * Returns 1 for LTR; -1 for RTL. Useful for calculations. * * @private * @memberof Core# * @function getDirectionFactor * @returns {number} Returns 1 for LTR; -1 for RTL. */ this.getDirectionFactor = function() { return instance.isLtr() ? 1 : -1; }; getPluginsNames().forEach((pluginName) => { const PluginClass = getPlugin(pluginName); pluginsRegistry.addItem(pluginName, new PluginClass(this)); }); Hooks.getSingleton().run(instance, 'construct'); }
1
20,905
What I am missing in this PR, and I think we discussed this at the weekly meeting, is that all the code snippets that advise using `loadData` should be changed to one of the two new methods. Otherwise we send confusing mixed signals by promoting `loadData` everywhere in the guides. `loadData` is not deprecated, but it is legacy.
handsontable-handsontable
js
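The review comment in this row asks the guides to stop promoting `loadData`. Below is a minimal sketch of such a guide update, under the assumption (the comment does not name the two new methods) that `updateData()` is one of the intended replacements; treat the exact method names as version-dependent.

// Sketch of a migrated guide snippet; assumes Handsontable is loaded on the
// page and that updateData() is available in the targeted release.
const container = document.getElementById('example');

const hot = new Handsontable(container, {
  data: [['A1', 'B1'], ['A2', 'B2']],
  colHeaders: true,
  licenseKey: 'non-commercial-and-evaluation',
});

// Legacy call the guides currently show:
hot.loadData([['X1', 'Y1'], ['X2', 'Y2']]);

// Newer alternative; unlike loadData it keeps states such as cell meta and
// index mapper configuration:
hot.updateData([['X1', 'Y1'], ['X2', 'Y2']]);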
@@ -48,4 +48,4 @@ doc.css("h1, h2, h3, h4, h5, h6").each do |header|
 end
 
 puts "\n==================================================\n"
-puts "!!! Don't forget to set a summary: https://#{ENV.fetch("APP_DOMAIN")}/admin/video/#{video.id}/edit"
+puts "!!! Don't forget to set a summary: https://thoughtbot.com/upcase/admin/video/#{video.id}/edit"
1
#!./bin/rails runner

# This script is designed to be run on Heroku, to set the notes and create
# Markers for the Video with a given slug.
#
# It assumes that it's receiving a Markdown document on STDIN with the marker
# times in the headers, like:
#
# ## Creating a database view 324
#
# Run it like this:
#
# cat ../upcase-content/weekly-iteration-notes/scenic.md | staging run ./bin/create-notes-and-markers.rb cool-scenic-slug
#
# The slug is the `video.slug` in the database.

UNDERLINE_HEADER = /^([^\n]+) \d+(\n[-=]+)/m
ATX_HEADER = /^(#+.+) \d+$/

renderer = Redcarpet::Markdown.new(
  Redcarpet::Render::HTML.new(with_toc_data: true),
  autolink: true,
  tables: true,
  fenced_code_blocks: true,
  no_intra_emphasis: true,
)

video_slug = ARGV.first
raw_notes = STDIN.read

# When we run `STDIN.read`, Heroku prints out everything it just read. In order
# to separate that from error messages or output we actually care about, we
# print blank lines.
puts "\n" * 10

doc = Nokogiri::HTML(renderer.render(raw_notes))

# Remove the timestamps from the end of the headers
notes = raw_notes.gsub(UNDERLINE_HEADER, '\1\2').gsub(ATX_HEADER, '\1')

video = Video.find_by!(slug: video_slug)
video.update!(notes: notes)

doc.css("h1, h2, h3, h4, h5, h6").each do |header|
  header[:id].scan(/^(.+)-(\d+)$/).each do |anchor, time|
    video.markers.find_or_create_by!(anchor: anchor, time: time.to_i)
  end
end

puts "\n==================================================\n"
puts "!!! Don't forget to set a summary: https://#{ENV.fetch("APP_DOMAIN")}/admin/video/#{video.id}/edit"
1
17,395
Line is too long. [100/80]
thoughtbot-upcase
rb
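The RuboCop message in this row flags the replacement line from the patch as exceeding the 80-character limit. A hypothetical way to satisfy it, not necessarily the fix that was merged, is to extract the URL into a local variable:

# Hypothetical refactor of the flagged line; `video` comes from the
# surrounding script (Video.find_by!(slug: video_slug)).
summary_url = "https://thoughtbot.com/upcase/admin/video/#{video.id}/edit"

puts "\n==================================================\n"
puts "!!! Don't forget to set a summary: #{summary_url}"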
@@ -532,8 +532,12 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
 		return err
 	}
 
-	if !gopts.JSON && parentSnapshotID != nil {
-		p.V("using parent snapshot %v\n", parentSnapshotID.Str())
+	if !gopts.JSON {
+		if parentSnapshotID != nil {
+			p.P("using parent snapshot %v\n", parentSnapshotID.Str())
+		} else {
+			p.P("no parent snapshot found, will read all files\n")
+		}
 	}
 
 	selectByNameFilter := func(item string) bool {
1
package main import ( "bufio" "bytes" "context" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "runtime" "strconv" "strings" "time" "github.com/spf13/cobra" tomb "gopkg.in/tomb.v2" "github.com/restic/restic/internal/archiver" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/textfile" "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/json" "github.com/restic/restic/internal/ui/termstatus" ) var cmdBackup = &cobra.Command{ Use: "backup [flags] FILE/DIR [FILE/DIR] ...", Short: "Create a new backup of files and/or directories", Long: ` The "backup" command creates a new snapshot and saves the files and directories given as the arguments. EXIT STATUS =========== Exit status is 0 if the command was successful. Exit status is 1 if there was a fatal error (no snapshot created). Exit status is 3 if some source data could not be read (incomplete snapshot created). `, PreRun: func(cmd *cobra.Command, args []string) { if backupOptions.Host == "" { hostname, err := os.Hostname() if err != nil { debug.Log("os.Hostname() returned err: %v", err) return } backupOptions.Host = hostname } }, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { if backupOptions.Stdin { for _, filename := range backupOptions.FilesFrom { if filename == "-" { return errors.Fatal("cannot use both `--stdin` and `--files-from -`") } } } var t tomb.Tomb term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet) t.Go(func() error { term.Run(t.Context(globalOptions.ctx)); return nil }) err := runBackup(backupOptions, globalOptions, term, args) if err != nil { return err } t.Kill(nil) return t.Wait() }, } // BackupOptions bundles all options for the backup command. 
type BackupOptions struct { Parent string Force bool Excludes []string InsensitiveExcludes []string ExcludeFiles []string InsensitiveExcludeFiles []string ExcludeOtherFS bool ExcludeIfPresent []string ExcludeCaches bool ExcludeLargerThan string Stdin bool StdinFilename string Tags []string Host string FilesFrom []string TimeStamp string WithAtime bool IgnoreInode bool UseFsSnapshot bool } var backupOptions BackupOptions // ErrInvalidSourceData is used to report an incomplete backup var ErrInvalidSourceData = errors.New("failed to read all source data during backup") func init() { cmdRoot.AddCommand(cmdBackup) f := cmdBackup.Flags() f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: last snapshot in the repo that has the same target files/directories)") f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`) f.StringArrayVarP(&backupOptions.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") f.StringArrayVar(&backupOptions.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames") f.StringArrayVar(&backupOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)") f.StringArrayVar(&backupOptions.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns") f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems") f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes `filename[:header]`, exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)") f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file. See https://bford.info/cachedir/ for the Cache Directory Tagging Standard`) f.StringVar(&backupOptions.ExcludeLargerThan, "exclude-larger-than", "", "max `size` of the files to be backed up (allowed suffixes: k/K, m/M, g/G, t/T)") f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin") f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "`filename` to use when reading from stdin") f.StringArrayVar(&backupOptions.Tags, "tag", nil, "add a `tag` for the new snapshot (can be specified multiple times)") f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag") f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually") f.MarkDeprecated("hostname", "use --host") f.StringArrayVar(&backupOptions.FilesFrom, "files-from", nil, "read the files to backup from `file` (can be combined with file args/can be specified multiple times)") f.StringVar(&backupOptions.TimeStamp, "time", "", "`time` of the backup (ex. 
'2012-11-01 22:08:41') (default: now)") f.BoolVar(&backupOptions.WithAtime, "with-atime", false, "store the atime for all files and directories") f.BoolVar(&backupOptions.IgnoreInode, "ignore-inode", false, "ignore inode number changes when checking for modified files") if runtime.GOOS == "windows" { f.BoolVar(&backupOptions.UseFsSnapshot, "use-fs-snapshot", false, "use filesystem snapshot where possible (currently only Windows VSS)") } } // filterExisting returns a slice of all existing items, or an error if no // items exist at all. func filterExisting(items []string) (result []string, err error) { for _, item := range items { _, err := fs.Lstat(item) if err != nil && os.IsNotExist(errors.Cause(err)) { Warnf("%v does not exist, skipping\n", item) continue } result = append(result, item) } if len(result) == 0 { return nil, errors.Fatal("all target directories/files do not exist") } return } // readFromFile will read all lines from the given filename and return them as // a string array, if filename is empty readFromFile returns and empty string // array. If filename is a dash (-), readFromFile will read the lines from the // standard input. func readLinesFromFile(filename string) ([]string, error) { if filename == "" { return nil, nil } var ( data []byte err error ) if filename == "-" { data, err = ioutil.ReadAll(os.Stdin) } else { data, err = textfile.Read(filename) } if err != nil { return nil, err } var lines []string scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) // ignore empty lines if line == "" { continue } // strip comments if strings.HasPrefix(line, "#") { continue } lines = append(lines, line) } if err := scanner.Err(); err != nil { return nil, err } return lines, nil } // Check returns an error when an invalid combination of options was set. func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error { if gopts.password == "" { for _, filename := range opts.FilesFrom { if filename == "-" { return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD") } } } if opts.Stdin { if len(opts.FilesFrom) > 0 { return errors.Fatal("--stdin and --files-from cannot be used together") } if len(args) > 0 { return errors.Fatal("--stdin was specified and files/dirs were listed as arguments") } } return nil } // collectRejectByNameFuncs returns a list of all functions which may reject data // from being saved in a snapshot based on path only func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectByNameFunc, err error) { // exclude restic cache if repo.Cache != nil { f, err := rejectResticCache(repo) if err != nil { return nil, err } fs = append(fs, f) } // add patterns from file if len(opts.ExcludeFiles) > 0 { excludes, err := readExcludePatternsFromFiles(opts.ExcludeFiles) if err != nil { return nil, err } opts.Excludes = append(opts.Excludes, excludes...) } if len(opts.InsensitiveExcludeFiles) > 0 { excludes, err := readExcludePatternsFromFiles(opts.InsensitiveExcludeFiles) if err != nil { return nil, err } opts.InsensitiveExcludes = append(opts.InsensitiveExcludes, excludes...) 
} if len(opts.InsensitiveExcludes) > 0 { fs = append(fs, rejectByInsensitivePattern(opts.InsensitiveExcludes)) } if len(opts.Excludes) > 0 { fs = append(fs, rejectByPattern(opts.Excludes)) } if opts.ExcludeCaches { opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55") } for _, spec := range opts.ExcludeIfPresent { f, err := rejectIfPresent(spec) if err != nil { return nil, err } fs = append(fs, f) } return fs, nil } // collectRejectFuncs returns a list of all functions which may reject data // from being saved in a snapshot based on path and file info func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) { // allowed devices if opts.ExcludeOtherFS && !opts.Stdin { f, err := rejectByDevice(targets) if err != nil { return nil, err } fs = append(fs, f) } if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin { f, err := rejectBySize(opts.ExcludeLargerThan) if err != nil { return nil, err } fs = append(fs, f) } return fs, nil } // readExcludePatternsFromFiles reads all exclude files and returns the list of // exclude patterns. For each line, leading and trailing white space is removed // and comment lines are ignored. For each remaining pattern, environment // variables are resolved. For adding a literal dollar sign ($), write $$ to // the file. func readExcludePatternsFromFiles(excludeFiles []string) ([]string, error) { getenvOrDollar := func(s string) string { if s == "$" { return "$" } return os.Getenv(s) } var excludes []string for _, filename := range excludeFiles { err := func() (err error) { data, err := textfile.Read(filename) if err != nil { return err } scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) // ignore empty lines if line == "" { continue } // strip comments if strings.HasPrefix(line, "#") { continue } line = os.Expand(line, getenvOrDollar) excludes = append(excludes, line) } return scanner.Err() }() if err != nil { return nil, err } } return excludes, nil } // collectTargets returns a list of target files/dirs from several sources. func collectTargets(opts BackupOptions, args []string) (targets []string, err error) { if opts.Stdin { return nil, nil } var lines []string for _, file := range opts.FilesFrom { fromfile, err := readLinesFromFile(file) if err != nil { return nil, err } // expand wildcards for _, line := range fromfile { var expanded []string expanded, err := filepath.Glob(line) if err != nil { return nil, errors.WithMessage(err, fmt.Sprintf("pattern: %s", line)) } if len(expanded) == 0 { Warnf("pattern %q does not match any files, skipping\n", line) } lines = append(lines, expanded...) } } // merge files from files-from into normal args so we can reuse the normal // args checks and have the ability to use both files-from and args at the // same time args = append(args, lines...) if len(args) == 0 && !opts.Stdin { return nil, errors.Fatal("nothing to backup, please specify target files/dirs") } targets = args targets, err = filterExisting(targets) if err != nil { return nil, err } return targets, nil } // parent returns the ID of the parent snapshot. If there is none, nil is // returned. 
func findParentSnapshot(ctx context.Context, repo restic.Repository, opts BackupOptions, targets []string) (parentID *restic.ID, err error) { // Force using a parent if !opts.Force && opts.Parent != "" { id, err := restic.FindSnapshot(ctx, repo, opts.Parent) if err != nil { return nil, errors.Fatalf("invalid id %q: %v", opts.Parent, err) } parentID = &id } // Find last snapshot to set it as parent, if not already set if !opts.Force && parentID == nil { id, err := restic.FindLatestSnapshot(ctx, repo, targets, []restic.TagList{}, []string{opts.Host}) if err == nil { parentID = &id } else if err != restic.ErrNoSnapshotFound { return nil, err } } return parentID, nil } func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { err := opts.Check(gopts, args) if err != nil { return err } targets, err := collectTargets(opts, args) if err != nil { return err } timeStamp := time.Now() if opts.TimeStamp != "" { timeStamp, err = time.ParseInLocation(TimeFormat, opts.TimeStamp, time.Local) if err != nil { return errors.Fatalf("error in time option: %v\n", err) } } var t tomb.Tomb if gopts.verbosity >= 2 && !gopts.JSON { Verbosef("open repository\n") } repo, err := OpenRepository(gopts) if err != nil { return err } type ArchiveProgressReporter interface { CompleteItem(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) StartFile(filename string) CompleteBlob(filename string, bytes uint64) ScannerError(item string, fi os.FileInfo, err error) error ReportTotal(item string, s archiver.ScanStats) SetMinUpdatePause(d time.Duration) Run(ctx context.Context) error Error(item string, fi os.FileInfo, err error) error Finish(snapshotID restic.ID) // ui.StdioWrapper Stdout() io.WriteCloser Stderr() io.WriteCloser // ui.Message E(msg string, args ...interface{}) P(msg string, args ...interface{}) V(msg string, args ...interface{}) VV(msg string, args ...interface{}) } var p ArchiveProgressReporter if gopts.JSON { p = json.NewBackup(term, gopts.verbosity) } else { p = ui.NewBackup(term, gopts.verbosity) } // use the terminal for stdout/stderr prevStdout, prevStderr := gopts.stdout, gopts.stderr defer func() { gopts.stdout, gopts.stderr = prevStdout, prevStderr }() gopts.stdout, gopts.stderr = p.Stdout(), p.Stderr() if s, ok := os.LookupEnv("RESTIC_PROGRESS_FPS"); ok { fps, err := strconv.Atoi(s) if err == nil && fps >= 1 { if fps > 60 { fps = 60 } p.SetMinUpdatePause(time.Second / time.Duration(fps)) } } t.Go(func() error { return p.Run(t.Context(gopts.ctx)) }) if !gopts.JSON { p.V("lock repository") } lock, err := lockRepo(gopts.ctx, repo) defer unlockRepo(lock) if err != nil { return err } // rejectByNameFuncs collect functions that can reject items from the backup based on path only rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo, targets) if err != nil { return err } // rejectFuncs collect functions that can reject items from the backup based on path and file info rejectFuncs, err := collectRejectFuncs(opts, repo, targets) if err != nil { return err } if !gopts.JSON { p.V("load index files") } err = repo.LoadIndex(gopts.ctx) if err != nil { return err } parentSnapshotID, err := findParentSnapshot(gopts.ctx, repo, opts, targets) if err != nil { return err } if !gopts.JSON && parentSnapshotID != nil { p.V("using parent snapshot %v\n", parentSnapshotID.Str()) } selectByNameFilter := func(item string) bool { for _, reject := range rejectByNameFuncs { if reject(item) { return false } } return true } selectFilter := func(item 
string, fi os.FileInfo) bool { for _, reject := range rejectFuncs { if reject(item, fi) { return false } } return true } var targetFS fs.FS = fs.Local{} if runtime.GOOS == "windows" && opts.UseFsSnapshot { if err = fs.HasSufficientPrivilegesForVSS(); err != nil { return err } errorHandler := func(item string, err error) error { return p.Error(item, nil, err) } messageHandler := func(msg string, args ...interface{}) { if !gopts.JSON { p.P(msg, args...) } } localVss := fs.NewLocalVss(errorHandler, messageHandler) defer localVss.DeleteSnapshots() targetFS = localVss } if opts.Stdin { if !gopts.JSON { p.V("read data from stdin") } filename := path.Join("/", opts.StdinFilename) targetFS = &fs.Reader{ ModTime: timeStamp, Name: filename, Mode: 0644, ReadCloser: os.Stdin, } targets = []string{filename} } sc := archiver.NewScanner(targetFS) sc.SelectByName = selectByNameFilter sc.Select = selectFilter sc.Error = p.ScannerError sc.Result = p.ReportTotal if !gopts.JSON { p.V("start scan on %v", targets) } t.Go(func() error { return sc.Scan(t.Context(gopts.ctx), targets) }) arch := archiver.New(repo, targetFS, archiver.Options{}) arch.SelectByName = selectByNameFilter arch.Select = selectFilter arch.WithAtime = opts.WithAtime success := true arch.Error = func(item string, fi os.FileInfo, err error) error { success = false return p.Error(item, fi, err) } arch.CompleteItem = p.CompleteItem arch.StartFile = p.StartFile arch.CompleteBlob = p.CompleteBlob arch.IgnoreInode = opts.IgnoreInode if parentSnapshotID == nil { parentSnapshotID = &restic.ID{} } snapshotOpts := archiver.SnapshotOptions{ Excludes: opts.Excludes, Tags: opts.Tags, Time: timeStamp, Hostname: opts.Host, ParentSnapshot: *parentSnapshotID, } if !gopts.JSON { p.V("start backup on %v", targets) } _, id, err := arch.Snapshot(gopts.ctx, targets, snapshotOpts) if err != nil { return errors.Fatalf("unable to save snapshot: %v", err) } // cleanly shutdown all running goroutines t.Kill(nil) // let's see if one returned an error err = t.Wait() // Report finished execution p.Finish(id) if !gopts.JSON { p.P("snapshot %s saved\n", id.Str()) } if !success { return ErrInvalidSourceData } // Return error if any return err }
1
14,454
I suggest "no parent snapshot found, will read all data\n". (A sketch of the adjusted branch follows this record.)
restic-restic
go
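The patch above adds an else branch for the no-parent case, and the review suggests "will read all data" as the wording. Here is a minimal, self-contained sketch of that branch with the suggested message; `reportParent` and the `fmt.Printf` calls are illustrative stand-ins for restic's progress reporter (`p.P`), not the actual code.

```go
package main

import "fmt"

// reportParent mirrors the verbose-output branch touched by the patch:
// say which parent snapshot is used, or that all data will be read.
// parentID is nil when no parent snapshot was found.
func reportParent(jsonOutput bool, parentID *string) {
	if jsonOutput {
		return // JSON output stays silent here, as in the patch
	}
	if parentID != nil {
		fmt.Printf("using parent snapshot %v\n", *parentID)
	} else {
		fmt.Printf("no parent snapshot found, will read all data\n")
	}
}

func main() {
	id := "bdbd3439" // hypothetical short snapshot ID
	reportParent(false, &id)
	reportParent(false, nil)
}
```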
@@ -48,9 +48,7 @@ namespace NLog.UnitTests { using (new NoThrowNLogExceptions()) { - LogManager.ThrowExceptions = true; - - LogManager.Configuration = XmlLoggingConfiguration.CreateFromXmlString(@" + var config = XmlLoggingConfiguration.CreateFromXmlString(@" <nlog throwExceptions='false'> <targets><target type='MethodCall' name='test' methodName='Throws' className='NLog.UnitTests.LogFactoryTests, NLog.UnitTests.netfx40' /></targets> <rules>
1
// // Copyright (c) 2004-2018 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog.UnitTests { using System; using System.Collections.Generic; using System.IO; using System.Threading; using NLog.Config; using NLog.UnitTests.Mocks; using Xunit; public class LogFactoryTests : NLogTestBase { [Fact] public void Flush_DoNotThrowExceptionsAndTimeout_DoesNotThrow() { using (new NoThrowNLogExceptions()) { LogManager.ThrowExceptions = true; LogManager.Configuration = XmlLoggingConfiguration.CreateFromXmlString(@" <nlog throwExceptions='false'> <targets><target type='MethodCall' name='test' methodName='Throws' className='NLog.UnitTests.LogFactoryTests, NLog.UnitTests.netfx40' /></targets> <rules> <logger name='*' minlevel='Debug' writeto='test'></logger> </rules> </nlog>"); ILogger logger = LogManager.GetCurrentClassLogger(); logger.Factory.Flush(_ => { }, TimeSpan.FromMilliseconds(1)); } } [Fact] public void InvalidXMLConfiguration_DoesNotThrowErrorWhen_ThrowExceptionFlagIsNotSet() { using (new NoThrowNLogExceptions()) { LogManager.Configuration = XmlLoggingConfiguration.CreateFromXmlString(@" <nlog internalLogIncludeTimestamp='IamNotBooleanValue'> <targets><target type='MethodCall' name='test' methodName='Throws' className='NLog.UnitTests.LogFactoryTests, NLog.UnitTests.netfx40' /></targets> <rules> <logger name='*' minlevel='Debug' writeto='test'></logger> </rules> </nlog>"); } } [Fact] public void InvalidXMLConfiguration_ThrowErrorWhen_ThrowExceptionFlagIsSet() { Boolean ExceptionThrown = false; try { LogManager.ThrowExceptions = true; LogManager.Configuration = XmlLoggingConfiguration.CreateFromXmlString(@" <nlog internalLogIncludeTimestamp='IamNotBooleanValue'> <targets><target type='MethodCall' name='test' methodName='Throws' className='NLog.UnitTests.LogFactoryTests, NLog.UnitTests.netfx40' /></targets> <rules> <logger name='*' minlevel='Debug' writeto='test'></logger> </rules> </nlog>"); } catch (Exception) { ExceptionThrown = true; } 
Assert.True(ExceptionThrown); } [Fact] public void Configuration_InaccessibleNLog_doesNotThrowException() { string tempDirectory = null; try { // Arrange var logFactory = CreateEmptyNLogFile(out tempDirectory, out var configFile); using (OpenStream(configFile)) { // Act var loggingConfig = logFactory.Configuration; // Assert Assert.Null(loggingConfig); } // Assert Assert.NotNull(logFactory.Configuration); } finally { if (tempDirectory != null && Directory.Exists(tempDirectory)) Directory.Delete(tempDirectory, true); } } [Fact] public void LoadConfiguration_InaccessibleNLog_throwException() { string tempDirectory = null; try { // Arrange var logFactory = CreateEmptyNLogFile(out tempDirectory, out var configFile); using (OpenStream(configFile)) { // Act var ex = Record.Exception(() => logFactory.LoadConfiguration(configFile)); // Assert Assert.IsType<IOException>(ex); } // Assert Assert.NotNull(logFactory.LoadConfiguration(configFile).Configuration); } finally { if (tempDirectory != null && Directory.Exists(tempDirectory)) Directory.Delete(tempDirectory, true); } } private static FileStream OpenStream(string configFile) { return new FileStream(configFile, FileMode.Open, FileAccess.ReadWrite, FileShare.None); } private static LogFactory CreateEmptyNLogFile(out string tempDirectory, out string filePath) { tempDirectory = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString("N")); filePath = Path.Combine(tempDirectory, "NLog.config"); Directory.CreateDirectory(tempDirectory); File.WriteAllText(filePath, "<nlog />"); LogFactory logFactory = new LogFactory(); logFactory.SetCandidateConfigFilePaths(new[] { filePath }); return logFactory; } [Fact] public void SecondaryLogFactoryDoesNotTakePrimaryLogFactoryLock() { File.WriteAllText("NLog.config", "<nlog />"); try { bool threadTerminated; var primaryLogFactory = LogManager.factory; var primaryLogFactoryLock = primaryLogFactory._syncRoot; // Simulate a potential deadlock. // If the creation of the new LogFactory takes the lock of the global LogFactory, the thread will deadlock. 
lock (primaryLogFactoryLock) { var thread = new Thread(() => { (new LogFactory()).GetCurrentClassLogger(); }); thread.Start(); threadTerminated = thread.Join(TimeSpan.FromSeconds(1)); } Assert.True(threadTerminated); } finally { try { File.Delete("NLog.config"); } catch { } } } [Fact] public void ReloadConfigOnTimer_DoesNotThrowConfigException_IfConfigChangedInBetween() { EventHandler<LoggingConfigurationChangedEventArgs> testChanged = null; try { LogManager.Configuration = null; var loggingConfiguration = new LoggingConfiguration(); LogManager.Configuration = loggingConfiguration; var logFactory = new LogFactory(loggingConfiguration); var differentConfiguration = new LoggingConfiguration(); // Verify that the random configuration change is ignored (Only the final reset is reacted upon) bool called = false; LoggingConfiguration oldConfiguration = null, newConfiguration = null; testChanged = (s, e) => { called = true; oldConfiguration = e.DeactivatedConfiguration; newConfiguration = e.ActivatedConfiguration; }; LogManager.LogFactory.ConfigurationChanged += testChanged; var exRecorded = Record.Exception(() => logFactory.ReloadConfigOnTimer(differentConfiguration)); Assert.Null(exRecorded); // Final reset clears the configuration, so it is changed to null LogManager.Configuration = null; Assert.True(called); Assert.Equal(loggingConfiguration, oldConfiguration); Assert.Null(newConfiguration); } finally { if (testChanged != null) LogManager.LogFactory.ConfigurationChanged -= testChanged; } } private class ReloadNullConfiguration : LoggingConfiguration { public override LoggingConfiguration Reload() { return null; } } [Fact] public void ReloadConfigOnTimer_DoesNotThrowConfigException_IfConfigReloadReturnsNull() { var loggingConfiguration = new ReloadNullConfiguration(); LogManager.Configuration = loggingConfiguration; var logFactory = new LogFactory(loggingConfiguration); var exRecorded = Record.Exception(() => logFactory.ReloadConfigOnTimer(loggingConfiguration)); Assert.Null(exRecorded); } [Fact] public void ReloadConfigOnTimer_Raises_ConfigurationReloadedEvent() { var called = false; var loggingConfiguration = new LoggingConfiguration(); LogManager.Configuration = loggingConfiguration; var logFactory = new LogFactory(loggingConfiguration); logFactory.ConfigurationReloaded += (sender, args) => { called = true; }; logFactory.ReloadConfigOnTimer(loggingConfiguration); Assert.True(called); } [Fact] public void ReloadConfigOnTimer_When_No_Exception_Raises_ConfigurationReloadedEvent_With_Correct_Sender() { object calledBy = null; var loggingConfiguration = new LoggingConfiguration(); LogManager.Configuration = loggingConfiguration; var logFactory = new LogFactory(loggingConfiguration); logFactory.ConfigurationReloaded += (sender, args) => { calledBy = sender; }; logFactory.ReloadConfigOnTimer(loggingConfiguration); Assert.Same(calledBy, logFactory); } [Fact] public void ReloadConfigOnTimer_When_No_Exception_Raises_ConfigurationReloadedEvent_With_Argument_Indicating_Success() { LoggingConfigurationReloadedEventArgs arguments = null; var loggingConfiguration = new LoggingConfiguration(); LogManager.Configuration = loggingConfiguration; var logFactory = new LogFactory(loggingConfiguration); logFactory.ConfigurationReloaded += (sender, args) => { arguments = args; }; logFactory.ReloadConfigOnTimer(loggingConfiguration); Assert.True(arguments.Succeeded); } /// <summary> /// We should be forward compatible so that we can add easily attributes in the future. 
/// </summary> [Fact] public void NewAttrOnNLogLevelShouldNotThrowError() { LogManager.Configuration = XmlLoggingConfiguration.CreateFromXmlString(@" <nlog throwExceptions='true' imAnewAttribute='noError'> <targets><target type='file' name='f1' filename='test.log' /></targets> <rules> <logger name='*' minlevel='Debug' writeto='f1'></logger> </rules> </nlog>"); } [Fact] public void ValueWithVariableMustNotCauseInfiniteRecursion() { LogManager.Configuration = null; var filename = "NLog.config"; File.WriteAllText(filename, @" <nlog> <variable name='dir' value='c:\mylogs' /> <targets> <target name='f' type='file' fileName='${var:dir}\test.log' /> </targets> <rules> <logger name='*' writeTo='f' /> </rules> </nlog>"); try { var x = LogManager.Configuration; //2nd call var config = new XmlLoggingConfiguration(filename); } finally { File.Delete(filename); } } [Fact] public void EnableAndDisableLogging() { LogFactory factory = new LogFactory(); #pragma warning disable 618 // In order Suspend => Resume Assert.True(factory.IsLoggingEnabled()); factory.DisableLogging(); Assert.False(factory.IsLoggingEnabled()); factory.EnableLogging(); Assert.True(factory.IsLoggingEnabled()); #pragma warning restore 618 } [Fact] public void SuspendAndResumeLogging_InOrder() { LogFactory factory = new LogFactory(); // In order Suspend => Resume [Case 1] Assert.True(factory.IsLoggingEnabled()); factory.SuspendLogging(); Assert.False(factory.IsLoggingEnabled()); factory.ResumeLogging(); Assert.True(factory.IsLoggingEnabled()); // In order Suspend => Resume [Case 2] using (var factory2 = new LogFactory()) { Assert.True(factory.IsLoggingEnabled()); factory.SuspendLogging(); Assert.False(factory.IsLoggingEnabled()); factory.ResumeLogging(); Assert.True(factory.IsLoggingEnabled()); } } [Fact] public void SuspendAndResumeLogging_OutOfOrder() { LogFactory factory = new LogFactory(); // Out of order Resume => Suspend => (Suspend => Resume) factory.ResumeLogging(); Assert.True(factory.IsLoggingEnabled()); factory.SuspendLogging(); Assert.True(factory.IsLoggingEnabled()); factory.SuspendLogging(); Assert.False(factory.IsLoggingEnabled()); factory.ResumeLogging(); Assert.True(factory.IsLoggingEnabled()); } [Theory] [MemberData(nameof(GetConfigFile_absolutePath_loads_testData))] public void GetConfigFile_absolutePath_loads(string filename, string accepts, string expected, string baseDir) { // Arrange var fileMock = new FileMock(f => f == accepts); var factory = new LogFactory(null, fileMock); var appDomain = LogFactory.CurrentAppDomain; try { LogFactory.CurrentAppDomain = new AppDomainMock(baseDir); // Act var result = factory.GetConfigFile(filename); // Assert Assert.Equal(expected, result); } finally { //restore LogFactory.CurrentAppDomain = appDomain; } } public static IEnumerable<object[]> GetConfigFile_absolutePath_loads_testData() { var d = Path.DirectorySeparatorChar; var baseDir = Path.GetTempPath(); var dirInBaseDir = $"{baseDir}dir1"; yield return new object[] { $"{baseDir}configfile", $"{baseDir}configfile", $"{baseDir}configfile", dirInBaseDir }; yield return new object[] { "nlog.config", $"{baseDir}dir1{d}nlog.config", $"{baseDir}dir1{d}nlog.config", dirInBaseDir }; //exists yield return new object[] { "nlog.config", $"{baseDir}dir1{d}nlog2.config", "nlog.config", dirInBaseDir}; //not existing, fallback } } }
1
18,046
`LogManager.ThrowExceptions = true` has a very special meaning when unit-testing. Why the change to `LogFactory.ThrowExceptions = true`, and why enable it only after the config load?
NLog-NLog
.cs
@@ -27,14 +27,12 @@ var formatter = this; string = function(value) { if (value != null) { value = value.replace(/\\/g, '\\\\'); - value = value.replace(/\"/g, '\\"'); + value = value.replace(/\'/g, '\\\''); value = value.replace(/\r/g, '\\r'); value = value.replace(/\n/g, '\\n'); - value = value.replace(/@/g, '\\@'); - value = value.replace(/\$/g, '\\$'); - return '"' + value + '"'; + return "'" + value + "'"; } else { - return '""'; + return "'"; } }
1
/* * Format for Selenium Remote Control Perl client. */ var subScriptLoader = Components.classes["@mozilla.org/moz/jssubscript-loader;1"].getService(Components.interfaces.mozIJSSubScriptLoader); subScriptLoader.loadSubScript('chrome://selenium-ide/content/formats/remoteControl.js', this); this.name = "perl-rc"; // method name will not be used in this format function testMethodName(testName) { return testName; } var originalFormatCommands = formatCommands; formatCommands = function(commands) { this.tests = 0; var lines = originalFormatCommands(commands); if (this.tests == 0) { lines += addIndent("pass;\n"); } return lines; } var formatter = this; string = function(value) { if (value != null) { value = value.replace(/\\/g, '\\\\'); value = value.replace(/\"/g, '\\"'); value = value.replace(/\r/g, '\\r'); value = value.replace(/\n/g, '\\n'); value = value.replace(/@/g, '\\@'); value = value.replace(/\$/g, '\\$'); return '"' + value + '"'; } else { return '""'; } } variableName = function(value) { return "$" + value; } concatString = function(array) { return array.join(" . "); } function assertTrue(expression) { if (formatter.assertOrVerifyFailureOnNext) { return expression.toString() + " or die;"; } else { formatter.tests++; if (expression.assertable) { expression.suffix = "_ok"; return expression.toString() + ";"; } else { return "ok(" + expression.toString() + ");"; } } } function assertFalse(expression) { if (formatter.assertOrVerifyFailureOnNext) { return expression.toString() + " and die;"; } else { formatter.tests++; return "ok(not " + expression.toString() + ");"; } } var verifyTrue = assertTrue; var verifyFalse = assertFalse; function joinExpression(expression) { return "join(',', " + expression.toString() + ")"; } function assignToVariable(type, variable, expression) { if (type == 'String[]') { return "my @" + variable + " = " + expression.toString(); } else { return "my $" + variable + " = " + expression.toString(); } } function waitFor(expression) { return "WAIT: {\n" + indents(1) + "for (1..60) {\n" + indents(2) + "if (eval { " + expression.toString() + " }) { pass; last WAIT }\n" + indents(2) + "sleep(1);\n" + indents(1) + "}\n" + indents(1) + 'fail("timeout");\n' + "}"; } function assertOrVerifyFailure(line, isAssert) { return 'dies_ok { ' + line + ' };'; } Equals.prototype.toString = function() { return this.e1.toString() + " eq " + this.e2.toString(); } NotEquals.prototype.toString = function() { return this.e1.toString() + " ne " + this.e2.toString(); } Equals.prototype.assert = function() { if (formatter.assertOrVerifyFailureOnNext) { return assertTrue(this); } else { formatter.tests++; if (!this.e2.args) { return "is(" + this.e1 + ", " + this.e2 + ");"; } else { var expression = this.e2; expression.suffix = "_is"; expression.noGet = true; expression.args.push(this.e1); return expression.toString() + ";"; } } } Equals.prototype.verify = Equals.prototype.assert; NotEquals.prototype.assert = function() { if (formatter.assertOrVerifyFailureOnNext) { return assertTrue(this); } else { if (!this.e2.args) { return "isnt(" + this.e1 + ", " + this.e2 + ");"; } else { formatter.tests++; var expression = this.e2; expression.suffix = "_isnt"; expression.noGet = true; expression.args.push(this.e1); return expression.toString() + ";"; } } } NotEquals.prototype.verify = NotEquals.prototype.assert; RegexpMatch.prototype.toString = function() { return this.expression + " =~ /" + this.pattern.replace(/\//g, "\\/") + "/"; } RegexpNotMatch.prototype.toString = function() { return notOperator() 
+ "(" + RegexpMatch.prototype.toString.call(this) + ")"; } function ifCondition(expression, callback) { return "if (" + expression.toString() + ") {\n" + callback() + "}"; } function pause(milliseconds) { return "sleep(" + (parseInt(milliseconds) / 1000) + ");"; } function echo(message) { return "print(" + xlateArgument(message) + ' . "\\n");' } function statement(expression) { if (!formatter.assertOrVerifyFailureOnNext) { formatter.tests++; expression.suffix = "_ok"; } return expression.toString() + ";"; } function array(value) { var str = '('; for (var i = 0; i < value.length; i++) { str += string(value[i]); if (i < value.length - 1) str += ", "; } str += ')'; return str; } function nonBreakingSpace() { return "\"\\x{00A0}\""; } CallSelenium.prototype.assertable = true; CallSelenium.prototype.toString = function() { var result = ''; if (this.negative) { result += '!'; } if (options.receiver) { result += options.receiver + '->'; } var command = underscore(this.message); if (this.noGet) { command = command.replace(/^get_/, ''); } result += command; if (this.suffix) { result += this.suffix; } result += '('; for (var i = 0; i < this.args.length; i++) { result += this.args[i]; if (i < this.args.length - 1) { result += ', '; } } result += ')'; return result; } function formatComment(comment) { return comment.comment.replace(/.+/mg, function(str) { return "# " + str; }); } this.options = { receiver: "$sel", rcHost: "localhost", rcPort: "4444", environment: "*chrome", header: 'use strict;\n' + 'use warnings;\n' + 'use Time::HiRes qw(sleep);\n' + 'use Test::WWW::Selenium;\n' + 'use Test::More "no_plan";\n' + 'use Test::Exception;\n' + '\n' + 'my ${receiver} = Test::WWW::Selenium->new( host => "${rcHost}", \n' + ' port => ${rcPort}, \n' + ' browser => "${environment}", \n' + ' browser_url => "${baseURL}" );\n' + '\n', footer: "", indent: "4", initialIndents: '0' }; this.configForm = '<description>Variable for Selenium instance</description>' + '<textbox id="options_receiver" />' + '<description>Selenium RC host</description>' + '<textbox id="options_rcHost" />' + '<description>Selenium RC port</description>' + '<textbox id="options_rcPort" />' + '<description>Environment</description>' + '<textbox id="options_environment" />' + '<description>Header</description>' + '<textbox id="options_header" multiline="true" flex="1" rows="4"/>' + '<description>Footer</description>' + '<textbox id="options_footer" multiline="true" flex="1" rows="4"/>' + '<description>Indent</description>' + '<menulist id="options_indent"><menupopup>' + '<menuitem label="Tab" value="tab"/>' + '<menuitem label="1 space" value="1"/>' + '<menuitem label="2 spaces" value="2"/>' + '<menuitem label="3 spaces" value="3"/>' + '<menuitem label="4 spaces" value="4"/>' + '<menuitem label="5 spaces" value="5"/>' + '<menuitem label="6 spaces" value="6"/>' + '<menuitem label="7 spaces" value="7"/>' + '<menuitem label="8 spaces" value="8"/>' + '</menupopup></menulist>';
1
10,826
It should be `return "''";`. I will fix it. (See the sketch after this record.)
SeleniumHQ-selenium
java
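The review fix for the patch above: the null branch should produce the empty Perl string literal `''`, i.e. `return "''";`, since a lone `'` emits an unterminated literal. A minimal standalone sketch of the escaper with that fix applied; `perlString` is a hypothetical name for the formatter's `string` function.

```javascript
// Escape a value as a single-quoted Perl string literal; null becomes ''.
function perlString(value) {
  if (value != null) {
    value = value.replace(/\\/g, '\\\\');
    value = value.replace(/'/g, "\\'");
    value = value.replace(/\r/g, '\\r');
    value = value.replace(/\n/g, '\\n');
    return "'" + value + "'";
  } else {
    return "''"; // was: return "'"; which emits an unterminated literal
  }
}

console.log(perlString("it's done")); // 'it\'s done'
console.log(perlString(null));        // ''
```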
@@ -100,11 +100,10 @@ public class AcceptanceTest extends STBBaseTst { ASTMethodDeclaration node = acu.findDescendantsOfType(ASTMethodDeclaration.class).get(0); Scope s = node.getScope(); Map<NameDeclaration, List<NameOccurrence>> m = s.getDeclarations(); - for (Iterator<NameDeclaration> i = m.keySet().iterator(); i.hasNext();) { - NameDeclaration d = i.next(); - assertEquals("buz", d.getImage()); - assertEquals("ArrayList", ((TypedNameDeclaration) d).getTypeImage()); - List<NameOccurrence> u = m.get(d); + for (Map.Entry<NameDeclaration, List<NameOccurrence>> entry : m.entrySet()) { + assertEquals("buz", entry.getKey().getImage()); + assertEquals("ArrayList", ((TypedNameDeclaration) entry.getKey()).getTypeImage()); + List<NameOccurrence> u = m.get(entry.getKey()); assertEquals(1, u.size()); NameOccurrence o = u.get(0); int beginLine = o.getLocation().getBeginLine();
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.lang.java.symboltable; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.util.Iterator; import java.util.List; import java.util.Map; import org.junit.Test; import net.sourceforge.pmd.PMD; import net.sourceforge.pmd.lang.ast.Node; import net.sourceforge.pmd.lang.java.ast.ASTBlock; import net.sourceforge.pmd.lang.java.ast.ASTCatchStatement; import net.sourceforge.pmd.lang.java.ast.ASTEqualityExpression; import net.sourceforge.pmd.lang.java.ast.ASTInitializer; import net.sourceforge.pmd.lang.java.ast.ASTMethodDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTVariableDeclaratorId; import net.sourceforge.pmd.lang.symboltable.NameDeclaration; import net.sourceforge.pmd.lang.symboltable.NameOccurrence; import net.sourceforge.pmd.lang.symboltable.Scope; public class AcceptanceTest extends STBBaseTst { @Test public void testClashingSymbols() { parseCode(TEST1); } @Test public void testInitializer() { parseCode(TEST_INITIALIZERS); ASTInitializer a = acu.findDescendantsOfType(ASTInitializer.class).get(0); assertFalse(a.isStatic()); a = acu.findDescendantsOfType(ASTInitializer.class).get(1); assertTrue(a.isStatic()); } @Test public void testCatchBlocks() { parseCode(TEST_CATCH_BLOCKS); ASTCatchStatement c = acu.findDescendantsOfType(ASTCatchStatement.class).get(0); ASTBlock a = c.findDescendantsOfType(ASTBlock.class).get(0); Scope s = a.getScope(); Map<NameDeclaration, List<NameOccurrence>> vars = s.getParent().getDeclarations(); assertEquals(1, vars.size()); NameDeclaration v = vars.keySet().iterator().next(); assertEquals("e", v.getImage()); assertEquals(1, (vars.get(v)).size()); } @Test public void testEq() { parseCode(TEST_EQ); ASTEqualityExpression e = acu.findDescendantsOfType(ASTEqualityExpression.class).get(0); ASTMethodDeclaration method = e.getFirstParentOfType(ASTMethodDeclaration.class); Scope s = method.getScope(); Map<NameDeclaration, List<NameOccurrence>> m = s.getDeclarations(); assertEquals(2, m.size()); for (Map.Entry<NameDeclaration, List<NameOccurrence>> entry : m.entrySet()) { NameDeclaration vnd = entry.getKey(); List<NameOccurrence> usages = entry.getValue(); if (vnd.getImage().equals("a") || vnd.getImage().equals("b")) { assertEquals(1, usages.size()); assertEquals(3, usages.get(0).getLocation().getBeginLine()); } else { fail("Unkown variable " + vnd); } } } @Test public void testFieldFinder() { parseCode(TEST_FIELD); // System.out.println(TEST_FIELD); ASTVariableDeclaratorId declaration = acu.findDescendantsOfType(ASTVariableDeclaratorId.class).get(1); assertEquals(3, declaration.getBeginLine()); assertEquals("bbbbbbbbbb", declaration.getImage()); assertEquals(1, declaration.getUsages().size()); NameOccurrence no = declaration.getUsages().get(0); Node location = no.getLocation(); assertEquals(6, location.getBeginLine()); // System.out.println("variable " + declaration.getImage() + " is used // here: " + location.getImage()); } @Test public void testDemo() { parseCode(TEST_DEMO); // System.out.println(TEST_DEMO); ASTMethodDeclaration node = acu.findDescendantsOfType(ASTMethodDeclaration.class).get(0); Scope s = node.getScope(); Map<NameDeclaration, List<NameOccurrence>> m = s.getDeclarations(); for (Iterator<NameDeclaration> i = m.keySet().iterator(); i.hasNext();) { NameDeclaration d = i.next(); assertEquals("buz", d.getImage()); 
assertEquals("ArrayList", ((TypedNameDeclaration) d).getTypeImage()); List<NameOccurrence> u = m.get(d); assertEquals(1, u.size()); NameOccurrence o = u.get(0); int beginLine = o.getLocation().getBeginLine(); assertEquals(3, beginLine); // System.out.println("Variable: " + d.getImage()); // System.out.println("Type: " + d.getTypeImage()); // System.out.println("Usages: " + u.size()); // System.out.println("Used in line " + beginLine); } } @Test public void testEnum() { parseCode(NameOccurrencesTest.TEST_ENUM); ASTVariableDeclaratorId vdi = acu.findDescendantsOfType(ASTVariableDeclaratorId.class).get(0); List<NameOccurrence> usages = vdi.getUsages(); assertEquals(2, usages.size()); assertEquals(5, usages.get(0).getLocation().getBeginLine()); assertEquals(9, usages.get(1).getLocation().getBeginLine()); } @Test public void testInnerOuterClass() { parseCode(TEST_INNER_CLASS); ASTVariableDeclaratorId vdi = acu.findDescendantsOfType(ASTVariableDeclaratorId.class).get(0); List<NameOccurrence> usages = vdi.getUsages(); assertEquals(2, usages.size()); assertEquals(5, usages.get(0).getLocation().getBeginLine()); assertEquals(10, usages.get(1).getLocation().getBeginLine()); } /** * Unit test for bug #1490 * * @see <a href="https://sourceforge.net/p/pmd/bugs/1490/">#1490 [java] PMD * Error while processing - NullPointerException</a> */ @Test public void testNullPointerEnumValueOfOverloaded() { parseCode("public enum EsmDcVoltageSensor {\n" + " A;\n" + " void bar(int ... args) {\n" + " int idx;\n" + " int startIdx;\n" + " String name = EsmDcVoltageSensor.valueOf((byte) (idx - startIdx)).getName();\n" + " }\n" // that's the overloaded method + " public EsmDCVoltageSensor valueOf(byte b) {\n" + " }\n" + "}\n"); } private static final String TEST_DEMO = "public class Foo {" + PMD.EOL + " void bar(ArrayList buz) { " + PMD.EOL + " buz.add(\"foo\");" + PMD.EOL + " } " + PMD.EOL + "}" + PMD.EOL; private static final String TEST_EQ = "public class Foo {" + PMD.EOL + " boolean foo(String a, String b) { " + PMD.EOL + " return a == b; " + PMD.EOL + " } " + PMD.EOL + "}" + PMD.EOL; private static final String TEST1 = "import java.io.*;" + PMD.EOL + "public class Foo {" + PMD.EOL + " void buz( ) {" + PMD.EOL + " Object o = new Serializable() { int x; };" + PMD.EOL + " Object o1 = new Serializable() { int x; };" + PMD.EOL + " }" + PMD.EOL + "}" + PMD.EOL; private static final String TEST_INITIALIZERS = "public class Foo {" + PMD.EOL + " {} " + PMD.EOL + " static {} " + PMD.EOL + "}" + PMD.EOL; private static final String TEST_CATCH_BLOCKS = "public class Foo {" + PMD.EOL + " void foo() { " + PMD.EOL + " try { " + PMD.EOL + " } catch (Exception e) { " + PMD.EOL + " e.printStackTrace(); " + PMD.EOL + " } " + PMD.EOL + " } " + PMD.EOL + "}" + PMD.EOL; private static final String TEST_FIELD = "public class MyClass {" + PMD.EOL + " private int aaaaaaaaaa; " + PMD.EOL + " boolean bbbbbbbbbb = MyClass.ASCENDING; " + PMD.EOL + " private int zzzzzzzzzz;" + PMD.EOL + " private void doIt() {" + PMD.EOL + " if (bbbbbbbbbb) {" + PMD.EOL + " }" + PMD.EOL + " }" + PMD.EOL + "}" + PMD.EOL; public static final String TEST_INNER_CLASS = "public class Outer {" + PMD.EOL + " private static class Inner {" + PMD.EOL + " private int i;" + PMD.EOL + " private Inner(int i) {" + PMD.EOL + " this.i = i;" + PMD.EOL + " }" + PMD.EOL + " }" + PMD.EOL + " public int modify(int i) {" + PMD.EOL + " Inner in = new Inner(i);" + PMD.EOL + " return in.i;" + PMD.EOL + " }" + PMD.EOL + "}" + PMD.EOL; public static junit.framework.Test suite() { return 
new junit.framework.JUnit4TestAdapter(AcceptanceTest.class); } }
1
13,826
This is wrong; it should be `entry.getValue()`. (See the sketch after this record.)
pmd-pmd
java
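The point of the review comment above, in a minimal self-contained form: once the loop iterates over `entrySet()`, the usages come from `entry.getValue()` rather than a second `m.get(...)` lookup. The types below are simplified stand-ins for PMD's `NameDeclaration` and `NameOccurrence`.

```java
import java.util.List;
import java.util.Map;

public class EntrySetLookup {
    public static void main(String[] args) {
        // "buz" declared once, used on line 3: a stand-in for the scope map
        Map<String, List<Integer>> declarations = Map.of("buz", List.of(3));
        for (Map.Entry<String, List<Integer>> entry : declarations.entrySet()) {
            List<Integer> usages = entry.getValue(); // no extra map lookup
            System.out.println(entry.getKey() + " used on line " + usages.get(0));
        }
    }
}
```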
@@ -363,7 +363,7 @@ func (r *ReconcileControlPlaneCerts) getServingCertificatesJSONPatch(cd *hivev1. additional.Domain, remoteSecretName(bundle.CertificateSecretRef.Name, cd))) } - var kubeAPIServerNamedCertsTemplate = `[ { "op": "replace", "path": "/spec/servingCerts/namedCertificates", "value": [ %s ] } ]` + var kubeAPIServerNamedCertsTemplate = `[ { "op": "add", "path": "/spec/servingCerts", "value": {} }, { "op": "add", "path": "/spec/servingCerts/namedCertificates", "value": [ ] }, { "op": "replace", "path": "/spec/servingCerts/namedCertificates", "value": [ %s ] } ]` namedCerts := buf.String() return fmt.Sprintf(kubeAPIServerNamedCertsTemplate, namedCerts), nil
1
package controlplanecerts import ( "context" "crypto/md5" "fmt" "io" "net/url" "sort" "strings" "time" "github.com/pkg/errors" log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" apihelpers "github.com/openshift/hive/pkg/apis/helpers" hivev1 "github.com/openshift/hive/pkg/apis/hive/v1" "github.com/openshift/hive/pkg/constants" hivemetrics "github.com/openshift/hive/pkg/controller/metrics" controllerutils "github.com/openshift/hive/pkg/controller/utils" "github.com/openshift/hive/pkg/remoteclient" "github.com/openshift/hive/pkg/resource" k8slabels "github.com/openshift/hive/pkg/util/labels" ) const ( ControllerName = hivev1.ControlPlaneCertsControllerName openshiftConfigNamespace = "openshift-config" certsNotFoundReason = "ControlPlaneCertificatesNotFound" certsNotFoundMessage = "One or more serving certificates for the cluster control plane are missing" certsFoundReason = "ControlPlaneCertificatesFound" certsFoundMessage = "Control plane certificates are present" kubeAPIServerPatchTemplate = `[ {"op": "replace", "path": "/spec/forceRedeploymentReason", "value": %q } ]` ) var ( secretCheckInterval = 2 * time.Minute ) type applier interface { ApplyRuntimeObject(obj runtime.Object, scheme *runtime.Scheme) (resource.ApplyResult, error) } // Add creates a new ControlPlaneCerts Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
func Add(mgr manager.Manager) error { logger := log.WithField("controller", ControllerName) concurrentReconciles, clientRateLimiter, queueRateLimiter, err := controllerutils.GetControllerConfig(mgr.GetClient(), ControllerName) if err != nil { logger.WithError(err).Error("could not get controller configurations") return err } return AddToManager(mgr, NewReconciler(mgr, clientRateLimiter), concurrentReconciles, queueRateLimiter) } // NewReconciler returns a new reconcile.Reconciler func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) reconcile.Reconciler { logger := log.WithField("controller", ControllerName) helper, err := resource.NewHelperWithMetricsFromRESTConfig(mgr.GetConfig(), ControllerName, logger) if err != nil { // Hard exit if we can't create this controller logger.WithError(err).Fatal("unable to create resource helper") } r := &ReconcileControlPlaneCerts{ Client: controllerutils.NewClientWithMetricsOrDie(mgr, ControllerName, &rateLimiter), scheme: mgr.GetScheme(), applier: helper, } return r } // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error { // Create a new controller c, err := controller.New("controlplanecerts-controller", mgr, controller.Options{ Reconciler: r, MaxConcurrentReconciles: concurrentReconciles, RateLimiter: rateLimiter, }) if err != nil { return err } // Watch for changes to ClusterDeployment err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestForObject{}) if err != nil { return err } return nil } var _ reconcile.Reconciler = &ReconcileControlPlaneCerts{} // ReconcileControlPlaneCerts reconciles a ClusterDeployment object type ReconcileControlPlaneCerts struct { client.Client scheme *runtime.Scheme applier applier } // Reconcile reads that state of the cluster for a ClusterDeployment object and makes changes based on the state read // and what is in the ClusterDeployment.Spec func (r *ReconcileControlPlaneCerts) Reconcile(request reconcile.Request) (reconcile.Result, error) { cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) cdLog.Info("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, cdLog) defer recobsrv.ObserveControllerReconcileTime() // Fetch the ClusterDeployment instance cd := &hivev1.ClusterDeployment{} err := r.Get(context.TODO(), request.NamespacedName, cd) if err != nil { if apierrors.IsNotFound(err) { return reconcile.Result{}, nil } return reconcile.Result{}, err } // Ensure owner references are correctly set err = controllerutils.ReconcileOwnerReferences(cd, generateOwnershipUniqueKeys(cd), r, r.scheme, cdLog) if err != nil { cdLog.WithError(err).Error("Error reconciling object ownership") return reconcile.Result{}, err } // If the clusterdeployment is deleted, do not reconcile. 
if cd.DeletionTimestamp != nil { return reconcile.Result{}, nil } if !cd.Spec.Installed { return reconcile.Result{}, nil } existingSyncSet := &hivev1.SyncSet{} existingSyncSetNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: GenerateControlPlaneCertsSyncSetName(cd.Name)} err = r.Get(context.TODO(), existingSyncSetNamespacedName, existingSyncSet) if err != nil && !apierrors.IsNotFound(err) { cdLog.WithError(err).Error("failed to retrieve existing control plane certs syncset") return reconcile.Result{}, err } if apierrors.IsNotFound(err) { existingSyncSet = nil } secrets, secretsAvailable, err := r.getControlPlaneSecrets(cd, cdLog) if err != nil { cdLog.WithError(err).Error("failed to check cert secret availability") return reconcile.Result{}, err } // clear condition if certs were found updated, err := r.setCertsNotFoundCondition(cd, !secretsAvailable, cdLog) if err != nil { cdLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot update cluster deployment secrets not found condition") return reconcile.Result{}, err } if updated { return reconcile.Result{}, nil } if !secretsAvailable { cdLog.Debugf("cert secrets are not available yet, requeueing clusterdeployment for %s", secretCheckInterval) return reconcile.Result{RequeueAfter: secretCheckInterval}, nil } if len(secrets) == 0 && existingSyncSet == nil { cdLog.Debug("no control plane certs needed, and no syncset exists, nothing to do") return reconcile.Result{}, nil } desiredSyncSet, err := r.generateControlPlaneCertsSyncSet(cd, secrets, cdLog) if err != nil { cdLog.WithError(err).Error("failed to generate control plane certs syncset") return reconcile.Result{}, err } if _, err = r.applier.ApplyRuntimeObject(desiredSyncSet, r.scheme); err != nil { cdLog.WithError(err).Error("failed to apply control plane certificates syncset") return reconcile.Result{}, err } return reconcile.Result{}, nil } func (r *ReconcileControlPlaneCerts) getControlPlaneSecrets(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) ([]*corev1.Secret, bool, error) { secretsNeeded, err := getControlPlaneSecretNames(cd, cdLog) if err != nil { return nil, false, err } if len(secretsNeeded) == 0 { cdLog.Debug("the control plane does not require any cert bundles") return nil, true, nil } secrets := []*corev1.Secret{} for _, secretName := range secretsNeeded { secret := &corev1.Secret{} err := r.Get(context.TODO(), types.NamespacedName{Namespace: cd.Namespace, Name: secretName}, secret) if err != nil { if apierrors.IsNotFound(err) { cdLog.WithField("secret", secretName).Debug("certificate secret is not available yet, will check later") return nil, false, nil } cdLog.WithError(err).WithField("secret", secretName).Debug("error retrieving certificate secret") return nil, false, err } secrets = append(secrets, secret) } cdLog.Debug("all required certificate secrets are available") return secrets, true, nil } func getControlPlaneSecretNames(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) ([]string, error) { certs := sets.NewString() if cd.Spec.ControlPlaneConfig.ServingCertificates.Default != "" { certs.Insert(cd.Spec.ControlPlaneConfig.ServingCertificates.Default) } for _, additional := range cd.Spec.ControlPlaneConfig.ServingCertificates.Additional { certs.Insert(additional.Name) } if certs.Len() == 0 { return nil, nil } cdLog.WithField("certbundles", certs.List()).Debug("cert bundles used by the control plane") secretsNeeded := sets.NewString() for _, cert := range certs.List() { bundle := certificateBundle(cd, cert) if bundle == nil { // should 
not happen if clusterdeployment was validated return nil, fmt.Errorf("no certificate bundle was found for %s", cert) } secretsNeeded.Insert(bundle.CertificateSecretRef.Name) } cdLog.WithField("secrets", secretsNeeded.List()).Debug("certificate secrets needed by the control plane") return secretsNeeded.List(), nil } func (r *ReconcileControlPlaneCerts) generateControlPlaneCertsSyncSet(cd *hivev1.ClusterDeployment, secrets []*corev1.Secret, cdLog log.FieldLogger) (*hivev1.SyncSet, error) { cdLog.Debug("generating syncset for control plane secrets") syncSet := &hivev1.SyncSet{ ObjectMeta: metav1.ObjectMeta{ Name: GenerateControlPlaneCertsSyncSetName(cd.Name), Namespace: cd.Namespace, }, Spec: hivev1.SyncSetSpec{ SyncSetCommonSpec: hivev1.SyncSetCommonSpec{ ResourceApplyMode: hivev1.UpsertResourceApplyMode, }, ClusterDeploymentRefs: []corev1.LocalObjectReference{ { Name: cd.Name, }, }, }, } // Using SecretMapping to sync secrets secretMappings := []hivev1.SecretMapping{} for _, secret := range secrets { cdLog.WithField("secret", secret.Name).Debug("adding secret to secretMappings list") secretMapping := hivev1.SecretMapping{ SourceRef: hivev1.SecretReference{ Namespace: secret.Namespace, Name: secret.Name, }, TargetRef: hivev1.SecretReference{ Name: remoteSecretName(secret.Name, cd), Namespace: openshiftConfigNamespace, }, } secretMappings = append(secretMappings, secretMapping) } syncSet.Spec.Secrets = secretMappings servingCertsPatchStr, err := r.getServingCertificatesJSONPatch(cd, cdLog) if err != nil { cdLog.WithError(err).Error("error building serving certificates JSON patch") return nil, err } cdLog.Debugf("build serving certs patch: %s", servingCertsPatchStr) servingCertsPatch := hivev1.SyncObjectPatch{ APIVersion: "config.openshift.io/v1", Kind: "APIServer", Name: "cluster", Patch: servingCertsPatchStr, PatchType: "json", } // kubeAPIServerPatch sets the forceRedeploymentField on the kube API server cluster operator // to a hash of all the cert secrets. If the content of the certs secrets changes, then the new // hash value will force the kube API server to redeploy and apply the new certs. 
kubeAPIServerPatch := hivev1.SyncObjectPatch{ APIVersion: "operator.openshift.io/v1", Kind: "KubeAPIServer", Name: "cluster", Patch: fmt.Sprintf(kubeAPIServerPatchTemplate, secretsHash(secrets)), PatchType: "json", } syncSet.Spec.Patches = []hivev1.SyncObjectPatch{servingCertsPatch, kubeAPIServerPatch} // ensure the syncset gets cleaned up when the clusterdeployment is deleted cdLog.WithField("derivedObject", syncSet.Name).Debug("Setting labels on derived object") syncSet.Labels = k8slabels.AddLabel(syncSet.Labels, constants.ClusterDeploymentNameLabel, cd.Name) syncSet.Labels = k8slabels.AddLabel(syncSet.Labels, constants.SyncSetTypeLabel, constants.SyncSetTypeControlPlaneCerts) if err := controllerutil.SetControllerReference(cd, syncSet, r.scheme); err != nil { cdLog.WithError(err).Error("error setting owner reference") return nil, err } return syncSet, nil } func (r *ReconcileControlPlaneCerts) getServingCertificatesJSONPatch(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (string, error) { var buf strings.Builder additionalCerts := cd.Spec.ControlPlaneConfig.ServingCertificates.Additional if cd.Spec.ControlPlaneConfig.ServingCertificates.Default != "" { cdLog.Debug("setting default serving certificate for control plane") apidomain, err := r.defaultControlPlaneDomain(cd) if err != nil { cdLog.WithError(err).Error("failed to get control plane domain") return "", err } cpCert := hivev1.ControlPlaneAdditionalCertificate{ Name: cd.Spec.ControlPlaneConfig.ServingCertificates.Default, Domain: apidomain, } additionalCerts = append([]hivev1.ControlPlaneAdditionalCertificate{cpCert}, additionalCerts...) } for i, additional := range additionalCerts { if i > 0 { buf.WriteString(",") } bundle := certificateBundle(cd, additional.Name) cdLog.WithField("name", additional.Name).Debug("adding named certificate to control plane config") buf.WriteString(fmt.Sprintf(` { "names": [ "%s" ], "servingCertificate": { "name": "%s" } }`, additional.Domain, remoteSecretName(bundle.CertificateSecretRef.Name, cd))) } var kubeAPIServerNamedCertsTemplate = `[ { "op": "replace", "path": "/spec/servingCerts/namedCertificates", "value": [ %s ] } ]` namedCerts := buf.String() return fmt.Sprintf(kubeAPIServerNamedCertsTemplate, namedCerts), nil } func (r *ReconcileControlPlaneCerts) setCertsNotFoundCondition(cd *hivev1.ClusterDeployment, notFound bool, cdLog log.FieldLogger) (bool, error) { status := corev1.ConditionFalse reason := certsFoundReason message := certsFoundMessage if notFound { status = corev1.ConditionTrue reason = certsNotFoundReason message = certsNotFoundMessage } conds, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck( cd.Status.Conditions, hivev1.ControlPlaneCertificateNotFoundCondition, status, reason, message, controllerutils.UpdateConditionNever, ) if !changed { return false, nil } cd.Status.Conditions = conds return true, r.Status().Update(context.TODO(), cd) } // defaultControlPlaneDomain will attempt to return the domain/hostname for the secondary API URL // for the cluster based on the contents of the clusterDeployment's adminKubeConfig secret. 
func (r *ReconcileControlPlaneCerts) defaultControlPlaneDomain(cd *hivev1.ClusterDeployment) (string, error) { apiurl, err := remoteclient.InitialURL(r.Client, cd) if err != nil { return "", errors.Wrap(err, "failed to fetch initial API URL") } u, err := url.Parse(apiurl) if err != nil { return "", errors.Wrap(err, "failed to parse cluster's API URL") } return u.Hostname(), nil } func remoteSecretName(secretName string, cd *hivev1.ClusterDeployment) string { return apihelpers.GetResourceName(cd.Name, secretName) } func certificateBundle(cd *hivev1.ClusterDeployment, name string) *hivev1.CertificateBundleSpec { for i, bundle := range cd.Spec.CertificateBundles { if bundle.Name == name { return &cd.Spec.CertificateBundles[i] } } return nil } // GenerateControlPlaneCertsSyncSetName generates the name of the SyncSet that holds the control plane certificates to sync. func GenerateControlPlaneCertsSyncSetName(name string) string { return apihelpers.GetResourceName(name, constants.ControlPlaneCertificateSuffix) } func writeSecretData(w io.Writer, secret *corev1.Secret) { // sort secret keys so we get a repeatable hash keys := make([]string, 0, len(secret.Data)) for k := range secret.Data { keys = append(keys, k) } sort.StringSlice(keys).Sort() fmt.Fprintf(w, "%s/%s\n", secret.Namespace, secret.Name) for _, k := range keys { fmt.Fprintf(w, "%s: %x\n", k, secret.Data[k]) } } func secretsHash(secrets []*corev1.Secret) string { // sort secrets by name so we get a repeatable hash sort.Slice(secrets, func(i, j int) bool { return secrets[i].Name < secrets[j].Name }) hashWriter := md5.New() for _, secret := range secrets { writeSecretData(hashWriter, secret) } return fmt.Sprintf("%x", hashWriter.Sum(nil)) } func generateOwnershipUniqueKeys(owner hivev1.MetaRuntimeObject) []*controllerutils.OwnershipUniqueKey { return []*controllerutils.OwnershipUniqueKey{ { TypeToList: &hivev1.SyncSetList{}, LabelSelector: map[string]string{ constants.ClusterDeploymentNameLabel: owner.GetName(), constants.SyncSetTypeLabel: constants.SyncSetTypeControlPlaneCerts, }, Controlled: true, }, } }
1
15,636
nit: why can't we combine the two add operations into one so that we add `{"namedCertificates": []}` to `/spec/servingCerts`?
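A minimal sketch of the single combined operation this nit seems to be asking for — one JSON Patch "add" on /spec/servingCerts whose value already carries the namedCertificates array — assuming the two separate add operations under discussion lived in an earlier revision of the template; the constant name and the sample certificate values below are illustrative, not the repository's actual code.

package main

import "fmt"

// combinedServingCertsPatch is a hypothetical one-operation alternative:
// instead of adding /spec/servingCerts and then /spec/servingCerts/namedCertificates
// in two steps, a single "add" sets servingCerts with namedCertificates inline.
const combinedServingCertsPatch = `[
  {
    "op": "add",
    "path": "/spec/servingCerts",
    "value": { "namedCertificates": [ %s ] }
  }
]`

func main() {
	// In the controller above this fragment would be built from the cluster's
	// certificate bundles by getServingCertificatesJSONPatch; the values here
	// are placeholders.
	namedCert := `{ "names": [ "api.example.com" ], "servingCertificate": { "name": "example-serving-cert" } }`
	fmt.Printf(combinedServingCertsPatch+"\n", namedCert)
}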
openshift-hive
go
@@ -3,13 +3,14 @@ // See the LICENSE file in the project root for more information. // ==++== -// - -// +// + +// // ==--== #include <assert.h> #include "sos.h" #include "safemath.h" +#include "holder.h" // This is the increment for the segment lookup data
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. // ==++== // // // ==--== #include <assert.h> #include "sos.h" #include "safemath.h" // This is the increment for the segment lookup data const int nSegLookupStgIncrement = 100; #define CCH_STRING_PREFIX_SUMMARY 64 /**********************************************************************\ * Routine Description: * * * * This function is called to update GC heap statistics. * * * \**********************************************************************/ void HeapStat::Add(DWORD_PTR aData, DWORD aSize) { if (head == 0) { head = new Node(); if (head == NULL) { ReportOOM(); ControlC = TRUE; return; } if (bHasStrings) { size_t capacity_pNew = _wcslen((WCHAR*)aData) + 1; WCHAR *pNew = new WCHAR[capacity_pNew]; if (pNew == NULL) { ReportOOM(); ControlC = TRUE; return; } wcscpy_s(pNew, capacity_pNew, (WCHAR*)aData); aData = (DWORD_PTR)pNew; } head->data = aData; } Node *walk = head; int cmp = 0; for (;;) { if (IsInterrupt()) return; cmp = CompareData(aData, walk->data); if (cmp == 0) break; if (cmp < 0) { if (walk->left == NULL) break; walk = walk->left; } else { if (walk->right == NULL) break; walk = walk->right; } } if (cmp == 0) { walk->count ++; walk->totalSize += aSize; } else { Node *node = new Node(); if (node == NULL) { ReportOOM(); ControlC = TRUE; return; } if (bHasStrings) { size_t capacity_pNew = _wcslen((WCHAR*)aData) + 1; WCHAR *pNew = new WCHAR[capacity_pNew]; if (pNew == NULL) { ReportOOM(); ControlC = TRUE; return; } wcscpy_s(pNew, capacity_pNew, (WCHAR*)aData); aData = (DWORD_PTR)pNew; } node->data = aData; node->totalSize = aSize; node->count ++; if (cmp < 0) { walk->left = node; } else { walk->right = node; } } } /**********************************************************************\ * Routine Description: * * * * This function compares two nodes in the tree. * * * \**********************************************************************/ int HeapStat::CompareData(DWORD_PTR d1, DWORD_PTR d2) { if (bHasStrings) return _wcscmp((WCHAR*)d1, (WCHAR*)d2); if (d1 > d2) return 1; if (d1 < d2) return -1; return 0; } /**********************************************************************\ * Routine Description: * * * * This function is called to sort all entries in the heap stat. 
* * * \**********************************************************************/ void HeapStat::Sort () { Node *root = head; head = NULL; ReverseLeftMost (root); Node *sortRoot = NULL; while (head) { Node *tmp = head; head = head->left; if (tmp->right) ReverseLeftMost (tmp->right); // add tmp tmp->right = NULL; tmp->left = NULL; SortAdd (sortRoot, tmp); } head = sortRoot; Linearize(); //reverse the order root = head; head = NULL; sortRoot = NULL; while (root) { Node *tmp = root->right; root->left = NULL; root->right = NULL; LinearAdd (sortRoot, root); root = tmp; } head = sortRoot; } void HeapStat::Linearize() { // Change binary tree to a linear tree Node *root = head; head = NULL; ReverseLeftMost (root); Node *sortRoot = NULL; while (head) { Node *tmp = head; head = head->left; if (tmp->right) ReverseLeftMost (tmp->right); // add tmp tmp->right = NULL; tmp->left = NULL; LinearAdd (sortRoot, tmp); } head = sortRoot; fLinear = TRUE; } void HeapStat::ReverseLeftMost (Node *root) { while (root) { Node *tmp = root->left; root->left = head; head = root; root = tmp; } } /**********************************************************************\ * Routine Description: * * * * This function is called to help to sort heap stat. * * * \**********************************************************************/ void HeapStat::SortAdd (Node *&root, Node *entry) { if (root == NULL) { root = entry; } else { Node *parent = root; Node *ptr = root; while (ptr) { parent = ptr; if (ptr->totalSize < entry->totalSize) ptr = ptr->right; else ptr = ptr->left; } if (parent->totalSize < entry->totalSize) parent->right = entry; else parent->left = entry; } } void HeapStat::LinearAdd(Node *&root, Node *entry) { if (root == NULL) { root = entry; } else { entry->right = root; root = entry; } } /**********************************************************************\ * Routine Description: * * * * This function is called to print GC heap statistics. * * * \**********************************************************************/ void HeapStat::Print(const char* label /* = NULL */) { if (label == NULL) { label = "Statistics:\n"; } ExtOut(label); if (bHasStrings) ExtOut("%8s %12s %s\n", "Count", "TotalSize", "String Value"); else ExtOut("%" POINTERSIZE "s %8s %12s %s\n","MT", "Count", "TotalSize", "Class Name"); Node *root = head; int ncount = 0; while (root) { if (IsInterrupt()) return; ncount += root->count; if (bHasStrings) { ExtOut("%8d %12I64u \"%S\"\n", root->count, (unsigned __int64)root->totalSize, root->data); } else { DMLOut("%s %8d %12I64u ", DMLDumpHeapMT(root->data), root->count, (unsigned __int64)root->totalSize); if (IsMTForFreeObj(root->data)) { ExtOut("%9s\n", "Free"); } else { wcscpy_s(g_mdName, mdNameLen, W("UNKNOWN")); NameForMT_s((DWORD_PTR) root->data, g_mdName, mdNameLen); ExtOut("%S\n", g_mdName); } } root = root->right; } ExtOut ("Total %d objects\n", ncount); } void HeapStat::Delete() { if (head == NULL) return; // Ensure the data structure is already linearized. if (!fLinear) Linearize(); while (head) { // The list is linearized on such that the left node is always null. 
Node *tmp = head; head = head->right; if (bHasStrings) delete[] ((WCHAR*)tmp->data); delete tmp; } // return to default state bHasStrings = FALSE; fLinear = FALSE; } // ----------------------------------------------------------------------- // // MethodTableCache implementation // // Used during heap traversals for quick object size computation // MethodTableInfo* MethodTableCache::Lookup (DWORD_PTR aData) { Node** addHere = &head; if (head != 0) { Node *walk = head; int cmp = 0; for (;;) { cmp = CompareData(aData, walk->data); if (cmp == 0) return &walk->info; if (cmp < 0) { if (walk->left == NULL) { addHere = &walk->left; break; } walk = walk->left; } else { if (walk->right == NULL) { addHere = &walk->right; break; } walk = walk->right; } } } Node* newNode = new Node(aData); if (newNode == NULL) { ReportOOM(); return NULL; } *addHere = newNode; return &newNode->info; } /**********************************************************************\ * Routine Description: * * * * This function compares two nodes in the tree. * * * \**********************************************************************/ int MethodTableCache::CompareData(DWORD_PTR d1, DWORD_PTR d2) { if (d1 > d2) return 1; if (d1 < d2) return -1; return 0; } void MethodTableCache::ReverseLeftMost (Node *root) { if (root) { if (root->left) ReverseLeftMost(root->left); if (root->right) ReverseLeftMost(root->right); delete root; } } void MethodTableCache::Clear() { Node *root = head; head = NULL; ReverseLeftMost (root); } MethodTableCache g_special_mtCache; size_t Align (size_t nbytes) { return (nbytes + ALIGNCONST) & ~ALIGNCONST; } size_t AlignLarge(size_t nbytes) { return (nbytes + ALIGNCONSTLARGE) & ~ALIGNCONSTLARGE; } /**********************************************************************\ * Routine Description: * * * * Print the gc heap info. * * * \**********************************************************************/ void GCPrintGenerationInfo(const DacpGcHeapDetails &heap) { UINT n; for (n = 0; n <= GetMaxGeneration(); n ++) { if (IsInterrupt()) return; ExtOut("generation %d starts at 0x%p\n", n, SOS_PTR(heap.generation_table[n].allocation_start)); } // We also need to look at the gen0 alloc context. 
ExtOut("ephemeral segment allocation context: "); if (heap.generation_table[0].allocContextPtr) { ExtOut("(0x%p, 0x%p)\n", SOS_PTR(heap.generation_table[0].allocContextPtr), SOS_PTR(heap.generation_table[0].allocContextLimit + Align(min_obj_size))); } else { ExtOut("none\n"); } } void GCPrintSegmentInfo(const DacpGcHeapDetails &heap, DWORD_PTR &total_size) { DWORD_PTR dwAddrSeg; DacpHeapSegmentData segment; dwAddrSeg = (DWORD_PTR)heap.generation_table[GetMaxGeneration()].start_segment; total_size = 0; // the loop below will terminate, because we retrieved at most nMaxHeapSegmentCount segments while (dwAddrSeg != (DWORD_PTR)heap.generation_table[0].start_segment) { if (IsInterrupt()) return; if (segment.Request(g_sos, dwAddrSeg, heap) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddrSeg)); return; } ExtOut("%p %p %p 0x%" POINTERSIZE_TYPE "x(%" POINTERSIZE_TYPE "d)\n", SOS_PTR(dwAddrSeg), SOS_PTR(segment.mem), SOS_PTR(segment.allocated), (ULONG_PTR)(segment.allocated - segment.mem), (ULONG_PTR)(segment.allocated - segment.mem)); total_size += (DWORD_PTR) (segment.allocated - segment.mem); dwAddrSeg = (DWORD_PTR)segment.next; } if (segment.Request(g_sos, dwAddrSeg, heap) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddrSeg)); return; } DWORD_PTR end = (DWORD_PTR)heap.alloc_allocated; ExtOut("%p %p %p 0x%" POINTERSIZE_TYPE "x(%" POINTERSIZE_TYPE "d)\n", SOS_PTR(dwAddrSeg), SOS_PTR(segment.mem), SOS_PTR(end), (ULONG_PTR)(end - (DWORD_PTR)segment.mem), (ULONG_PTR)(end - (DWORD_PTR)segment.mem)); total_size += end - (DWORD_PTR)segment.mem; } void GCPrintLargeHeapSegmentInfo(const DacpGcHeapDetails &heap, DWORD_PTR &total_size) { DWORD_PTR dwAddrSeg; DacpHeapSegmentData segment; dwAddrSeg = (DWORD_PTR)heap.generation_table[GetMaxGeneration()+1].start_segment; // total_size = 0; // the loop below will terminate, because we retrieved at most nMaxHeapSegmentCount segments while (dwAddrSeg != NULL) { if (IsInterrupt()) return; if (segment.Request(g_sos, dwAddrSeg, heap) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddrSeg)); return; } ExtOut("%p %p %p 0x%" POINTERSIZE_TYPE "x(%" POINTERSIZE_TYPE "d)\n", SOS_PTR(dwAddrSeg), SOS_PTR(segment.mem), SOS_PTR(segment.allocated), (ULONG_PTR)(segment.allocated - segment.mem), (ULONG_PTR)(segment.allocated - segment.mem)); total_size += (DWORD_PTR) (segment.allocated - segment.mem); dwAddrSeg = (DWORD_PTR)segment.next; } } void GCHeapInfo(const DacpGcHeapDetails &heap, DWORD_PTR &total_size) { GCPrintGenerationInfo(heap); ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n", "segment", "begin", "allocated", "size"); GCPrintSegmentInfo(heap, total_size); ExtOut("Large object heap starts at 0x%p\n", SOS_PTR(heap.generation_table[GetMaxGeneration()+1].allocation_start)); ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n", "segment", "begin", "allocated", "size"); GCPrintLargeHeapSegmentInfo(heap,total_size); } BOOL GCObjInGeneration(TADDR taddrObj, const DacpGcHeapDetails &heap, const TADDR_SEGINFO& /*seg*/, int& gen, TADDR_RANGE& allocCtx) { gen = -1; for (UINT n = 0; n <= GetMaxGeneration(); n ++) { if (taddrObj >= TO_TADDR(heap.generation_table[n].allocation_start)) { gen = n; break; } } // We also need to look at the gen0 alloc context. 
if (heap.generation_table[0].allocContextPtr && taddrObj >= TO_TADDR(heap.generation_table[0].allocContextPtr) && taddrObj < TO_TADDR(heap.generation_table[0].allocContextLimit) + Align(min_obj_size)) { gen = 0; allocCtx.start = (TADDR)heap.generation_table[0].allocContextPtr; allocCtx.end = (TADDR)heap.generation_table[0].allocContextLimit; } else { allocCtx.start = allocCtx.end = 0; } return (gen != -1); } BOOL GCObjInSegment(TADDR taddrObj, const DacpGcHeapDetails &heap, TADDR_SEGINFO& rngSeg, int& gen, TADDR_RANGE& allocCtx) { TADDR taddrSeg; DacpHeapSegmentData dacpSeg; taddrSeg = (TADDR)heap.generation_table[GetMaxGeneration()].start_segment; // the loop below will terminate, because we retrieved at most nMaxHeapSegmentCount segments while (taddrSeg != (TADDR)heap.generation_table[0].start_segment) { if (IsInterrupt()) return FALSE; if (dacpSeg.Request(g_sos, taddrSeg, heap) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(taddrSeg)); return FALSE; } if (taddrObj >= TO_TADDR(dacpSeg.mem) && taddrObj < TO_TADDR(dacpSeg.allocated)) { rngSeg.segAddr = (TADDR)dacpSeg.segmentAddr; rngSeg.start = (TADDR)dacpSeg.mem; rngSeg.end = (TADDR)dacpSeg.allocated; gen = 2; allocCtx.start = allocCtx.end = 0; return TRUE; } taddrSeg = (TADDR)dacpSeg.next; } // the ephemeral segment if (dacpSeg.Request(g_sos, taddrSeg, heap) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(taddrSeg)); return FALSE; } if (taddrObj >= TO_TADDR(dacpSeg.mem) && taddrObj < TO_TADDR(heap.alloc_allocated)) { if (GCObjInGeneration(taddrObj, heap, rngSeg, gen, allocCtx)) { rngSeg.segAddr = (TADDR)dacpSeg.segmentAddr; rngSeg.start = (TADDR)dacpSeg.mem; rngSeg.end = (TADDR)heap.alloc_allocated; return TRUE; } } return FALSE; } BOOL GCObjInLargeSegment(TADDR taddrObj, const DacpGcHeapDetails &heap, TADDR_SEGINFO& rngSeg) { TADDR taddrSeg; DacpHeapSegmentData dacpSeg; taddrSeg = (TADDR)heap.generation_table[GetMaxGeneration()+1].start_segment; // the loop below will terminate, because we retrieved at most nMaxHeapSegmentCount segments while (taddrSeg != NULL) { if (IsInterrupt()) return FALSE; if (dacpSeg.Request(g_sos, taddrSeg, heap) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(taddrSeg)); return FALSE; } if (taddrObj >= TO_TADDR(dacpSeg.mem) && taddrObj && taddrObj < TO_TADDR(dacpSeg.allocated)) { rngSeg.segAddr = (TADDR)dacpSeg.segmentAddr; rngSeg.start = (TADDR)dacpSeg.mem; rngSeg.end = (TADDR)dacpSeg.allocated; return TRUE; } taddrSeg = (TADDR)dacpSeg.next; } return FALSE; } BOOL GCObjInHeap(TADDR taddrObj, const DacpGcHeapDetails &heap, TADDR_SEGINFO& rngSeg, int& gen, TADDR_RANGE& allocCtx, BOOL &bLarge) { if (GCObjInSegment(taddrObj, heap, rngSeg, gen, allocCtx)) { bLarge = FALSE; return TRUE; } if (GCObjInLargeSegment(taddrObj, heap, rngSeg)) { bLarge = TRUE; gen = GetMaxGeneration()+1; allocCtx.start = allocCtx.end = 0; return TRUE; } return FALSE; } #ifndef FEATURE_PAL // this function updates genUsage to reflect statistics from the range defined by [start, end) void GCGenUsageStats(TADDR start, TADDR end, const std::unordered_set<TADDR> &liveObjs, const DacpGcHeapDetails &heap, BOOL bLarge, const AllocInfo *pAllocInfo, GenUsageStat *genUsage) { // if this is an empty segment or generation return if (start >= end) { return; } // otherwise it should start with a valid object _ASSERTE(sos::IsObject(start)); // update the "allocd" field genUsage->allocd += end - start; size_t objSize = 0; for (TADDR taddrObj = start; taddrObj < end; taddrObj += objSize) { TADDR taddrMT; 
move_xp(taddrMT, taddrObj); taddrMT &= ~3; // skip allocation contexts if (!bLarge) { // Is this the beginning of an allocation context? int i; for (i = 0; i < pAllocInfo->num; i ++) { if (taddrObj == (TADDR)pAllocInfo->array[i].alloc_ptr) { ExtDbgOut("Skipping allocation context: [%#p-%#p)\n", SOS_PTR(pAllocInfo->array[i].alloc_ptr), SOS_PTR(pAllocInfo->array[i].alloc_limit)); taddrObj = (TADDR)pAllocInfo->array[i].alloc_limit + Align(min_obj_size); break; } } if (i < pAllocInfo->num) { // we already adjusted taddrObj, so reset objSize objSize = 0; continue; } // We also need to look at the gen0 alloc context. if (taddrObj == (DWORD_PTR) heap.generation_table[0].allocContextPtr) { taddrObj = (DWORD_PTR) heap.generation_table[0].allocContextLimit + Align(min_obj_size); // we already adjusted taddrObj, so reset objSize objSize = 0; continue; } // Are we at the end of gen 0? if (taddrObj == end - Align(min_obj_size)) { objSize = 0; break; } } BOOL bContainsPointers; BOOL bMTOk = GetSizeEfficient(taddrObj, taddrMT, bLarge, objSize, bContainsPointers); if (!bMTOk) { ExtErr("bad object: %#p - bad MT %#p\n", SOS_PTR(taddrObj), SOS_PTR(taddrMT)); // set objSize to size_t to look for the next valid MT objSize = sizeof(TADDR); continue; } // at this point we should have a valid objSize, and there whould be no // integer overflow when moving on to next object in heap _ASSERTE(objSize > 0 && taddrObj < taddrObj + objSize); if (objSize == 0 || taddrObj > taddrObj + objSize) { break; } if (IsMTForFreeObj(taddrMT)) { genUsage->freed += objSize; } else if (!(liveObjs.empty()) && liveObjs.find(taddrObj) == liveObjs.end()) { genUsage->unrooted += objSize; } } } #endif // !FEATURE_PAL BOOL GCHeapUsageStats(const DacpGcHeapDetails& heap, BOOL bIncUnreachable, HeapUsageStat *hpUsage) { memset(hpUsage, 0, sizeof(*hpUsage)); AllocInfo allocInfo; allocInfo.Init(); // 1. Start with small object segments TADDR taddrSeg; DacpHeapSegmentData dacpSeg; taddrSeg = (TADDR)heap.generation_table[GetMaxGeneration()].start_segment; #ifndef FEATURE_PAL // this will create the bitmap of rooted objects only if bIncUnreachable is true GCRootImpl gcroot; std::unordered_set<TADDR> emptyLiveObjs; const std::unordered_set<TADDR> &liveObjs = (bIncUnreachable ? gcroot.GetLiveObjects() : emptyLiveObjs); // 1a. enumerate all non-ephemeral segments while (taddrSeg != (TADDR)heap.generation_table[0].start_segment) { if (IsInterrupt()) return FALSE; if (dacpSeg.Request(g_sos, taddrSeg, heap) != S_OK) { ExtErr("Error requesting heap segment %p\n", SOS_PTR(taddrSeg)); return FALSE; } GCGenUsageStats((TADDR)dacpSeg.mem, (TADDR)dacpSeg.allocated, liveObjs, heap, FALSE, &allocInfo, &hpUsage->genUsage[2]); taddrSeg = (TADDR)dacpSeg.next; } #endif // 1b. now handle the ephemeral segment if (dacpSeg.Request(g_sos, taddrSeg, heap) != S_OK) { ExtErr("Error requesting heap segment %p\n", SOS_PTR(taddrSeg)); return FALSE; } TADDR endGen = TO_TADDR(heap.alloc_allocated); for (UINT n = 0; n <= GetMaxGeneration(); n ++) { TADDR startGen; // gen 2 starts at the beginning of the segment if (n == GetMaxGeneration()) { startGen = TO_TADDR(dacpSeg.mem); } else { startGen = TO_TADDR(heap.generation_table[n].allocation_start); } #ifndef FEATURE_PAL GCGenUsageStats(startGen, endGen, liveObjs, heap, FALSE, &allocInfo, &hpUsage->genUsage[n]); #endif endGen = startGen; } // 2. 
Now process LOH taddrSeg = (TADDR) heap.generation_table[GetMaxGeneration()+1].start_segment; while (taddrSeg != NULL) { if (IsInterrupt()) return FALSE; if (dacpSeg.Request(g_sos, taddrSeg, heap) != S_OK) { ExtErr("Error requesting heap segment %p\n", SOS_PTR(taddrSeg)); return FALSE; } #ifndef FEATURE_PAL GCGenUsageStats((TADDR) dacpSeg.mem, (TADDR) dacpSeg.allocated, liveObjs, heap, TRUE, NULL, &hpUsage->genUsage[3]); #endif taddrSeg = (TADDR)dacpSeg.next; } return TRUE; } DWORD GetNumComponents(TADDR obj) { // The number of components is always the second pointer in the object. DWORD Value = NULL; HRESULT hr = MOVE(Value, obj + sizeof(size_t)); // If we fail to read out the number of components, let's assume 0 so we don't try to // read further data from the object. if (FAILED(hr)) return 0; // The component size on a String does not contain the trailing NULL character, // so we must add that ourselves. if(IsStringObject(obj)) return Value+1; return Value; } static MethodTableInfo* GetMethodTableInfo(DWORD_PTR dwAddrMethTable) { // Remove lower bits in case we are in mark phase dwAddrMethTable = dwAddrMethTable & ~3; MethodTableInfo* info = g_special_mtCache.Lookup(dwAddrMethTable); if (!info->IsInitialized()) // An uninitialized entry { // this is the first time we see this method table, so we need to get the information // from the target DacpMethodTableData dmtd; // see code:ClrDataAccess::RequestMethodTableData for details if (dmtd.Request(g_sos, dwAddrMethTable) != S_OK) return NULL; info->BaseSize = dmtd.BaseSize; info->ComponentSize = dmtd.ComponentSize; info->bContainsPointers = dmtd.bContainsPointers; // The following request doesn't work on older runtimes. For those, the // objects would just look like non-collectible, which is acceptable. DacpMethodTableCollectibleData dmtcd; if (SUCCEEDED(dmtcd.Request(g_sos, dwAddrMethTable))) { info->bCollectible = dmtcd.bCollectible; info->LoaderAllocatorObjectHandle = TO_TADDR(dmtcd.LoaderAllocatorObjectHandle); } } return info; } BOOL GetSizeEfficient(DWORD_PTR dwAddrCurrObj, DWORD_PTR dwAddrMethTable, BOOL bLarge, size_t& s, BOOL& bContainsPointers) { MethodTableInfo* info = GetMethodTableInfo(dwAddrMethTable); if (info == NULL) { return FALSE; } bContainsPointers = info->bContainsPointers; s = info->BaseSize; if (info->ComponentSize) { // this is an array, so the size has to include the size of the components. We read the number // of components from the target and multiply by the component size to get the size. s += info->ComponentSize*GetNumComponents(dwAddrCurrObj); } // On x64 we do an optimization to save 4 bytes in almost every string we create // IMPORTANT: This cannot be done in ObjectSize, which is a wrapper to this function, // because we must Align only after these changes are made #ifdef _TARGET_WIN64_ // Pad to min object size if necessary if (s < min_obj_size) s = min_obj_size; #endif // _TARGET_WIN64_ s = (bLarge ? AlignLarge(s) : Align (s)); return TRUE; } BOOL GetCollectibleDataEfficient(DWORD_PTR dwAddrMethTable, BOOL& bCollectible, TADDR& loaderAllocatorObjectHandle) { MethodTableInfo* info = GetMethodTableInfo(dwAddrMethTable); if (info == NULL) { return FALSE; } bCollectible = info->bCollectible; loaderAllocatorObjectHandle = info->LoaderAllocatorObjectHandle; return TRUE; } // This function expects stat to be valid, and ready to get statistics. 
void GatherOneHeapFinalization(DacpGcHeapDetails& heapDetails, HeapStat *stat, BOOL bAllReady, BOOL bShort) { DWORD_PTR dwAddr=0; UINT m; if (!bShort) { for (m = 0; m <= GetMaxGeneration(); m ++) { if (IsInterrupt()) return; ExtOut("generation %d has %d finalizable objects ", m, (SegQueueLimit(heapDetails,gen_segment(m)) - SegQueue(heapDetails,gen_segment(m))) / sizeof(size_t)); ExtOut ("(%p->%p)\n", SOS_PTR(SegQueue(heapDetails,gen_segment(m))), SOS_PTR(SegQueueLimit(heapDetails,gen_segment(m)))); } } #ifndef FEATURE_PAL if (bAllReady) { if (!bShort) { ExtOut ("Finalizable but not rooted: "); } TADDR rngStart = (TADDR)SegQueue(heapDetails, gen_segment(GetMaxGeneration())); TADDR rngEnd = (TADDR)SegQueueLimit(heapDetails, gen_segment(0)); PrintNotReachableInRange(rngStart, rngEnd, TRUE, bAllReady ? stat : NULL, bShort); } #endif if (!bShort) { ExtOut ("Ready for finalization %d objects ", (SegQueueLimit(heapDetails,FinalizerListSeg)-SegQueue(heapDetails,CriticalFinalizerListSeg)) / sizeof(size_t)); ExtOut ("(%p->%p)\n", SOS_PTR(SegQueue(heapDetails,CriticalFinalizerListSeg)), SOS_PTR(SegQueueLimit(heapDetails,FinalizerListSeg))); } // if bAllReady we only count objects that are ready for finalization, // otherwise we count all finalizable objects. TADDR taddrLowerLimit = (bAllReady ? (TADDR)SegQueue(heapDetails, CriticalFinalizerListSeg) : (DWORD_PTR)SegQueue(heapDetails, gen_segment(GetMaxGeneration()))); for (dwAddr = taddrLowerLimit; dwAddr < (DWORD_PTR)SegQueueLimit(heapDetails, FinalizerListSeg); dwAddr += sizeof (dwAddr)) { if (IsInterrupt()) { return; } DWORD_PTR objAddr = NULL, MTAddr = NULL; if (SUCCEEDED(MOVE(objAddr, dwAddr)) && SUCCEEDED(GetMTOfObject(objAddr, &MTAddr)) && MTAddr) { if (bShort) { DMLOut("%s\n", DMLObject(objAddr)); } else { size_t s = ObjectSize(objAddr); stat->Add(MTAddr, (DWORD)s); } } } } BOOL GCHeapTraverse(const DacpGcHeapDetails &heap, AllocInfo* pallocInfo, VISITGCHEAPFUNC pFunc, LPVOID token, BOOL verify) { DWORD_PTR begin_youngest; DWORD_PTR end_youngest; begin_youngest = (DWORD_PTR)heap.generation_table[0].allocation_start; DWORD_PTR dwAddr = (DWORD_PTR)heap.ephemeral_heap_segment; DacpHeapSegmentData segment; end_youngest = (DWORD_PTR)heap.alloc_allocated; DWORD_PTR dwAddrSeg = (DWORD_PTR)heap.generation_table[GetMaxGeneration()].start_segment; dwAddr = dwAddrSeg; if (segment.Request(g_sos, dwAddr, heap) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddr)); return FALSE; } // DWORD_PTR dwAddrCurrObj = (DWORD_PTR)heap.generation_table[GetMaxGeneration()].allocation_start; DWORD_PTR dwAddrCurrObj = (DWORD_PTR)segment.mem; size_t s, sPrev=0; BOOL bPrevFree=FALSE; DWORD_PTR dwAddrMethTable; DWORD_PTR dwAddrPrevObj=0; while(1) { if (IsInterrupt()) { ExtOut("<heap walk interrupted>\n"); return FALSE; } DWORD_PTR end_of_segment = (DWORD_PTR)segment.allocated; if (dwAddrSeg == (DWORD_PTR)heap.ephemeral_heap_segment) { end_of_segment = end_youngest; if (dwAddrCurrObj - SIZEOF_OBJHEADER == end_youngest - Align(min_obj_size)) break; } if (dwAddrCurrObj >= (DWORD_PTR)end_of_segment) { if (dwAddrCurrObj > (DWORD_PTR)end_of_segment) { ExtOut ("curr_object: %p > heap_segment_allocated (seg: %p)\n", SOS_PTR(dwAddrCurrObj), SOS_PTR(dwAddrSeg)); if (dwAddrPrevObj) { ExtOut ("Last good object: %p\n", SOS_PTR(dwAddrPrevObj)); } return FALSE; } dwAddrSeg = (DWORD_PTR)segment.next; if (dwAddrSeg) { dwAddr = dwAddrSeg; if (segment.Request(g_sos, dwAddr, heap) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddr)); return FALSE; } 
dwAddrCurrObj = (DWORD_PTR)segment.mem; continue; } else break; // Done Verifying Heap } if (dwAddrSeg == (DWORD_PTR)heap.ephemeral_heap_segment && dwAddrCurrObj >= end_youngest) { if (dwAddrCurrObj > end_youngest) { // prev_object length is too long ExtOut("curr_object: %p > end_youngest: %p\n", SOS_PTR(dwAddrCurrObj), SOS_PTR(end_youngest)); if (dwAddrPrevObj) { DMLOut("Last good object: %s\n", DMLObject(dwAddrPrevObj)); } return FALSE; } return FALSE; } if (FAILED(GetMTOfObject(dwAddrCurrObj, &dwAddrMethTable))) { return FALSE; } dwAddrMethTable = dwAddrMethTable & ~3; if (dwAddrMethTable == 0) { // Is this the beginning of an allocation context? int i; for (i = 0; i < pallocInfo->num; i ++) { if (dwAddrCurrObj == (DWORD_PTR)pallocInfo->array[i].alloc_ptr) { dwAddrCurrObj = (DWORD_PTR)pallocInfo->array[i].alloc_limit + Align(min_obj_size); break; } } if (i < pallocInfo->num) continue; // We also need to look at the gen0 alloc context. if (dwAddrCurrObj == (DWORD_PTR) heap.generation_table[0].allocContextPtr) { dwAddrCurrObj = (DWORD_PTR) heap.generation_table[0].allocContextLimit + Align(min_obj_size); continue; } } BOOL bContainsPointers; BOOL bMTOk = GetSizeEfficient(dwAddrCurrObj, dwAddrMethTable, FALSE, s, bContainsPointers); if (verify && bMTOk) bMTOk = VerifyObject (heap, dwAddrCurrObj, dwAddrMethTable, s, TRUE); if (!bMTOk) { DMLOut("curr_object: %s\n", DMLListNearObj(dwAddrCurrObj)); if (dwAddrPrevObj) DMLOut("Last good object: %s\n", DMLObject(dwAddrPrevObj)); ExtOut ("----------------\n"); return FALSE; } pFunc (dwAddrCurrObj, s, dwAddrMethTable, token); // We believe we did this alignment in ObjectSize above. assert((s & ALIGNCONST) == 0); dwAddrPrevObj = dwAddrCurrObj; sPrev = s; bPrevFree = IsMTForFreeObj(dwAddrMethTable); dwAddrCurrObj += s; } // Now for the large object generation: dwAddrSeg = (DWORD_PTR)heap.generation_table[GetMaxGeneration()+1].start_segment; dwAddr = dwAddrSeg; if (segment.Request(g_sos, dwAddr, heap) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddr)); return FALSE; } // dwAddrCurrObj = (DWORD_PTR)heap.generation_table[GetMaxGeneration()+1].allocation_start; dwAddrCurrObj = (DWORD_PTR)segment.mem; dwAddrPrevObj=0; while(1) { if (IsInterrupt()) { ExtOut("<heap traverse interrupted>\n"); return FALSE; } DWORD_PTR end_of_segment = (DWORD_PTR)segment.allocated; if (dwAddrCurrObj >= (DWORD_PTR)end_of_segment) { if (dwAddrCurrObj > (DWORD_PTR)end_of_segment) { ExtOut("curr_object: %p > heap_segment_allocated (seg: %p)\n", SOS_PTR(dwAddrCurrObj), SOS_PTR(dwAddrSeg)); if (dwAddrPrevObj) { ExtOut("Last good object: %p\n", SOS_PTR(dwAddrPrevObj)); } return FALSE; } dwAddrSeg = (DWORD_PTR)segment.next; if (dwAddrSeg) { dwAddr = dwAddrSeg; if (segment.Request(g_sos, dwAddr, heap) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddr)); return FALSE; } dwAddrCurrObj = (DWORD_PTR)segment.mem; continue; } else break; // Done Verifying Heap } if (FAILED(GetMTOfObject(dwAddrCurrObj, &dwAddrMethTable))) { return FALSE; } dwAddrMethTable = dwAddrMethTable & ~3; BOOL bContainsPointers; BOOL bMTOk = GetSizeEfficient(dwAddrCurrObj, dwAddrMethTable, TRUE, s, bContainsPointers); if (verify && bMTOk) bMTOk = VerifyObject (heap, dwAddrCurrObj, dwAddrMethTable, s, TRUE); if (!bMTOk) { DMLOut("curr_object: %s\n", DMLListNearObj(dwAddrCurrObj)); if (dwAddrPrevObj) DMLOut("Last good object: %s\n", dwAddrPrevObj); ExtOut ("----------------\n"); return FALSE; } pFunc (dwAddrCurrObj, s, dwAddrMethTable, token); // We believe we did this 
alignment in ObjectSize above. assert((s & ALIGNCONSTLARGE) == 0); dwAddrPrevObj = dwAddrCurrObj; dwAddrCurrObj += s; } return TRUE; } BOOL GCHeapsTraverse(VISITGCHEAPFUNC pFunc, LPVOID token, BOOL verify) { // Obtain allocation context for each managed thread. AllocInfo allocInfo; allocInfo.Init(); if (!IsServerBuild()) { DacpGcHeapDetails heapDetails; if (heapDetails.Request(g_sos) != S_OK) { ExtOut("Error requesting gc heap details\n"); return FALSE; } return GCHeapTraverse (heapDetails, &allocInfo, pFunc, token, verify); } else { DacpGcHeapData gcheap; if (gcheap.Request(g_sos) != S_OK) { ExtOut("Error requesting GC Heap data\n"); return FALSE; } DWORD dwAllocSize; DWORD dwNHeaps = gcheap.HeapCount; if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), dwNHeaps, dwAllocSize)) { ExtOut("Failed to get GCHeaps: integer overflow error\n"); return FALSE; } CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize); if (g_sos->GetGCHeapList(dwNHeaps, heapAddrs, NULL) != S_OK) { ExtOut("Failed to get GCHeaps\n"); return FALSE; } DWORD n; for (n = 0; n < dwNHeaps; n ++) { DacpGcHeapDetails heapDetails; if (heapDetails.Request(g_sos, heapAddrs[n]) != S_OK) { ExtOut("Error requesting details\n"); return FALSE; } if (!GCHeapTraverse (heapDetails, &allocInfo, pFunc, token, verify)) { ExtOut("Traversing a gc heap failed\n"); return FALSE; } } } return TRUE; } GCHeapSnapshot::GCHeapSnapshot() { m_isBuilt = FALSE; m_heapDetails = NULL; } /////////////////////////////////////////////////////////// SegmentLookup::SegmentLookup() { m_iSegmentsSize = m_iSegmentCount = 0; m_segments = new DacpHeapSegmentData[nSegLookupStgIncrement]; if (m_segments == NULL) { ReportOOM(); } else { m_iSegmentsSize = nSegLookupStgIncrement; } } BOOL SegmentLookup::AddSegment(DacpHeapSegmentData *pData) { // appends the address of a new (initialized) instance of DacpHeapSegmentData to the list of segments // (m_segments) adding space for a segment when necessary. // @todo Microsoft: The field name m_iSegmentSize is a little misleading. It's not the size in bytes, // but the number of elements allocated for the array. It probably should have been named something like // m_iMaxSegments instead. 
if (m_iSegmentCount >= m_iSegmentsSize) { // expand buffer--allocate enough space to hold the elements we already have plus nSegLookupStgIncrement // more elements DacpHeapSegmentData *pNewBuffer = new DacpHeapSegmentData[m_iSegmentsSize+nSegLookupStgIncrement]; if (pNewBuffer==NULL) return FALSE; // copy the old elements into the new array memcpy(pNewBuffer, m_segments, sizeof(DacpHeapSegmentData)*m_iSegmentsSize); // record the new number of elements available m_iSegmentsSize+=nSegLookupStgIncrement; // delete the old array delete [] m_segments; // set m_segments to point to the new array m_segments = pNewBuffer; } // add pData to the array m_segments[m_iSegmentCount++] = *pData; return TRUE; } SegmentLookup::~SegmentLookup() { if (m_segments) { delete [] m_segments; m_segments = NULL; } } void SegmentLookup::Clear() { m_iSegmentCount = 0; } CLRDATA_ADDRESS SegmentLookup::GetHeap(CLRDATA_ADDRESS object, BOOL& bFound) { CLRDATA_ADDRESS ret = NULL; bFound = FALSE; // Visit our segments for (int i=0; i<m_iSegmentCount; i++) { if (TO_TADDR(m_segments[i].mem) <= TO_TADDR(object) && TO_TADDR(m_segments[i].highAllocMark) > TO_TADDR(object)) { ret = m_segments[i].gc_heap; bFound = TRUE; break; } } return ret; } /////////////////////////////////////////////////////////////////////////// BOOL GCHeapSnapshot::Build() { Clear(); m_isBuilt = FALSE; ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /// 1. Get some basic information such as the heap type (SVR or WKS), how many heaps there are, mode and max generation /// (See code:ClrDataAccess::RequestGCHeapData) ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (m_gcheap.Request(g_sos) != S_OK) { ExtOut("Error requesting GC Heap data\n"); return FALSE; } ArrayHolder<CLRDATA_ADDRESS> heapAddrs = NULL; ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /// 2. Get a list of the addresses of the heaps when we have multiple heaps in server mode ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (m_gcheap.bServerMode) { UINT AllocSize; // allocate an array to hold the starting addresses of each heap when we're in server mode if (!ClrSafeInt<UINT>::multiply(sizeof(CLRDATA_ADDRESS), m_gcheap.HeapCount, AllocSize) || (heapAddrs = new CLRDATA_ADDRESS [m_gcheap.HeapCount]) == NULL) { ReportOOM(); return FALSE; } // and initialize it with their addresses (see code:ClrDataAccess::RequestGCHeapList // for details) if (g_sos->GetGCHeapList(m_gcheap.HeapCount, heapAddrs, NULL) != S_OK) { ExtOut("Failed to get GCHeaps\n"); return FALSE; } } ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /// 3. 
Get some necessary information about each heap, such as the card table location, the generation /// table, the heap bounds, etc., and retrieve the heap segments ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // allocate an array to hold the information m_heapDetails = new DacpGcHeapDetails[m_gcheap.HeapCount]; if (m_heapDetails == NULL) { ReportOOM(); return FALSE; } // get the heap information for each heap // See code:ClrDataAccess::RequestGCHeapDetails for details for (UINT n = 0; n < m_gcheap.HeapCount; n ++) { if (m_gcheap.bServerMode) { if (m_heapDetails[n].Request(g_sos, heapAddrs[n]) != S_OK) { ExtOut("Error requesting details\n"); return FALSE; } } else { if (m_heapDetails[n].Request(g_sos) != S_OK) { ExtOut("Error requesting details\n"); return FALSE; } } // now get information about the heap segments for this heap if (!AddSegments(m_heapDetails[n])) { ExtOut("Failed to retrieve segments for gc heap\n"); return FALSE; } } m_isBuilt = TRUE; return TRUE; } BOOL GCHeapSnapshot::AddSegments(DacpGcHeapDetails& details) { int n = 0; DacpHeapSegmentData segment; // This array of two addresses gives us access to all the segments. The generation segments are linked // to each other, starting with the maxGeneration segment. The second address gives us the large object heap. CLRDATA_ADDRESS AddrSegs[] = { details.generation_table[GetMaxGeneration()].start_segment, details.generation_table[GetMaxGeneration()+1].start_segment // large object heap }; // this loop will get information for all the heap segments in this heap. The outer loop iterates once // for the "normal" generation segments and once for the large object heap. The inner loop follows the chain // of segments rooted at AddrSegs[i] for (unsigned int i = 0; i < sizeof(AddrSegs)/sizeof(AddrSegs[0]); ++i) { CLRDATA_ADDRESS AddrSeg = AddrSegs[i]; while (AddrSeg != NULL) { if (IsInterrupt()) { return FALSE; } // Initialize segment by copying fields from the target's heap segment at AddrSeg. // See code:ClrDataAccess::RequestGCHeapSegment for details. if (segment.Request(g_sos, AddrSeg, details) != S_OK) { ExtOut("Error requesting heap segment %p\n", SOS_PTR(AddrSeg)); return FALSE; } if (n++ > nMaxHeapSegmentCount) // that would be insane { ExtOut("More than %d heap segments, there must be an error\n", nMaxHeapSegmentCount); return FALSE; } // add the new segment to the array of segments. This will expand the array if necessary if (!m_segments.AddSegment(&segment)) { ExtOut("strike: Failed to store segment\n"); return FALSE; } // get the next segment in the chain AddrSeg = segment.next; } } return TRUE; } void GCHeapSnapshot::Clear() { if (m_heapDetails != NULL) { delete [] m_heapDetails; m_heapDetails = NULL; } m_segments.Clear(); m_isBuilt = FALSE; } GCHeapSnapshot g_snapshot; DacpGcHeapDetails *GCHeapSnapshot::GetHeap(CLRDATA_ADDRESS objectPointer) { // We need bFound because heap will be NULL if we are Workstation Mode. // We still need a way to know if the address was found in our segment // list. BOOL bFound = FALSE; CLRDATA_ADDRESS heap = m_segments.GetHeap(objectPointer, bFound); if (heap) { for (UINT i=0; i<m_gcheap.HeapCount; i++) { if (m_heapDetails[i].heapAddr == heap) return m_heapDetails + i; } } else if (!m_gcheap.bServerMode) { if (bFound) { return m_heapDetails; } } // Not found return NULL; } // TODO: Do we need to handle the LOH here? 
int GCHeapSnapshot::GetGeneration(CLRDATA_ADDRESS objectPointer) { DacpGcHeapDetails *pDetails = GetHeap(objectPointer); if (pDetails == NULL) { ExtOut("Object %p has no generation\n", SOS_PTR(objectPointer)); return 0; } TADDR taObj = TO_TADDR(objectPointer); // The DAC doesn't fill the generation table with true CLRDATA_ADDRESS values // but rather with ULONG64 values (i.e. non-sign-extended 64-bit values) // We use the TO_TADDR below to ensure we won't break if this will ever // be fixed in the DAC. if (taObj >= TO_TADDR(pDetails->generation_table[0].allocation_start) && taObj <= TO_TADDR(pDetails->alloc_allocated)) return 0; if (taObj >= TO_TADDR(pDetails->generation_table[1].allocation_start) && taObj <= TO_TADDR(pDetails->generation_table[0].allocation_start)) return 1; return 2; } DWORD_PTR g_trav_totalSize = 0; DWORD_PTR g_trav_wastedSize = 0; void LoaderHeapTraverse(CLRDATA_ADDRESS blockData,size_t blockSize,BOOL blockIsCurrentBlock) { DWORD_PTR dwAddr1; DWORD_PTR curSize = 0; char ch; for (dwAddr1 = (DWORD_PTR)blockData; dwAddr1 < (DWORD_PTR)blockData + blockSize; dwAddr1 += OSPageSize()) { if (IsInterrupt()) break; if (SafeReadMemory(dwAddr1, &ch, sizeof(ch), NULL)) { curSize += OSPageSize(); } else break; } if (!blockIsCurrentBlock) { g_trav_wastedSize += blockSize - curSize; } g_trav_totalSize += curSize; ExtOut("%p(%x:%x) ", SOS_PTR(blockData), blockSize, curSize); } /**********************************************************************\ * Routine Description: * * * * This function prints out the size for various heaps. * * total - the total size of the heap * * wasted - the amount of size wasted by the heap. * * * \**********************************************************************/ void PrintHeapSize(DWORD_PTR total, DWORD_PTR wasted) { ExtOut("Size: 0x%" POINTERSIZE_TYPE "x (%" POINTERSIZE_TYPE "u) bytes", total, total); if (wasted) ExtOut(" total, 0x%" POINTERSIZE_TYPE "x (%" POINTERSIZE_TYPE "u) bytes wasted", wasted, wasted); ExtOut(".\n"); } /**********************************************************************\ * Routine Description: * * * * This function prints out the size information for the JIT heap. * * * * Returns: The size of this heap. 
* * * \**********************************************************************/ DWORD_PTR JitHeapInfo() { // walk ExecutionManager__m_pJitList unsigned int count = 0; if (FAILED(g_sos->GetJitManagerList(0, NULL, &count))) { ExtOut("Unable to get JIT info\n"); return 0; } ArrayHolder<DacpJitManagerInfo> pArray = new DacpJitManagerInfo[count]; if (pArray==NULL) { ReportOOM(); return 0; } if (g_sos->GetJitManagerList(count, pArray, NULL) != S_OK) { ExtOut("Unable to get array of JIT Managers\n"); return 0; } DWORD_PTR totalSize = 0; DWORD_PTR wasted = 0; for (unsigned int n=0; n < count; n++) { if (IsInterrupt()) break; if (IsMiIL(pArray[n].codeType)) // JIT { unsigned int heapCount = 0; if (FAILED(g_sos->GetCodeHeapList(pArray[n].managerAddr, 0, NULL, &heapCount))) { ExtOut("Error getting EEJitManager code heaps\n"); break; } if (heapCount > 0) { ArrayHolder<DacpJitCodeHeapInfo> codeHeapInfo = new DacpJitCodeHeapInfo[heapCount]; if (codeHeapInfo == NULL) { ReportOOM(); break; } if (g_sos->GetCodeHeapList(pArray[n].managerAddr, heapCount, codeHeapInfo, NULL) != S_OK) { ExtOut("Unable to get code heap info\n"); break; } for (unsigned int iHeaps = 0; iHeaps < heapCount; iHeaps++) { if (IsInterrupt()) break; if (codeHeapInfo[iHeaps].codeHeapType == CODEHEAP_LOADER) { ExtOut("LoaderCodeHeap: "); totalSize += LoaderHeapInfo(codeHeapInfo[iHeaps].LoaderHeap, &wasted); } else if (codeHeapInfo[iHeaps].codeHeapType == CODEHEAP_HOST) { ExtOut("HostCodeHeap: "); ExtOut("%p ", SOS_PTR(codeHeapInfo[iHeaps].HostData.baseAddr)); DWORD dwSize = (DWORD)(codeHeapInfo[iHeaps].HostData.currentAddr - codeHeapInfo[iHeaps].HostData.baseAddr); PrintHeapSize(dwSize, 0); totalSize += dwSize; } } } } else if (!IsMiNative(pArray[n].codeType)) // ignore native heaps for now { ExtOut("Unknown Jit encountered, ignored\n"); } } ExtOut("Total size: "); PrintHeapSize(totalSize, wasted); return totalSize; } /**********************************************************************\ * Routine Description: * * * * This function prints out the loader heap info for a single AD. * * pLoaderHeapAddr - pointer to the loader heap * * wasted - a pointer to store the number of bytes wasted in this * * VSDHeap (this pointer can be NULL) * * * * Returns: The size of this heap. * * * \**********************************************************************/ DWORD_PTR LoaderHeapInfo(CLRDATA_ADDRESS pLoaderHeapAddr, DWORD_PTR *wasted) { g_trav_totalSize = 0; g_trav_wastedSize = 0; if (pLoaderHeapAddr) g_sos->TraverseLoaderHeap(pLoaderHeapAddr, LoaderHeapTraverse); PrintHeapSize(g_trav_totalSize, g_trav_wastedSize); if (wasted) *wasted += g_trav_wastedSize; return g_trav_totalSize; } /**********************************************************************\ * Routine Description: * * * * This function prints out the heap info for a single VSDHeap. * * name - the name to print * * type - the type of heap * * appDomain - the app domain in which this resides * * wasted - a pointer to store the number of bytes wasted in this * * VSDHeap (this pointer can be NULL) * * * * Returns: The size of this heap. 
* * * \**********************************************************************/ static DWORD_PTR PrintOneVSDHeap(const char *name, VCSHeapType type, CLRDATA_ADDRESS appDomain, DWORD_PTR *wasted) { g_trav_totalSize = 0; g_trav_wastedSize = 0; ExtOut(name); g_sos->TraverseVirtCallStubHeap(appDomain, type, LoaderHeapTraverse); PrintHeapSize(g_trav_totalSize, g_trav_wastedSize); if (wasted) *wasted += g_trav_wastedSize; return g_trav_totalSize; } /**********************************************************************\ * Routine Description: * * * * This function prints out the heap info for VSDHeaps. * * appDomain - The AppDomain to print info for. * * wasted - a pointer to store the number of bytes wasted in this * * AppDomain (this pointer can be NULL) * * * * Returns: The size of this heap. * * * \**********************************************************************/ DWORD_PTR VSDHeapInfo(CLRDATA_ADDRESS appDomain, DWORD_PTR *wasted) { DWORD_PTR totalSize = 0; if (appDomain) { totalSize += PrintOneVSDHeap(" IndcellHeap: ", IndcellHeap, appDomain, wasted); totalSize += PrintOneVSDHeap(" LookupHeap: ", LookupHeap, appDomain, wasted); totalSize += PrintOneVSDHeap(" ResolveHeap: ", ResolveHeap, appDomain, wasted); totalSize += PrintOneVSDHeap(" DispatchHeap: ", DispatchHeap, appDomain, wasted); totalSize += PrintOneVSDHeap(" CacheEntryHeap: ", CacheEntryHeap, appDomain, wasted); } return totalSize; } /**********************************************************************\ * Routine Description: * * * * This function prints out the heap info for a domain * * name - the name of the domain (to be printed) * * adPtr - a pointer to the AppDomain to print info about * * outSize - a pointer to an int to store the size at (this may be * * NULL) * * outWasted - a pointer to an int to store the number of bytes this * * domain is wasting (this may be NULL) * * * * returns: SUCCESS if we successfully printed out the domain heap * * info, FAILED otherwise; if FAILED, outSize and * * outWasted are untouched. * * * \**********************************************************************/ HRESULT PrintDomainHeapInfo(const char *name, CLRDATA_ADDRESS adPtr, DWORD_PTR *outSize, DWORD_PTR *outWasted) { DacpAppDomainData appDomain; HRESULT hr = appDomain.Request(g_sos, adPtr); if (FAILED(hr)) { ExtOut("Unable to get information for %s.\n", name); return hr; } ExtOut("--------------------------------------\n"); const int column = 19; ExtOut("%s:", name); WhitespaceOut(column - (int)strlen(name) - 1); DMLOut("%s\n", DMLDomain(adPtr)); DWORD_PTR domainHeapSize = 0; DWORD_PTR wasted = 0; ExtOut("LowFrequencyHeap: "); domainHeapSize += LoaderHeapInfo(appDomain.pLowFrequencyHeap, &wasted); ExtOut("HighFrequencyHeap: "); domainHeapSize += LoaderHeapInfo(appDomain.pHighFrequencyHeap, &wasted); ExtOut("StubHeap: "); domainHeapSize += LoaderHeapInfo(appDomain.pStubHeap, &wasted); ExtOut("Virtual Call Stub Heap:\n"); domainHeapSize += VSDHeapInfo(appDomain.AppDomainPtr, &wasted); ExtOut("Total size: "); PrintHeapSize(domainHeapSize, wasted); if (outSize) *outSize += domainHeapSize; if (outWasted) *outWasted += wasted; return hr; } /**********************************************************************\ * Routine Description: * * * * This function prints out the heap info for a list of modules. 
* * moduleList - an array of modules * * count - the number of modules in moduleList * * type - the type of heap * * outWasted - a pointer to store the number of bytes wasted in this * * heap (this pointer can be NULL) * * * * Returns: The size of this heap. * * * \**********************************************************************/ DWORD_PTR PrintModuleHeapInfo(__out_ecount(count) DWORD_PTR *moduleList, int count, ModuleHeapType type, DWORD_PTR *outWasted) { DWORD_PTR toReturn = 0; DWORD_PTR wasted = 0; if (IsMiniDumpFile()) { ExtOut("<no information>\n"); } else { DWORD_PTR thunkHeapSize = 0; for (int i = 0; i < count; i++) { CLRDATA_ADDRESS addr = moduleList[i]; DacpModuleData dmd; if (dmd.Request(g_sos, addr) != S_OK) { ExtOut("Unable to read module %p\n", SOS_PTR(addr)); } else { DMLOut("Module %s: ", DMLModule(addr)); CLRDATA_ADDRESS heap = type == ModuleHeapType_ThunkHeap ? dmd.pThunkHeap : dmd.pLookupTableHeap; thunkHeapSize += LoaderHeapInfo(heap, &wasted); } } ExtOut("Total size: " WIN86_8SPACES); PrintHeapSize(thunkHeapSize, wasted); toReturn = thunkHeapSize; } if (outWasted) *outWasted += wasted; return toReturn; }
1
11,741
These whitespace-only changes make it very difficult to review this change. Is there any way you could separate just the code changes into one PR and the whitespace-only fixes into another? Or are they sufficiently merged together at this point? Chrome is having a really tough time rendering these large diffs.
dotnet-diagnostics
cpp
@@ -19,11 +19,13 @@ public abstract class RoomHandler protected final Client client; protected final TheatrePlugin plugin; + protected final TheatreConfig config; - protected RoomHandler(final Client client, final TheatrePlugin plugin) + protected RoomHandler(final Client client, final TheatrePlugin plugin, TheatreConfig config) { this.client = client; this.plugin = plugin; + this.config = config; } public abstract void onStart();
1
package net.runelite.client.plugins.theatre; import java.awt.Color; import java.awt.Font; import java.awt.Graphics2D; import java.awt.Polygon; import java.util.Map; import net.runelite.api.Client; import net.runelite.api.NPC; import net.runelite.api.Perspective; import net.runelite.api.Point; import net.runelite.api.Projectile; import net.runelite.api.coords.LocalPoint; import net.runelite.api.coords.WorldPoint; import net.runelite.client.ui.overlay.OverlayUtil; public abstract class RoomHandler { protected final Client client; protected final TheatrePlugin plugin; protected RoomHandler(final Client client, final TheatrePlugin plugin) { this.client = client; this.plugin = plugin; } public abstract void onStart(); public abstract void onStop(); protected void drawTile2(Graphics2D graphics, WorldPoint point, Color color, int strokeWidth, int outlineAlpha, int fillAlpha) { WorldPoint playerLocation = client.getLocalPlayer().getWorldLocation(); if (point.distanceTo(playerLocation) >= 32) { return; } LocalPoint lp = LocalPoint.fromWorld(client, point); if (lp == null) { return; } Polygon poly = Perspective.getCanvasTileAreaPoly(client, lp, 7); if (poly == null) { return; } //OverlayUtil.renderPolygon(graphics, poly, color); OverlayUtil.drawStrokeAndFillPoly(graphics, color, strokeWidth, outlineAlpha, fillAlpha, poly); } protected void renderProjectiles(Graphics2D graphics, Map<Projectile, String> projectiles) { for (Map.Entry<Projectile, String> entry : projectiles.entrySet()) { int projectileId = entry.getKey().getId(); String text = entry.getValue(); int x = (int) entry.getKey().getX(); int y = (int) entry.getKey().getY(); LocalPoint projectilePoint = new LocalPoint(x, y); Point textLocation = Perspective.getCanvasTextLocation(client, graphics, projectilePoint, text, 0); if (textLocation != null) { if (projectileId == 1607) { // range renderTextLocation(graphics, text, 17, Font.BOLD, new Color(57, 255, 20, 255), textLocation); } else if (projectileId == 1606) { //mage renderTextLocation(graphics, text, 17, Font.BOLD, new Color(64, 224, 208, 255), textLocation); } else { //Orb of death? i hope renderTextLocation(graphics, text, 20, Font.BOLD, Color.WHITE, textLocation); } } } } protected void drawTile(Graphics2D graphics, WorldPoint point, Color color, int strokeWidth, int outlineAlpha, int fillAlpha) { WorldPoint playerLocation = client.getLocalPlayer().getWorldLocation(); OverlayUtil.drawTiles(graphics, client, point, playerLocation, color, strokeWidth, outlineAlpha, fillAlpha); } protected void renderNpcOverlay(Graphics2D graphics, NPC actor, Color color, int outlineWidth, int outlineAlpha, int fillAlpha) { OverlayUtil.renderNpcOverlay(graphics, actor, color, outlineWidth, outlineAlpha, fillAlpha, client); } protected void renderTextLocation(Graphics2D graphics, String txtString, int fontSize, int fontStyle, Color fontColor, Point canvasPoint) { graphics.setFont(new Font("Arial", fontStyle, fontSize)); if (canvasPoint != null) { final Point canvasCenterPoint = new Point(canvasPoint.getX(), canvasPoint.getY()); final Point canvasCenterPointShadow = new Point(canvasPoint.getX() + 1, canvasPoint.getY() + 1); OverlayUtil.renderTextLocation(graphics, canvasCenterPointShadow, txtString, Color.BLACK); OverlayUtil.renderTextLocation(graphics, canvasCenterPoint, txtString, fontColor); } } protected String twoDigitString(long number) { if (number == 0) { return "00"; } if (number / 10 == 0) { return "0" + number; } return String.valueOf(number); } }
1
16,059
Why does this suddenly need a config ref?
open-osrs-runelite
java
@@ -99,6 +99,10 @@ module.exports = function(config) { customLaunchers: sauceLabs ? sauceLabsLaunchers : travisLaunchers, files: [ + // We can't load this up front because it's ES2015 and we need it only + // for certain tests that run under those conditions. We also can't load + // it via CDN because { included: false } won't work. + { pattern: 'custom-elements-es5-adapter.js', included: false }, { pattern: 'polyfills.js', watched: false }, { pattern: '{browser,shared}/**.js', watched: false } ],
1
/*eslint no-var:0, object-shorthand:0 */ var coverage = String(process.env.COVERAGE)!=='false', ci = String(process.env.CI).match(/^(1|true)$/gi), pullRequest = !String(process.env.TRAVIS_PULL_REQUEST).match(/^(0|false|undefined)$/gi), masterBranch = String(process.env.TRAVIS_BRANCH).match(/^master$/gi), realBrowser = String(process.env.BROWSER).match(/^(1|true)$/gi), sauceLabs = realBrowser && ci && !pullRequest && masterBranch, performance = !coverage && !realBrowser && String(process.env.PERFORMANCE)!=='false', webpack = require('webpack'); var sauceLabsLaunchers = { sl_chrome: { base: 'SauceLabs', browserName: 'chrome', platform: 'Windows 10' }, sl_firefox: { base: 'SauceLabs', browserName: 'firefox', platform: 'Windows 10' }, sl_safari: { base: 'SauceLabs', browserName: 'safari', platform: 'OS X 10.11' }, sl_edge: { base: 'SauceLabs', browserName: 'MicrosoftEdge', platform: 'Windows 10' }, sl_ie_11: { base: 'SauceLabs', browserName: 'internet explorer', version: '11.103', platform: 'Windows 10' }, sl_ie_10: { base: 'SauceLabs', browserName: 'internet explorer', version: '10.0', platform: 'Windows 7' }, sl_ie_9: { base: 'SauceLabs', browserName: 'internet explorer', version: '9.0', platform: 'Windows 7' } }; var travisLaunchers = { chrome_travis: { base: 'Chrome', flags: ['--no-sandbox'] } }; var localBrowsers = realBrowser ? Object.keys(travisLaunchers) : ['PhantomJS']; module.exports = function(config) { config.set({ browsers: sauceLabs ? Object.keys(sauceLabsLaunchers) : localBrowsers, frameworks: ['source-map-support', 'mocha', 'chai-sinon'], reporters: ['mocha'].concat( coverage ? 'coverage' : [], sauceLabs ? 'saucelabs' : [] ), coverageReporter: { dir: __dirname+'/../coverage', reporters: [ { type: 'text-summary' }, { type: 'html' }, { type: 'lcovonly', subdir: '.', file: 'lcov.info' } ] }, mochaReporter: { showDiff: true }, browserLogOptions: { terminal: true }, browserConsoleLogOptions: { terminal: true }, browserNoActivityTimeout: 5 * 60 * 1000, // Use only two browsers concurrently, works better with open source Sauce Labs remote testing concurrency: 2, // sauceLabs: { // tunnelIdentifier: process.env.TRAVIS_JOB_NUMBER || ('local'+require('./package.json').version), // startConnect: false // }, customLaunchers: sauceLabs ? sauceLabsLaunchers : travisLaunchers, files: [ { pattern: 'polyfills.js', watched: false }, { pattern: '{browser,shared}/**.js', watched: false } ], preprocessors: { '**/*': ['webpack', 'sourcemap'] }, webpack: { devtool: 'inline-source-map', module: { /* Transpile source and test files */ rules: [ { enforce: 'pre', test: /\.jsx?$/, exclude: /node_modules/, loader: 'babel-loader', options: { comments: false, compact: true } }, /* Only Instrument our source files for coverage */ coverage ? { test: /\.jsx?$/, loader: 'isparta-loader', include: /src/ } : {} ] }, resolve: { // The React DevTools integration requires preact as a module // rather than referencing source files inside the module // directly alias: { preact: '../src/preact' }, modules: [__dirname, 'node_modules'] }, plugins: [ new webpack.DefinePlugin({ coverage: coverage, NODE_ENV: JSON.stringify(process.env.NODE_ENV || ''), ENABLE_PERFORMANCE: performance, DISABLE_FLAKEY: !!String(process.env.FLAKEY).match(/^(0|false)$/gi) }) ] }, webpackMiddleware: { noInfo: true } }); };
1
11,031
Unfortunately, I couldn't find a way to get karma to conditionally load stuff from a CDN, so I had to include it.
preactjs-preact
js
@@ -67,6 +67,10 @@ exclude = ["AuthalicMatrixCoefficients",
            "vnl_file_matrix",
            "vnl_file_vector",
            "vnl_fortran_copy",
+           "CosineWindowFunction",
+           "HammingWindowFunction",
+           "LanczosWindowFunction",
+           "WelchWindowFunction",
            ]

 total = 0
1
#========================================================================== # # Copyright Insight Software Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #==========================================================================*/ from __future__ import print_function import itk import sys from itkTemplate import itkTemplate itk.auto_progress(2) itk.force_load() def isEmpty(o): for i in dir(o): if i[0].isupper(): return False return True exclude = ["AuthalicMatrixCoefficients", "MatrixCoefficients", "OnesMatrixCoefficients", "IntrinsicMatrixCoefficients", "HarmonicMatrixCoefficients", "ConformalMatrixCoefficients", "InverseEuclideanDistanceMatrixCoefficients", "BandNode", "NormalBandNode", "CellTraitsInfo", "DefaultDynamicMeshTraits", "DefaultStaticMeshTraits", "ParallelSparseFieldLevelSetNode", "SparseFieldLevelSetNode", "QuadEdgeMeshCellTraitsInfo", "QuadEdgeMeshTraits", "complex", "list", "map", "numeric_limits", "set", "vector", "vnl_c_vector", "vnl_diag_matrix", "vnl_matrix", "vnl_matrix_fixed", "vnl_matrix_fixed_ref", "vnl_matrix_fixed_ref_const", "vnl_matrix_ref", "vnl_vector", "vnl_vector_ref", "vnl_file_matrix", "vnl_file_vector", "vnl_fortran_copy", ] total = 0 empty = 0 for t in dir(itk): if t not in exclude: T = itk.__dict__[t] if isinstance(T, itkTemplate): for I in T.values(): total += 1 if isEmpty(I): empty += 1 print("%s: empty class" % I) print("%s classes checked." % total) if empty: print("%s empty classes found" % empty, file=sys.stderr) sys.exit(1)
1
10,418
Those functions are not currently wrapped, so I don't think it is necessary to exclude them (at least for now); a quick way to check is sketched after this row.
InsightSoftwareConsortium-ITK
py
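The comment above argues that these window-function classes are not wrapped and therefore do not need entries in the exclude list. Below is a minimal, hedged sketch of how one might verify that, assuming an ITK Python build where unwrapped names simply raise AttributeError; the candidate list and loop are illustrative and not part of the original script.

import itk

# Hypothetical check: only names that itk actually exposes (i.e. wrapped
# classes) can show up in the emptiness scan, so anything reported as
# "not wrapped" does not need an entry in the exclude list.
candidates = [
    "CosineWindowFunction",
    "HammingWindowFunction",
    "LanczosWindowFunction",
    "WelchWindowFunction",
]
for name in candidates:
    status = "wrapped" if hasattr(itk, name) else "not wrapped -> no exclude needed"
    print(name, status)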
@@ -27,7 +27,7 @@ import (
 // This little bit of wrapping needs to be done because go doesn't do
 // covariance, but it does coerce *time.Timer into stoppable implicitly if we
 // write it out like so.
-var afterFunc = func(c clock.Clock, d time.Duration, f func()) stoppable {
+func afterFunc(c clock.Clock, d time.Duration, f func()) stoppable {
 	t := c.NewTimer(d)
 	go func() {
1
/* Copyright 2020 The cert-manager Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package scheduler import ( "sync" "time" "k8s.io/utils/clock" ) // For mocking purposes. // This little bit of wrapping needs to be done because go doesn't do // covariance, but it does coerce *time.Timer into stoppable implicitly if we // write it out like so. var afterFunc = func(c clock.Clock, d time.Duration, f func()) stoppable { t := c.NewTimer(d) go func() { defer t.Stop() if ti := <-t.C(); ti == (time.Time{}) { return } f() }() return t } // stoppable is the subset of time.Timer which we use, split out for mocking purposes type stoppable interface { Stop() bool } // ProcessFunc is a function to process an item in the work queue. type ProcessFunc func(interface{}) // ScheduledWorkQueue is an interface to describe a queue that will execute the // given ProcessFunc with the object given to Add once the time.Duration is up, // since the time of calling Add. type ScheduledWorkQueue interface { // Add will add an item to this queue, executing the ProcessFunc after the // Duration has come (since the time Add was called). If an existing Timer // for obj already exists, the previous timer will be cancelled. Add(interface{}, time.Duration) // Forget will cancel the timer for the given object, if the timer exists. Forget(interface{}) } type scheduledWorkQueue struct { processFunc ProcessFunc clock clock.Clock work map[interface{}]stoppable workLock sync.Mutex } // NewScheduledWorkQueue will create a new workqueue with the given processFunc func NewScheduledWorkQueue(clock clock.Clock, processFunc ProcessFunc) ScheduledWorkQueue { return &scheduledWorkQueue{ processFunc: processFunc, clock: clock, work: make(map[interface{}]stoppable), workLock: sync.Mutex{}, } } // Add will add an item to this queue, executing the ProcessFunc after the // Duration has come (since the time Add was called). If an existing Timer for // obj already exists, the previous timer will be cancelled. func (s *scheduledWorkQueue) Add(obj interface{}, duration time.Duration) { s.workLock.Lock() defer s.workLock.Unlock() s.forget(obj) s.work[obj] = afterFunc(s.clock, duration, func() { defer s.Forget(obj) s.processFunc(obj) }) } // Forget will cancel the timer for the given object, if the timer exists. func (s *scheduledWorkQueue) Forget(obj interface{}) { s.workLock.Lock() defer s.workLock.Unlock() s.forget(obj) } // forget cancels and removes an item. It *must* be called with the lock already held func (s *scheduledWorkQueue) forget(obj interface{}) { if timer, ok := s.work[obj]; ok { timer.Stop() delete(s.work, obj) } }
1
28,369
Nit: since this is now a private function rather than a variable, would it make sense to move it after the public functions in this file for readability?
jetstack-cert-manager
go
@@ -27,8 +27,7 @@ MolDraw2DQt::MolDraw2DQt(int width, int height, QPainter &qp, int panelWidth,
 // ****************************************************************************
 void MolDraw2DQt::setColour(const DrawColour &col) {
   MolDraw2D::setColour(col);
-  QColor this_col(int(255.0 * col.get<0>()), int(255.0 * col.get<1>()),
-                  int(255.0 * col.get<2>()));
+  QColor this_col(int(255.0 * col.r), int(255.0 * col.g), int(255.0 * col.b));

   QPen pen(this_col);
   pen.setJoinStyle(Qt::RoundJoin);
1
// // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // // Original author: David Cosgrove (AstraZeneca) // 19th June 2014 // #include "MolDraw2DQt.h" #include <QPainter> #include <QString> using namespace boost; using namespace std; namespace RDKit { // **************************************************************************** MolDraw2DQt::MolDraw2DQt(int width, int height, QPainter &qp, int panelWidth, int panelHeight) : MolDraw2D(width, height, panelWidth, panelHeight), qp_(qp) {} // **************************************************************************** void MolDraw2DQt::setColour(const DrawColour &col) { MolDraw2D::setColour(col); QColor this_col(int(255.0 * col.get<0>()), int(255.0 * col.get<1>()), int(255.0 * col.get<2>())); QPen pen(this_col); pen.setJoinStyle(Qt::RoundJoin); pen.setColor(this_col); qp_.setPen(pen); QBrush brush(this_col); brush.setStyle(Qt::SolidPattern); qp_.setBrush(brush); } // **************************************************************************** void MolDraw2DQt::drawLine(const Point2D &cds1, const Point2D &cds2) { Point2D c1 = getDrawCoords(cds1); Point2D c2 = getDrawCoords(cds2); const DashPattern &dashes = dash(); QPen pen = qp_.pen(); if (dashes.size()) { QVector<qreal> dd; for (unsigned int di = 0; di < dashes.size(); ++di) dd << dashes[di]; pen.setDashPattern(dd); } else { pen.setStyle(Qt::SolidLine); } pen.setWidth(lineWidth()); qp_.setPen(pen); qp_.drawLine(QPointF(c1.x, c1.y), QPointF(c2.x, c2.y)); } // **************************************************************************** // draw the char, with the bottom left hand corner at cds void MolDraw2DQt::drawChar(char c, const Point2D &cds) { QRectF br = qp_.boundingRect(0, 0, 100, 100, Qt::AlignLeft | Qt::AlignBottom, QString(c)); qp_.drawText(QRectF(cds.x, cds.y - br.height(), br.width(), br.height()), Qt::AlignLeft | Qt::AlignBottom, QString(c), &br); } // **************************************************************************** void MolDraw2DQt::drawPolygon(const vector<Point2D> &cds) { PRECONDITION(cds.size() >= 3, "must have at least three points"); #ifdef NOTYET QBrush brush("Black"); brush.setStyle(Qt::SolidPattern); DrawColour cc = colour(); brush.setColor( QColor(255.0 * cc.get<0>(), 255.0 * cc.get<1>(), 255.0 * cc.get<2>())); #endif qp_.save(); QBrush brush = qp_.brush(); if (fillPolys()) brush.setStyle(Qt::SolidPattern); else brush.setStyle(Qt::NoBrush); qp_.setBrush(brush); QPointF points[cds.size()]; for (unsigned int i = 0; i < cds.size(); ++i) { Point2D lc = getDrawCoords(cds[i]); points[i] = QPointF(lc.x, lc.y); } qp_.drawConvexPolygon(points, cds.size()); qp_.restore(); } // **************************************************************************** void MolDraw2DQt::clearDrawing() { QColor this_col(int(255.0 * drawOptions().backgroundColour.get<0>()), int(255.0 * drawOptions().backgroundColour.get<1>()), int(255.0 * drawOptions().backgroundColour.get<2>())); qp_.setBackground(QBrush(this_col)); qp_.fillRect(0, 0, width(), height(), this_col); } // **************************************************************************** void MolDraw2DQt::setFontSize(double new_size) { MolDraw2D::setFontSize(new_size); double font_size_in_points = fontSize() * scale(); #ifdef NOTYET cout << "initial font size in points : " << qp_.font().pointSizeF() << endl; cout << "font_size_in_points : " << font_size_in_points << 
endl; #endif QFont font(qp_.font()); font.setPointSizeF(font_size_in_points); qp_.setFont(font); while (1) { double old_font_size_in_points = font_size_in_points; double font_size_in_points = fontSize() * scale(); if (fabs(font_size_in_points - old_font_size_in_points) < 0.1) { break; } QFont font(qp_.font()); font.setPointSizeF(font_size_in_points); qp_.setFont(font); calculateScale(); } } // **************************************************************************** // using the current scale, work out the size of the label in molecule // coordinates void MolDraw2DQt::getStringSize(const string &label, double &label_width, double &label_height) const { label_width = 0.0; label_height = 0.0; TextDrawType draw_mode = TextDrawNormal; // 0 for normal, 1 for superscript, 2 for subscript QString next_char(" "); bool had_a_super = false; for (int i = 0, is = label.length(); i < is; ++i) { // setStringDrawMode moves i along to the end of any <sub> or <sup> // markup if ('<' == label[i] && setStringDrawMode(label, draw_mode, i)) { continue; } next_char[0] = label[i]; QRectF br = qp_.boundingRect(0, 0, 100, 100, Qt::AlignBottom | Qt::AlignLeft, next_char); label_height = br.height() / scale(); double char_width = br.width() / scale(); if (TextDrawSubscript == draw_mode) { char_width *= 0.5; } else if (TextDrawSuperscript == draw_mode) { char_width *= 0.5; had_a_super = true; } label_width += char_width; } // subscript keeps its bottom in line with the bottom of the bit chars, // superscript goes above the original char top by a quarter if (had_a_super) { label_height *= 1.25; } } } // EO namespace RDKit
1
19,528
I'm assuming that MolDraw2DQt drops the alpha channel?
rdkit-rdkit
cpp
@@ -24,6 +24,9 @@ namespace System.Text.Json.Serialization.Tests
    [GenericTypeArguments(typeof(HashSet<string>))]
    [GenericTypeArguments(typeof(ArrayList))]
    [GenericTypeArguments(typeof(Hashtable))]
+   [GenericTypeArguments(typeof(SimpleStructWithProperties))]
+   [GenericTypeArguments(typeof(LargeStructWithProperties))]
+   [GenericTypeArguments(typeof(int))]
    public class ReadJson<T>
    {
        private string _serialized;
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using BenchmarkDotNet.Attributes; using MicroBenchmarks; using MicroBenchmarks.Serializers; using System.Collections; using System.Collections.Generic; using System.Collections.Immutable; using System.IO; using System.Threading.Tasks; namespace System.Text.Json.Serialization.Tests { [GenericTypeArguments(typeof(LoginViewModel))] [GenericTypeArguments(typeof(Location))] [GenericTypeArguments(typeof(IndexViewModel))] [GenericTypeArguments(typeof(MyEventsListerViewModel))] [GenericTypeArguments(typeof(BinaryData))] [GenericTypeArguments(typeof(Dictionary<string, string>))] [GenericTypeArguments(typeof(ImmutableDictionary<string, string>))] [GenericTypeArguments(typeof(ImmutableSortedDictionary<string, string>))] [GenericTypeArguments(typeof(HashSet<string>))] [GenericTypeArguments(typeof(ArrayList))] [GenericTypeArguments(typeof(Hashtable))] public class ReadJson<T> { private string _serialized; private byte[] _utf8Serialized; private MemoryStream _memoryStream; [GlobalSetup] public async Task Setup() { T value = DataGenerator.Generate<T>(); _serialized = JsonSerializer.Serialize(value); _utf8Serialized = Encoding.UTF8.GetBytes(_serialized); _memoryStream = new MemoryStream(capacity: short.MaxValue); await JsonSerializer.SerializeAsync(_memoryStream, value); } [BenchmarkCategory(Categories.Libraries, Categories.JSON)] [Benchmark] public T DeserializeFromString() => JsonSerializer.Deserialize<T>(_serialized); [BenchmarkCategory(Categories.Libraries, Categories.JSON)] [Benchmark] public T DeserializeFromUtf8Bytes() => JsonSerializer.Deserialize<T>(_utf8Serialized); [BenchmarkCategory(Categories.Libraries, Categories.JSON)] [Benchmark] public async Task<T> DeserializeFromStream() { _memoryStream.Position = 0; T value = await JsonSerializer.DeserializeAsync<T>(_memoryStream); return value; } [GlobalCleanup] public void Cleanup() => _memoryStream.Dispose(); } }
1
10,932
The code looks good to me, but I just wonder if it is a real use case: (de)serializing a single integer.
dotnet-performance
.cs
@@ -472,7 +472,18 @@ class Composition(collections.Hashable, collections.Mapping, MSONable):

         Returns:
             Composition with that formula.
+
+        Notes:
+            In the case of Metallofullerene formula (e.g. Y3N@C80),
+            the @ mark will be dropped and passed to parser.
         """
+        print(formula)
+        # for Metallofullerene like "Y3N@C80"
+        if "@" in formula:
+            print(formula)
+            formula = formula.replace("@", "")
+            print(formula)
+
         def get_sym_dict(f, factor):
             sym_dict = collections.defaultdict(float)
             for m in re.finditer(r"([A-Z][a-z]*)\s*([-*\.\d]*)", f):
1
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals import collections import numbers import string from itertools import combinations_with_replacement, product import os import six import re from collections import defaultdict from monty.serialization import loadfn from six.moves import filter, map, zip from functools import total_ordering from monty.fractions import gcd, gcd_float from pymatgen.core.periodic_table import get_el_sp, Element, Specie from pymatgen.util.string import formula_double_format from monty.json import MSONable from pymatgen.core.units import unitized """ This module implements a Composition class to represent compositions, and a ChemicalPotential class to represent potentials. """ __author__ = "Shyue Ping Ong" __copyright__ = "Copyright 2011, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "[email protected]" __status__ = "Production" __date__ = "Nov 10, 2012" @total_ordering class Composition(collections.Hashable, collections.Mapping, MSONable): """ Represents a Composition, which is essentially a {element:amount} mapping type. Composition is written to be immutable and hashable, unlike a standard Python dict. Note that the key can be either an Element or a Specie. Elements and Specie are treated differently. i.e., a Fe2+ is not the same as a Fe3+ Specie and would be put in separate keys. This differentiation is deliberate to support using Composition to determine the fraction of a particular Specie. Works almost completely like a standard python dictionary, except that __getitem__ is overridden to return 0 when an element is not found. (somewhat like a defaultdict, except it is immutable). Also adds more convenience methods relevant to compositions, e.g., get_fraction. It should also be noted that many Composition related functionality takes in a standard string as a convenient input. For example, even though the internal representation of a Fe2O3 composition is {Element("Fe"): 2, Element("O"): 3}, you can obtain the amount of Fe simply by comp["Fe"] instead of the more verbose comp[Element("Fe")]. >>> comp = Composition("LiFePO4") >>> comp.get_atomic_fraction(Element("Li")) 0.14285714285714285 >>> comp.num_atoms 7.0 >>> comp.reduced_formula 'LiFePO4' >>> comp.formula 'Li1 Fe1 P1 O4' >>> comp.get_wt_fraction(Element("Li")) 0.04399794666951898 >>> comp.num_atoms 7.0 """ """ Tolerance in distinguishing different composition amounts. 1e-8 is fairly tight, but should cut out most floating point arithmetic errors. """ amount_tolerance = 1e-8 """ Special formula handling for peroxides and certain elements. This is so that formula output does not write LiO instead of Li2O2 for example. """ special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2", "HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2", "O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2", "H": "H2"} oxi_prob = None # prior probability of oxidation used by oxi_state_guesses def __init__(self, *args, **kwargs): # allow_negative=False """ Very flexible Composition construction, similar to the built-in Python dict(). Also extended to allow simple string init. Args: Any form supported by the Python built-in dict() function. 1. A dict of either {Element/Specie: amount}, {string symbol:amount}, or {atomic number:amount} or any mixture of these. E.g., {Element("Li"):2 ,Element("O"):1}, {"Li":2, "O":1}, {3:2, 8:1} all result in a Li2O composition. 2. 
Keyword arg initialization, similar to a dict, e.g., Composition(Li = 2, O = 1) In addition, the Composition constructor also allows a single string as an input formula. E.g., Composition("Li2O"). allow_negative: Whether to allow negative compositions. This argument must be popped from the \\*\\*kwargs due to \\*args ambiguity. """ self.allow_negative = kwargs.pop('allow_negative', False) # it's much faster to recognize a composition and use the elmap than # to pass the composition to dict() if len(args) == 1 and isinstance(args[0], Composition): elmap = args[0] elif len(args) == 1 and isinstance(args[0], six.string_types): elmap = self._parse_formula(args[0]) else: elmap = dict(*args, **kwargs) elamt = {} self._natoms = 0 for k, v in elmap.items(): if v < -Composition.amount_tolerance and not self.allow_negative: raise CompositionError("Amounts in Composition cannot be " "negative!") if abs(v) >= Composition.amount_tolerance: elamt[get_el_sp(k)] = v self._natoms += abs(v) self._data = elamt def __getitem__(self, item): try: sp = get_el_sp(item) return self._data.get(sp, 0) except ValueError as ex: raise TypeError("Invalid key {}, {} for Composition\n" "ValueError exception:\n{}".format(item, type(item), ex)) def __len__(self): return len(self._data) def __iter__(self): return self._data.keys().__iter__() def __contains__(self, item): try: sp = get_el_sp(item) return sp in self._data except ValueError as ex: raise TypeError("Invalid key {}, {} for Composition\n" "ValueError exception:\n{}".format(item, type(item), ex)) def __eq__(self, other): # elements with amounts < Composition.amount_tolerance don't show up # in the elmap, so checking len enables us to only check one # compositions elements if len(self) != len(other): return False for el, v in self.items(): if abs(v - other[el]) > Composition.amount_tolerance: return False return True def __ge__(self, other): """ Defines >= for Compositions. Should ONLY be used for defining a sort order (the behavior is probably not what you'd expect) """ for el in sorted(set(self.elements + other.elements)): if other[el] - self[el] >= Composition.amount_tolerance: return False elif self[el] - other[el] >= Composition.amount_tolerance: return True return True def __ne__(self, other): return not self.__eq__(other) def __add__(self, other): """ Adds two compositions. For example, an Fe2O3 composition + an FeO composition gives a Fe3O4 composition. """ new_el_map = collections.defaultdict(float) new_el_map.update(self) for k, v in other.items(): new_el_map[get_el_sp(k)] += v return Composition(new_el_map, allow_negative=self.allow_negative) def __sub__(self, other): """ Subtracts two compositions. For example, an Fe2O3 composition - an FeO composition gives an FeO2 composition. Raises: CompositionError if the subtracted composition is greater than the original composition in any of its elements, unless allow_negative is True """ new_el_map = collections.defaultdict(float) new_el_map.update(self) for k, v in other.items(): new_el_map[get_el_sp(k)] -= v return Composition(new_el_map, allow_negative=self.allow_negative) def __mul__(self, other): """ Multiply a Composition by an integer or a float. 
Fe2O3 * 4 -> Fe8O12 """ if not isinstance(other, numbers.Number): return NotImplemented return Composition({el: self[el] * other for el in self}, allow_negative=self.allow_negative) __rmul__ = __mul__ def __truediv__(self, other): if not isinstance(other, numbers.Number): return NotImplemented return Composition({el: self[el] / other for el in self}, allow_negative=self.allow_negative) __div__ = __truediv__ def __hash__(self): """ Minimally effective hash function that just distinguishes between Compositions with different elements. """ hashcode = 0 for el, amt in self.items(): if abs(amt) > Composition.amount_tolerance: hashcode += el.Z return hashcode @property def average_electroneg(self): return sum((el.X * abs(amt) for el, amt in self.items())) / \ self.num_atoms @property def total_electrons(self): return sum((el.Z * abs(amt) for el, amt in self.items())) def almost_equals(self, other, rtol=0.1, atol=1e-8): """ Returns true if compositions are equal within a tolerance. Args: other (Composition): Other composition to check rtol (float): Relative tolerance atol (float): Absolute tolerance """ sps = set(self.elements + other.elements) for sp in sps: a = self[sp] b = other[sp] tol = atol + rtol * (abs(a) + abs(b)) / 2 if abs(b - a) > tol: return False return True @property def is_element(self): """ True if composition is for an element. """ return len(self) == 1 def copy(self): return Composition(self, allow_negative=self.allow_negative) @property def formula(self): """ Returns a formula string, with elements sorted by electronegativity, e.g., Li4 Fe4 P4 O16. """ sym_amt = self.get_el_amt_dict() syms = sorted(sym_amt.keys(), key=lambda sym: get_el_sp(sym).X) formula = [s + formula_double_format(sym_amt[s], False) for s in syms] return " ".join(formula) @property def alphabetical_formula(self): """ Returns a formula string, with elements sorted by alphabetically e.g., Fe4 Li4 O16 P4. """ sym_amt = self.get_el_amt_dict() syms = sorted(sym_amt.keys()) formula = [s + formula_double_format(sym_amt[s], False) for s in syms] return " ".join(formula) @property def element_composition(self): """ Returns the composition replacing any species by the corresponding element. """ return Composition(self.get_el_amt_dict(), allow_negative=self.allow_negative) @property def fractional_composition(self): """ Returns the normalized composition which the number of species sum to 1. Returns: Normalized composition which the number of species sum to 1. """ return self / self._natoms @property def reduced_composition(self): """ Returns the reduced composition,i.e. amounts normalized by greatest common denominator. e.g., Composition("FePO4") for Composition("Fe4P4O16"). """ return self.get_reduced_composition_and_factor()[0] def get_reduced_composition_and_factor(self): """ Calculates a reduced composition and factor. Returns: A normalized composition and a multiplicative factor, i.e., Li4Fe4P4O16 returns (Composition("LiFePO4"), 4). """ factor = self.get_reduced_formula_and_factor()[1] return self / factor, factor def get_reduced_formula_and_factor(self): """ Calculates a reduced formula and factor. Returns: A pretty normalized formula and a multiplicative factor, i.e., Li4Fe4P4O16 returns (LiFePO4, 4). 
""" all_int = all(abs(x - round(x)) < Composition.amount_tolerance for x in self.values()) if not all_int: return self.formula.replace(" ", ""), 1 d = {k: int(round(v)) for k, v in self.get_el_amt_dict().items()} (formula, factor) = reduce_formula(d) if formula in Composition.special_formulas: formula = Composition.special_formulas[formula] factor /= 2 return formula, factor def get_integer_formula_and_factor(self, max_denominator=10000): """ Calculates an integer formula and factor. Args: max_denominator (int): all amounts in the el:amt dict are first converted to a Fraction with this maximum denominator Returns: A pretty normalized formula and a multiplicative factor, i.e., Li0.5O0.25 returns (Li2O, 0.25). O0.25 returns (O2, 0.125) """ el_amt = self.get_el_amt_dict() g = gcd_float(list(el_amt.values()), 1 / max_denominator) d = {k: round(v / g) for k, v in el_amt.items()} (formula, factor) = reduce_formula(d) if formula in Composition.special_formulas: formula = Composition.special_formulas[formula] factor /= 2 return formula, factor * g @property def reduced_formula(self): """ Returns a pretty normalized formula, i.e., LiFePO4 instead of Li4Fe4P4O16. """ return self.get_reduced_formula_and_factor()[0] @property def hill_formula(self): c = self.element_composition elements = sorted([el.symbol for el in c.keys()]) if "C" in elements: elements = ["C"] + [el for el in elements if el != "C"] formula = ["%s%s" % (el, formula_double_format(c[el]) if c[el] != 1 else "") for el in elements] return " ".join(formula) @property def elements(self): """ Returns view of elements in Composition. """ return list(self.keys()) def __str__(self): return " ".join([ "{}{}".format(k, formula_double_format(v, ignore_ones=False)) for k, v in self.as_dict().items()]) @property def num_atoms(self): """ Total number of atoms in Composition. For negative amounts, sum of absolute values """ return self._natoms @property @unitized("amu") def weight(self): """ Total molecular weight of Composition """ return sum([amount * el.atomic_mass for el, amount in self.items()]) def get_atomic_fraction(self, el): """ Calculate atomic fraction of an Element or Specie. Args: el (Element/Specie): Element or Specie to get fraction for. Returns: Atomic fraction for element el in Composition """ return abs(self[el]) / self._natoms def get_wt_fraction(self, el): """ Calculate weight fraction of an Element or Specie. Args: el (Element/Specie): Element or Specie to get fraction for. Returns: Weight fraction for element el in Composition """ return get_el_sp(el).atomic_mass * abs(self[el]) / self.weight def _parse_formula(self, formula): """ Args: formula (str): A string formula, e.g. Fe2O3, Li3Fe2(PO4)3 Returns: Composition with that formula. 
""" def get_sym_dict(f, factor): sym_dict = collections.defaultdict(float) for m in re.finditer(r"([A-Z][a-z]*)\s*([-*\.\d]*)", f): el = m.group(1) amt = 1 if m.group(2).strip() != "": amt = float(m.group(2)) sym_dict[el] += amt * factor f = f.replace(m.group(), "", 1) if f.strip(): raise CompositionError("{} is an invalid formula!".format(f)) return sym_dict m = re.search(r"\(([^\(\)]+)\)\s*([\.\d]*)", formula) if m: factor = 1 if m.group(2) != "": factor = float(m.group(2)) unit_sym_dict = get_sym_dict(m.group(1), factor) expanded_sym = "".join(["{}{}".format(el, amt) for el, amt in unit_sym_dict.items()]) expanded_formula = formula.replace(m.group(), expanded_sym) return self._parse_formula(expanded_formula) return get_sym_dict(formula, 1) @property def anonymized_formula(self): """ An anonymized formula. Unique species are arranged in ordering of increasing amounts and assigned ascending alphabets. Useful for prototyping formulas. For example, all stoichiometric perovskites have anonymized_formula ABC3. """ reduced = self.element_composition if all(x == int(x) for x in self.values()): reduced /= gcd(*(int(i) for i in self.values())) anon = "" for e, amt in zip(string.ascii_uppercase, sorted(reduced.values())): if amt == 1: amt_str = "" elif abs(amt % 1) < 1e-8: amt_str = str(int(amt)) else: amt_str = str(amt) anon += ("{}{}".format(e, amt_str)) return anon def __repr__(self): return "Comp: " + self.formula @classmethod def from_dict(cls, d): """ Creates a composition from a dict generated by as_dict(). Strictly not necessary given that the standard constructor already takes in such an input, but this method preserves the standard pymatgen API of having from_dict methods to reconstitute objects generated by as_dict(). Allows for easier introspection. Args: d (dict): {symbol: amount} dict. """ return cls(d) def get_el_amt_dict(self): """ Returns: Dict with element symbol and (unreduced) amount e.g., {"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0} """ d = collections.defaultdict(float) for e, a in self.items(): d[e.symbol] += a return d def as_dict(self): """ Returns: dict with species symbol and (unreduced) amount e.g., {"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0} """ d = collections.defaultdict(float) for e, a in self.items(): d[str(e)] += a return d @property def to_reduced_dict(self): """ Returns: Dict with element symbol and reduced amount e.g., {"Fe": 2.0, "O":3.0} """ c = Composition(self.reduced_formula) return c.as_dict() @property def to_data_dict(self): """ Returns: A dict with many keys and values relating to Composition/Formula, including reduced_cell_composition, unit_cell_composition, reduced_cell_formula, elements and nelements. """ return {"reduced_cell_composition": self.to_reduced_dict, "unit_cell_composition": self.as_dict(), "reduced_cell_formula": self.reduced_formula, "elements": self.as_dict().keys(), "nelements": len(self.as_dict().keys())} def oxi_state_guesses(self, oxi_states_override=None, target_charge=0, all_oxi_states=False, max_sites=None): """ Checks if the composition is charge-balanced and returns back all charge-balanced oxidation state combinations. Composition must have integer values. Note that more num_atoms in the composition gives more degrees of freedom. e.g., if possible oxidation states of element X are [2,4] and Y are [-3], then XY is not charge balanced but X2Y2 is. Results are returned from most to least probable based on ICSD statistics. Use max_sites to improve performance if needed. 
Args: oxi_states_override (dict): dict of str->list to override an element's common oxidation states, e.g. {"V": [2,3,4,5]} target_charge (int): the desired total charge on the structure. Default is 0 signifying charge balance. all_oxi_states (bool): if True, an element defaults to all oxidation states in pymatgen Element.icsd_oxidation_states. Otherwise, default is Element.common_oxidation_states. Note that the full oxidation state list is *very* inclusive and can produce nonsensical results. max_sites (int): if possible, will reduce Compositions to at most this many many sites to speed up oxidation state guesses. Set to -1 to just reduce fully. Returns: A list of dicts - each dict reports an element symbol and average oxidation state across all sites in that composition. If the composition is not charge balanced, an empty list is returned. """ return self._get_oxid_state_guesses(all_oxi_states, max_sites, oxi_states_override, target_charge)[0] def add_charges_from_oxi_state_guesses(self, oxi_states_override=None, target_charge=0, all_oxi_states=False, max_sites=None): """ Assign oxidation states basedon guessed oxidation states. See `oxi_state_guesses` for an explanation of how oxidation states are guessed. This operation uses the set of oxidation states for each site that were determined to be most likley from the oxidation state guessing routine. Args: oxi_states_override (dict): dict of str->list to override an element's common oxidation states, e.g. {"V": [2,3,4,5]} target_charge (int): the desired total charge on the structure. Default is 0 signifying charge balance. all_oxi_states (bool): if True, an element defaults to all oxidation states in pymatgen Element.icsd_oxidation_states. Otherwise, default is Element.common_oxidation_states. Note that the full oxidation state list is *very* inclusive and can produce nonsensical results. max_sites (int): if possible, will reduce Compositions to at most this many many sites to speed up oxidation state guesses. Set to -1 to just reduce fully. Returns: Composition, where the elements are assigned oxidation states based on the results form guessing oxidation states. If no oxidation state is possible, returns a Composition where all oxidation states are 0 """ _, oxidation_states = self._get_oxid_state_guesses(all_oxi_states, max_sites, oxi_states_override, target_charge) # Special case: No charged compound is possible if len(oxidation_states) == 0: return Composition(dict((Specie(e,0),f) for e,f in self.items())) # Generate the species species = [] for el, charges in oxidation_states[0].items(): species.extend([Specie(el,c) for c in charges]) # Return the new object return Composition(collections.Counter(species)) def _get_oxid_state_guesses(self, all_oxi_states, max_sites, oxi_states_override, target_charge): """ Utility operation for guessing oxidation states. See `oxi_state_guesses` for full details. This operation does the calculation of the most likely oxidation states Args: oxi_states_override (dict): dict of str->list to override an element's common oxidation states, e.g. {"V": [2,3,4,5]} target_charge (int): the desired total charge on the structure. Default is 0 signifying charge balance. all_oxi_states (bool): if True, an element defaults to all oxidation states in pymatgen Element.icsd_oxidation_states. Otherwise, default is Element.common_oxidation_states. Note that the full oxidation state list is *very* inclusive and can produce nonsensical results. 
max_sites (int): if possible, will reduce Compositions to at most this many many sites to speed up oxidation state guesses. Set to -1 to just reduce fully. Returns: A list of dicts - each dict reports an element symbol and average oxidation state across all sites in that composition. If the composition is not charge balanced, an empty list is returned. A list of dicts - each dict maps the element symbol to a list of oxidation states for each site of that element. For example, Fe3O4 could return a list of [2,2,2,3,3,3] for the oxidation states of If the composition is """ comp = self.copy() # reduce Composition if necessary if max_sites == -1: comp = self.reduced_composition elif max_sites and comp.num_atoms > max_sites: reduced_comp, reduced_factor = self. \ get_reduced_composition_and_factor() if reduced_factor > 1: reduced_comp *= max(1, int(max_sites / reduced_comp.num_atoms)) comp = reduced_comp # as close to max_sites as possible if comp.num_atoms > max_sites: raise ValueError("Composition {} cannot accommodate max_sites " "setting!".format(comp)) # Load prior probabilities of oxidation states, used to rank solutions if not Composition.oxi_prob: module_dir = os.path.join(os.path. dirname(os.path.abspath(__file__))) all_data = loadfn(os.path.join(module_dir, "..", "analysis", "icsd_bv.yaml")) Composition.oxi_prob = {Specie.from_string(sp): data for sp, data in all_data["occurrence"].items()} oxi_states_override = oxi_states_override or {} # assert: Composition only has integer amounts if not all(amt == int(amt) for amt in comp.values()): raise ValueError("Charge balance analysis requires integer " "values in Composition!") # for each element, determine all possible sum of oxidations # (taking into account nsites for that particular element) el_amt = comp.get_el_amt_dict() els = el_amt.keys() el_sums = [] # matrix: dim1= el_idx, dim2=possible sums el_sum_scores = defaultdict(set) # dict of el_idx, sum -> score el_best_oxid_combo = {} # dict of el_idx, sum -> oxid combo with best score for idx, el in enumerate(els): el_sum_scores[idx] = {} el_best_oxid_combo[idx] = {} el_sums.append([]) if oxi_states_override.get(el): oxids = oxi_states_override[el] elif all_oxi_states: oxids = Element(el).oxidation_states else: oxids = Element(el).icsd_oxidation_states or \ Element(el).oxidation_states # get all possible combinations of oxidation states # and sum each combination for oxid_combo in combinations_with_replacement(oxids, int(el_amt[el])): # List this sum as a possible option oxid_sum = sum(oxid_combo) if oxid_sum not in el_sums[idx]: el_sums[idx].append(oxid_sum) # Determine how probable is this combo? 
score = sum([Composition.oxi_prob.get(Specie(el, o), 0) for o in oxid_combo]) # If it is the most probable combo for a certain sum, # store the combination if oxid_sum not in el_sum_scores[idx] or score > el_sum_scores[idx].get(oxid_sum, 0): el_sum_scores[idx][oxid_sum] = score el_best_oxid_combo[idx][oxid_sum] = oxid_combo # Determine which combination of oxidation states for each element # is the most probable all_sols = [] # will contain all solutions all_oxid_combo = [] # will contain the best combination of oxidation states for each site all_scores = [] # will contain a score for each solution for x in product(*el_sums): # each x is a trial of one possible oxidation sum for each element if sum(x) == target_charge: # charge balance condition el_sum_sol = dict(zip(els, x)) # element->oxid_sum # normalize oxid_sum by amount to get avg oxid state sol = {el: v / el_amt[el] for el, v in el_sum_sol.items()} all_sols.append(sol) # add the solution to the list of solutions # determine the score for this solution score = 0 for idx, v in enumerate(x): score += el_sum_scores[idx][v] all_scores.append(score) # collect the combination of oxidation states for each site all_oxid_combo.append(dict((e,el_best_oxid_combo[idx][v]) for idx, (e,v) in enumerate(zip(els,x)))) # sort the solutions by highest to lowest score if len(all_scores) > 0: all_sols, all_oxid_combo = zip(*[(y, x) for (z, y, x) in sorted(zip(all_scores, all_sols, all_oxid_combo), key=lambda pair: pair[0], reverse=True)]) return all_sols, all_oxid_combo @staticmethod def ranked_compositions_from_indeterminate_formula(fuzzy_formula, lock_if_strict=True): """ Takes in a formula where capitilization might not be correctly entered, and suggests a ranked list of potential Composition matches. Author: Anubhav Jain Args: fuzzy_formula (str): A formula string, such as "co2o3" or "MN", that may or may not have multiple interpretations lock_if_strict (bool): If true, a properly entered formula will only return the one correct interpretation. For example, "Co1" will only return "Co1" if true, but will return both "Co1" and "C1 O1" if false. Returns: A ranked list of potential Composition matches """ #if we have an exact match and the user specifies lock_if_strict, just #return the exact match! if lock_if_strict: #the strict composition parsing might throw an error, we can ignore #it and just get on with fuzzy matching try: comp = Composition(fuzzy_formula) return [comp] except (CompositionError, ValueError): pass all_matches = Composition._comps_from_fuzzy_formula(fuzzy_formula) #remove duplicates all_matches = list(set(all_matches)) #sort matches by rank descending all_matches = sorted(all_matches, key=lambda match: match[1], reverse=True) all_matches = [m[0] for m in all_matches] return all_matches @staticmethod def _comps_from_fuzzy_formula(fuzzy_formula, m_dict={}, m_points=0, factor=1): """ A recursive helper method for formula parsing that helps in interpreting and ranking indeterminate formulas. Author: Anubhav Jain Args: fuzzy_formula (str): A formula string, such as "co2o3" or "MN", that may or may not have multiple interpretations. m_dict (dict): A symbol:amt dictionary from the previously parsed formula. m_points: Number of points gained from the previously parsed formula. factor: Coefficient for this parse, e.g. (PO4)2 will feed in PO4 as the fuzzy_formula with a coefficient of 2. 
Returns: A list of tuples, with the first element being a Composition and the second element being the number of points awarded that Composition intepretation. """ def _parse_chomp_and_rank(m, f, m_dict, m_points): """ A helper method for formula parsing that helps in interpreting and ranking indeterminate formulas Author: Anubhav Jain Args: m: A regex match, with the first group being the element and the second group being the amount f: The formula part containing the match m_dict: A symbol:amt dictionary from the previously parsed formula m_points: Number of points gained from the previously parsed formula Returns: A tuple of (f, m_dict, points) where m_dict now contains data from the match and the match has been removed (chomped) from the formula f. The "goodness" of the match determines the number of points returned for chomping. Returns (None, None, None) if no element could be found... """ points = 0 # Points awarded if the first element of the element is correctly # specified as a capital points_first_capital = 100 # Points awarded if the second letter of the element is correctly # specified as lowercase points_second_lowercase = 100 #get element and amount from regex match el = m.group(1) if len(el) > 2 or len(el) < 1: raise CompositionError("Invalid element symbol entered!") amt = float(m.group(2)) if m.group(2).strip() != "" else 1 #convert the element string to proper [uppercase,lowercase] format #and award points if it is already in that format char1 = el[0] char2 = el[1] if len(el) > 1 else "" if char1 == char1.upper(): points += points_first_capital if char2 and char2 == char2.lower(): points += points_second_lowercase el = char1.upper() + char2.lower() #if it's a valid element, chomp and add to the points if Element.is_valid_symbol(el): if el in m_dict: m_dict[el] += amt * factor else: m_dict[el] = amt * factor return f.replace(m.group(), "", 1), m_dict, m_points + points #else return None return None, None, None fuzzy_formula = fuzzy_formula.strip() if len(fuzzy_formula) == 0: # The entire formula has been parsed into m_dict. Return the # corresponding Composition and number of points if m_dict: yield (Composition.from_dict(m_dict), m_points) else: # if there is a parenthesis, remove it and match the remaining stuff # with the appropriate factor for mp in re.finditer(r"\(([^\(\)]+)\)([\.\d]*)", fuzzy_formula): mp_points = m_points mp_form = fuzzy_formula.replace(mp.group(), " ", 1) mp_dict = dict(m_dict) mp_factor = 1 if mp.group(2) == "" else float(mp.group(2)) # Match the stuff inside the parenthesis with the appropriate # factor for match in \ Composition._comps_from_fuzzy_formula(mp.group(1), mp_dict, mp_points, factor=mp_factor): only_me = True # Match the stuff outside the parentheses and return the # sum. 
for match2 in \ Composition._comps_from_fuzzy_formula(mp_form, mp_dict, mp_points, factor=1): only_me = False yield (match[0] + match2[0], match[1] + match2[1]) # if the stuff inside the parenthesis is nothing, then just # return the stuff inside the parentheses if only_me: yield match return # try to match the single-letter elements m1 = re.match(r"([A-z])([\.\d]*)", fuzzy_formula) if m1: m_points1 = m_points m_form1 = fuzzy_formula m_dict1 = dict(m_dict) (m_form1, m_dict1, m_points1) = \ _parse_chomp_and_rank(m1, m_form1, m_dict1, m_points1) if m_dict1: #there was a real match for match in \ Composition._comps_from_fuzzy_formula(m_form1, m_dict1, m_points1, factor): yield match #try to match two-letter elements m2 = re.match(r"([A-z]{2})([\.\d]*)", fuzzy_formula) if m2: m_points2 = m_points m_form2 = fuzzy_formula m_dict2 = dict(m_dict) (m_form2, m_dict2, m_points2) = \ _parse_chomp_and_rank(m2, m_form2, m_dict2, m_points2) if m_dict2: #there was a real match for match in \ Composition._comps_from_fuzzy_formula(m_form2, m_dict2, m_points2, factor): yield match def reduce_formula(sym_amt): """ Helper method to reduce a sym_amt dict to a reduced formula and factor. Args: sym_amt (dict): {symbol: amount}. Returns: (reduced_formula, factor). """ syms = sorted(sym_amt.keys(), key=lambda s: [get_el_sp(s).X, s]) syms = list(filter(lambda s: abs(sym_amt[s]) > Composition.amount_tolerance, syms)) num_el = len(syms) contains_polyanion = (num_el >= 3 and get_el_sp(syms[num_el - 1]).X - get_el_sp(syms[num_el - 2]).X < 1.65) factor = 1 # Enforce integers for doing gcd. if all((int(i) == i for i in sym_amt.values())): factor = abs(gcd(*(int(i) for i in sym_amt.values()))) reduced_form = [] n = num_el - 2 if contains_polyanion else num_el for i in range(0, n): s = syms[i] normamt = sym_amt[s] * 1.0 / factor reduced_form.append(s) reduced_form.append(formula_double_format(normamt)) if contains_polyanion: poly_sym_amt = {syms[i]: sym_amt[syms[i]] / factor for i in range(n, num_el)} (poly_form, poly_factor) = reduce_formula(poly_sym_amt) if poly_factor != 1: reduced_form.append("({}){}".format(poly_form, int(poly_factor))) else: reduced_form.append(poly_form) reduced_form = "".join(reduced_form) return reduced_form, factor class CompositionError(Exception): """Exception class for composition errors""" pass class ChemicalPotential(dict, MSONable): """ Class to represent set of chemical potentials. Can be: multiplied/divided by a Number multiplied by a Composition (returns an energy) added/subtracted with other ChemicalPotentials. 
""" def __init__(self, *args, **kwargs): """ Args: *args, **kwargs: any valid dict init arguments """ d = dict(*args, **kwargs) super(ChemicalPotential, self).__init__((get_el_sp(k), v) for k, v in d.items()) if len(d) != len(self): raise ValueError("Duplicate potential specified") def __mul__(self, other): if isinstance(other, numbers.Number): return ChemicalPotential({k: v * other for k, v in self.items()}) else: return NotImplemented __rmul__ = __mul__ def __truediv__(self, other): if isinstance(other, numbers.Number): return ChemicalPotential({k: v / other for k, v in self.items()}) else: return NotImplemented __div__ = __truediv__ def __sub__(self, other): if isinstance(other, ChemicalPotential): els = set(self.keys()).union(other.keys()) return ChemicalPotential({e: self.get(e, 0) - other.get(e, 0) for e in els}) else: return NotImplemented def __add__(self, other): if isinstance(other, ChemicalPotential): els = set(self.keys()).union(other.keys()) return ChemicalPotential({e: self.get(e, 0) + other.get(e, 0) for e in els}) else: return NotImplemented def get_energy(self, composition, strict=True): """ Calculates the energy of a composition. Args: composition (Composition): input composition strict (bool): Whether all potentials must be specified """ if strict and set(composition.keys()) > set(self.keys()): s = set(composition.keys()) - set(self.keys()) raise ValueError("Potentials not specified for {}".format(s)) return sum(self.get(k, 0) * v for k, v in composition.items()) def __repr__(self): return "ChemPots: " + super(ChemicalPotential, self).__repr__() if __name__ == "__main__": import doctest doctest.testmod()
1
17,544
Please remove the print statements. Also, you do not need the if statement: str.replace already returns the string unchanged when "@" is absent (a cleaned-up sketch follows this row).
materialsproject-pymatgen
py
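To illustrate the review comment above, here is a minimal, hedged sketch of the suggested cleanup: the guard and the debug prints go away because str.replace is a no-op when "@" is absent. This is a simplified standalone parser written only for the example (no parentheses handling or validation, and the function name is made up); it is not the actual pymatgen method.

import collections
import re


def parse_formula_dropping_fullerene_marker(formula):
    """Sketch: drop the metallofullerene "@" marker, then parse element amounts."""
    # No `if "@" in formula` guard needed: replace() returns the string
    # unchanged when there is nothing to replace.
    formula = formula.replace("@", "")

    sym_dict = collections.defaultdict(float)
    for m in re.finditer(r"([A-Z][a-z]*)\s*([-*\.\d]*)", formula):
        el, amt = m.group(1), m.group(2)
        sym_dict[el] += float(amt) if amt.strip() else 1
    return dict(sym_dict)


print(parse_formula_dropping_fullerene_marker("Y3N@C80"))
# {'Y': 3.0, 'N': 1.0, 'C': 80.0}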
@@ -112,7 +112,7 @@ def patch_moto():

     # escape message responses to allow for special characters like "<"
     sqs_responses.RECEIVE_MESSAGE_RESPONSE = sqs_responses.RECEIVE_MESSAGE_RESPONSE.replace(
-        "<StringValue>{{ value.string_value }}</StringValue>",
+        "<StringValue><![CDATA[{{ value.string_value }}]]></StringValue>",
         "<StringValue>{{ _escape(value.string_value) }}</StringValue>",
     )

1
import logging import os import types from html import escape from moto.core.utils import camelcase_to_underscores from moto.sqs import responses as sqs_responses from moto.sqs.exceptions import QueueDoesNotExist from moto.sqs.models import Queue from localstack import config from localstack.config import LOCALSTACK_HOSTNAME, TMP_FOLDER from localstack.services.infra import ( do_run, log_startup_message, start_moto_server, start_proxy_for_service, ) from localstack.services.install import INSTALL_DIR_ELASTICMQ, SQS_BACKEND_IMPL, install_elasticmq from localstack.utils.aws import aws_stack from localstack.utils.common import ( TMP_FILES, get_free_tcp_port, save_file, short_uid, to_str, wait_for_port_open, ) LOG = logging.getLogger(__name__) # backend port (configured at startup) PORT_SQS_BACKEND = None # max heap size allocated for the Java process MAX_HEAP_SIZE = "256m" def check_sqs(expect_shutdown=False, print_error=False): out = None try: # wait for port to be opened wait_for_port_open(PORT_SQS_BACKEND) # check SQS endpoint_url = f"http://127.0.0.1:{PORT_SQS_BACKEND}" out = aws_stack.connect_to_service( service_name="sqs", endpoint_url=endpoint_url ).list_queues() except Exception: if print_error: LOG.exception("SQS health check failed") if expect_shutdown: assert out is None else: assert out.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200 def start_sqs(*args, **kwargs): if SQS_BACKEND_IMPL == "moto": return start_sqs_moto(*args, **kwargs) return start_sqs_elasticmq(*args, **kwargs) def patch_moto(): # patch add_message to disable event source mappings in moto def add_message(self, *args, **kwargs): mappings = self.lambda_event_source_mappings try: # temporarily set mappings to empty dict, to prevent moto from consuming messages from the queue self.lambda_event_source_mappings = {} return add_message_orig(self, *args, **kwargs) finally: self.lambda_event_source_mappings = mappings add_message_orig = Queue.add_message Queue.add_message = add_message _set_attributes_orig = Queue._set_attributes def _set_attributes(self, attributes, now=None): _set_attributes_orig(self, attributes, now) integer_fields = ["ReceiveMessageWaitTimeSeconds"] for key in integer_fields: attribute = camelcase_to_underscores(key) setattr(self, attribute, int(getattr(self, attribute, 0))) Queue._set_attributes = _set_attributes # pass additional globals (e.g., escaping methods) to template render method def response_template(self, template_str, *args, **kwargs): template = response_template_orig(self, template_str, *args, **kwargs) def _escape(val): try: return val and escape(to_str(val)) except Exception: return val def render(self, *args, **kwargs): return render_orig(*args, _escape=_escape, **kwargs) if not hasattr(template, "__patched"): render_orig = template.render template.render = types.MethodType(render, template) template.__patched = True return template response_template_orig = sqs_responses.SQSResponse.response_template sqs_responses.SQSResponse.response_template = response_template # escape message responses to allow for special characters like "<" sqs_responses.RECEIVE_MESSAGE_RESPONSE = sqs_responses.RECEIVE_MESSAGE_RESPONSE.replace( "<StringValue>{{ value.string_value }}</StringValue>", "<StringValue>{{ _escape(value.string_value) }}</StringValue>", ) # Fix issue with trailing slash # https://github.com/localstack/localstack/issues/2874 def sqs_responses_get_queue_name(self): try: queue_url = self.querystring.get("QueueUrl")[0] queue_name_data = queue_url.split("/")[4:] queue_name_data = 
[queue_attr for queue_attr in queue_name_data if queue_attr] queue_name = "/".join(queue_name_data) except TypeError: # Fallback to reading from the URL queue_name = self.path.split("/")[2] if not queue_name: raise QueueDoesNotExist() return queue_name sqs_responses.SQSResponse._get_queue_name = sqs_responses_get_queue_name def start_sqs_moto(port=None, asynchronous=False, update_listener=None): port = port or config.PORT_SQS patch_moto() result = start_moto_server( "sqs", port, name="SQS", asynchronous=asynchronous, update_listener=update_listener, ) global PORT_SQS_BACKEND PORT_SQS_BACKEND = result.service_port return result def start_sqs_elasticmq(port=None, asynchronous=False, update_listener=None): global PORT_SQS_BACKEND port = port or config.PORT_SQS install_elasticmq() PORT_SQS_BACKEND = get_free_tcp_port() # create config file config_params = """ include classpath("application.conf") node-address { protocol = http host = "%s" port = %s context-path = "" } rest-sqs { enabled = true bind-port = %s bind-hostname = "0.0.0.0" sqs-limits = strict } """ % ( LOCALSTACK_HOSTNAME, port, PORT_SQS_BACKEND, ) config_file = os.path.join(TMP_FOLDER, "sqs.%s.conf" % short_uid()) TMP_FILES.append(config_file) save_file(config_file, config_params) # start process cmd = "java -Dconfig.file=%s -Xmx%s -jar %s/elasticmq-server.jar" % ( config_file, MAX_HEAP_SIZE, INSTALL_DIR_ELASTICMQ, ) log_startup_message("SQS") start_proxy_for_service("sqs", port, PORT_SQS_BACKEND, update_listener) return do_run(cmd, asynchronous)
1
13,578
Thanks for this fix @eltone! Wondering if we should make this a bit more resilient to upstream changes, e.g., via a regex replacement (a possible sketch follows this row). Not critical, though; we can pick that up in a follow-up PR. Thanks!
localstack-localstack
py
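One possible shape for the regex-based variant suggested in the comment above. This is only a sketch under assumptions: the pattern, the constant names, and the example strings are illustrative, not the actual follow-up change. The idea is that the pattern tolerates both the older plain template line and the newer CDATA-wrapped one, so the substitution keeps working if upstream flips between the two.

import re

# Matches the Jinja placeholder with or without a CDATA wrapper around it.
STRING_VALUE_PATTERN = re.compile(
    r"<StringValue>(?:<!\[CDATA\[)?\{\{ value\.string_value \}\}(?:\]\]>)?</StringValue>"
)
REPLACEMENT = "<StringValue>{{ _escape(value.string_value) }}</StringValue>"

# Example inputs: the old plain template line and the CDATA-wrapped one.
for template in (
    "<StringValue>{{ value.string_value }}</StringValue>",
    "<StringValue><![CDATA[{{ value.string_value }}]]></StringValue>",
):
    print(STRING_VALUE_PATTERN.sub(REPLACEMENT, template))
# Both print: <StringValue>{{ _escape(value.string_value) }}</StringValue>

In the real patch this would presumably be applied to sqs_responses.RECEIVE_MESSAGE_RESPONSE in place of the str.replace call, but that wiring is left out here.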
@@ -91,6 +91,7 @@ func newSimpleClientV7(url string) (*clientV7, error) {
 	var err error
 	if client, err = elastic.NewClient(
 		elastic.SetURL(url),
+		elastic.SetSniff(false),
 		elastic.SetRetrier(retrier),
 	); err != nil {
 		return nil, err
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package client import ( "context" "net/http" "strings" "time" "github.com/olivere/elastic/v7" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" ) type ( // clientV7 implements Client clientV7 struct { esClient *elastic.Client } ) var _ Client = (*clientV7)(nil) var _ ClientV7 = (*clientV7)(nil) // newClientV7 create a ES client func newClientV7(config *config.Elasticsearch, httpClient *http.Client, logger log.Logger) (*clientV7, error) { options := []elastic.ClientOptionFunc{ elastic.SetURL(config.URL.String()), elastic.SetSniff(false), elastic.SetBasicAuth(config.Username, config.Password), // Disable health check so we don't block client creation (and thus temporal server startup) // if the ES instance happens to be down. elastic.SetHealthcheck(false), elastic.SetRetrier(elastic.NewBackoffRetrier(elastic.NewExponentialBackoff(128*time.Millisecond, 513*time.Millisecond))), // critical to ensure decode of int64 won't lose precision elastic.SetDecoder(&elastic.NumberDecoder{}), } options = append(options, getLoggerOptions(config.LogLevel, logger)...) if httpClient != nil { options = append(options, elastic.SetHttpClient(httpClient)) } client, err := elastic.NewClient(options...) if err != nil { return nil, err } // Re-enable the health check after client has successfully been created. client.Stop() err = elastic.SetHealthcheck(true)(client) if err != nil { return nil, err } client.Start() return &clientV7{esClient: client}, nil } func newSimpleClientV7(url string) (*clientV7, error) { retrier := elastic.NewBackoffRetrier(elastic.NewExponentialBackoff(128*time.Millisecond, 513*time.Millisecond)) var client *elastic.Client var err error if client, err = elastic.NewClient( elastic.SetURL(url), elastic.SetRetrier(retrier), ); err != nil { return nil, err } return &clientV7{esClient: client}, nil } func (c *clientV7) Search(ctx context.Context, p *SearchParameters) (*elastic.SearchResult, error) { searchService := c.esClient.Search(p.Index). Query(p.Query). SortBy(p.Sorter...) if p.PageSize != 0 { searchService.Size(p.PageSize) } if len(p.SearchAfter) != 0 { searchService.SearchAfter(p.SearchAfter...) 
} return searchService.Do(ctx) } func (c *clientV7) OpenPointInTime(ctx context.Context, index string, keepAliveInterval string) (string, error) { resp, err := c.esClient.OpenPointInTime(index).KeepAlive(keepAliveInterval).Do(ctx) if err != nil { return "", err } return resp.Id, nil } func (c *clientV7) ClosePointInTime(ctx context.Context, id string) (bool, error) { resp, err := c.esClient.ClosePointInTime(id).Do(ctx) if err != nil { return false, err } return resp.Succeeded, nil } func (c *clientV7) SearchWithDSLWithPIT(ctx context.Context, query string) (*elastic.SearchResult, error) { // When pit.id is specified index must not be used. searchResult, err := c.esClient.Search().Source(query).Do(ctx) return searchResult, err } func (c *clientV7) SearchWithDSL(ctx context.Context, index, query string) (*elastic.SearchResult, error) { searchResult, err := c.esClient.Search(index).Source(query).Do(ctx) return searchResult, err } func (c *clientV7) Count(ctx context.Context, index, query string) (int64, error) { return c.esClient.Count(index).BodyString(query).Do(ctx) } func (c *clientV7) RunBulkProcessor(ctx context.Context, p *BulkProcessorParameters) (BulkProcessor, error) { esBulkProcessor, err := c.esClient.BulkProcessor(). Name(p.Name). Workers(p.NumOfWorkers). BulkActions(p.BulkActions). BulkSize(p.BulkSize). FlushInterval(p.FlushInterval). Backoff(p.Backoff). Before(p.BeforeFunc). After(p.AfterFunc). Do(ctx) return newBulkProcessorV7(esBulkProcessor), err } func (c *clientV7) PutMapping(ctx context.Context, index string, mapping map[string]enumspb.IndexedValueType) (bool, error) { body := buildMappingBody(mapping) resp, err := c.esClient.PutMapping().Index(index).BodyJson(body).Do(ctx) if err != nil { return false, err } return resp.Acknowledged, err } func (c *clientV7) WaitForYellowStatus(ctx context.Context, index string) (string, error) { resp, err := c.esClient.ClusterHealth().Index(index).WaitForYellowStatus().Do(ctx) if err != nil { return "", err } return resp.Status, err } func (c *clientV7) GetMapping(ctx context.Context, index string) (map[string]string, error) { resp, err := c.esClient.GetMapping().Index(index).Do(ctx) if err != nil { return nil, err } return convertMappingBody(resp, index), err } func (c *clientV7) GetDateFieldType() string { return "date_nanos" } func (c *clientV7) CreateIndex(ctx context.Context, index string) (bool, error) { resp, err := c.esClient.CreateIndex(index).Do(ctx) if err != nil { return false, err } return resp.Acknowledged, nil } func (c *clientV7) IsNotFoundError(err error) bool { return elastic.IsNotFound(err) } func (c *clientV7) CatIndices(ctx context.Context) (elastic.CatIndicesResponse, error) { return c.esClient.CatIndices().Do(ctx) } func (c *clientV7) Bulk() BulkService { return newBulkServiceV7(c.esClient.Bulk()) } func (c *clientV7) IndexPutTemplate(ctx context.Context, templateName string, bodyString string) (bool, error) { resp, err := c.esClient.IndexPutTemplate(templateName).BodyString(bodyString).Do(ctx) if err != nil { return false, err } return resp.Acknowledged, nil } func (c *clientV7) IndexExists(ctx context.Context, indexName string) (bool, error) { return c.esClient.IndexExists(indexName).Do(ctx) } func (c *clientV7) DeleteIndex(ctx context.Context, indexName string) (bool, error) { resp, err := c.esClient.DeleteIndex(indexName).Do(ctx) if err != nil { return false, err } return resp.Acknowledged, nil } func (c *clientV7) IndexPutSettings(ctx context.Context, indexName string, bodyString string) (bool, error) { resp, 
err := c.esClient.IndexPutSettings(indexName).BodyString(bodyString).Do(ctx) if err != nil { return false, err } return resp.Acknowledged, nil } func (c *clientV7) IndexGetSettings(ctx context.Context, indexName string) (map[string]*elastic.IndicesGetSettingsResponse, error) { return c.esClient.IndexGetSettings(indexName).Do(ctx) } func (c *clientV7) Delete(ctx context.Context, indexName string, docID string, version int64) error { _, err := c.esClient.Delete(). Index(indexName). Id(docID). Version(version). VersionType(versionTypeExternal). Do(ctx) return err } func getLoggerOptions(logLevel string, logger log.Logger) []elastic.ClientOptionFunc { switch { case strings.EqualFold(logLevel, "trace"): return []elastic.ClientOptionFunc{ elastic.SetErrorLog(newErrorLogger(logger)), elastic.SetInfoLog(newInfoLogger(logger)), elastic.SetTraceLog(newInfoLogger(logger)), } case strings.EqualFold(logLevel, "info"): return []elastic.ClientOptionFunc{ elastic.SetErrorLog(newErrorLogger(logger)), elastic.SetInfoLog(newInfoLogger(logger)), } case strings.EqualFold(logLevel, "error"), logLevel == "": // Default is to log errors only. return []elastic.ClientOptionFunc{ elastic.SetErrorLog(newErrorLogger(logger)), } default: return nil } } func buildMappingBody(mapping map[string]enumspb.IndexedValueType) map[string]interface{} { properties := make(map[string]interface{}, len(mapping)) for fieldName, fieldType := range mapping { var typeMap map[string]interface{} switch fieldType { case enumspb.INDEXED_VALUE_TYPE_STRING: typeMap = map[string]interface{}{"type": "text"} case enumspb.INDEXED_VALUE_TYPE_KEYWORD: typeMap = map[string]interface{}{"type": "keyword"} case enumspb.INDEXED_VALUE_TYPE_INT: typeMap = map[string]interface{}{"type": "long"} case enumspb.INDEXED_VALUE_TYPE_DOUBLE: typeMap = map[string]interface{}{ "type": "scaled_float", "scaling_factor": 10000, } case enumspb.INDEXED_VALUE_TYPE_BOOL: typeMap = map[string]interface{}{"type": "boolean"} case enumspb.INDEXED_VALUE_TYPE_DATETIME: typeMap = map[string]interface{}{"type": "date_nanos"} } if typeMap != nil { properties[fieldName] = typeMap } } body := map[string]interface{}{ "properties": properties, } return body } func convertMappingBody(esMapping map[string]interface{}, indexName string) map[string]string { result := make(map[string]string) index, ok := esMapping[indexName] if !ok { return result } indexMap, ok := index.(map[string]interface{}) if !ok { return result } mappings, ok := indexMap["mappings"] if !ok { return result } mappingsMap, ok := mappings.(map[string]interface{}) if !ok { return result } // One more nested field on ES6. // TODO (alex): Remove with ES6 removal. if doc, ok := mappingsMap[docTypeV6]; ok { docMap, ok := doc.(map[string]interface{}) if !ok { return result } mappingsMap = docMap } properties, ok := mappingsMap["properties"] if !ok { return result } propMap, ok := properties.(map[string]interface{}) if !ok { return result } for fieldName, fieldProp := range propMap { fieldPropMap, ok := fieldProp.(map[string]interface{}) if !ok { continue } tYpe, ok := fieldPropMap["type"] if !ok { continue } typeStr, ok := tYpe.(string) if !ok { continue } result[fieldName] = typeStr } return result }
1
12,240
Should this instead be a configurable option? See the sketch after this record.
temporalio-temporal
go
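For context, a minimal sketch of what the reviewer's suggestion could look like: exposing sniffing as a parameter instead of hard-coding `elastic.SetSniff(false)`. Only the olivere/elastic calls already used in the file are relied on; the function name `newSimpleClientV7WithSniff` and the `enableSniff` parameter are illustrative assumptions, not the project's actual API.

```go
package client

import (
	"time"

	"github.com/olivere/elastic/v7"
)

// newSimpleClientV7WithSniff is a hypothetical variant of newSimpleClientV7
// that lets the caller decide whether sniffing is enabled, rather than
// always disabling it.
func newSimpleClientV7WithSniff(url string, enableSniff bool) (*elastic.Client, error) {
	retrier := elastic.NewBackoffRetrier(
		elastic.NewExponentialBackoff(128*time.Millisecond, 513*time.Millisecond))
	return elastic.NewClient(
		elastic.SetURL(url),
		elastic.SetSniff(enableSniff), // configurable instead of hard-coded false
		elastic.SetRetrier(retrier),
	)
}
```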
@@ -64,6 +64,13 @@ func (c *CreateDisks) run(ctx context.Context, s *Step) DError { } } + // Get the source snapshot link if using a source snapshot. + if cd.SourceSnapshot != "" { + if snapshot, ok := w.snapshots.get(cd.SourceSnapshot); ok { + cd.SourceSnapshot = snapshot.link + } + } + w.LogStepInfo(s.name, "CreateDisks", "Creating disk %q.", cd.Name) if err := w.ComputeClient.CreateDisk(cd.Project, cd.Zone, &cd.Disk); err != nil { // Fallback to pd-standard to avoid quota issue.
1
// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package daisy import ( "context" "fmt" "regexp" "strings" "sync" "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute" ) const ( pdStandard = "pd-standard" pdSsd = "pd-ssd" ) // CreateDisks is a Daisy CreateDisks workflow step. type CreateDisks []*Disk func (c *CreateDisks) populate(ctx context.Context, s *Step) DError { var errs DError for _, d := range *c { errs = addErrs(errs, d.populate(ctx, s)) } return errs } func (c *CreateDisks) validate(ctx context.Context, s *Step) DError { var errs DError for _, d := range *c { errs = addErrs(errs, d.validate(ctx, s)) } return errs } func (c *CreateDisks) run(ctx context.Context, s *Step) DError { var wg sync.WaitGroup w := s.w e := make(chan DError) for _, d := range *c { wg.Add(1) go func(cd *Disk) { defer wg.Done() // Get the source image link if using a source image. if cd.SourceImage != "" { if image, ok := w.images.get(cd.SourceImage); ok { cd.SourceImage = image.link } } w.LogStepInfo(s.name, "CreateDisks", "Creating disk %q.", cd.Name) if err := w.ComputeClient.CreateDisk(cd.Project, cd.Zone, &cd.Disk); err != nil { // Fallback to pd-standard to avoid quota issue. if cd.FallbackToPdStandard && strings.HasSuffix(cd.Type, pdSsd) && isQuotaExceeded(err) { w.LogStepInfo(s.name, "CreateDisks", "Falling back to pd-standard for disk %v. "+ "It may be caused by insufficient pd-ssd quota. Consider increasing pd-ssd quota to "+ "avoid using ps-standard for better performance.", cd.Name) cd.Type = strings.TrimRight(cd.Type, pdSsd) + pdStandard err = w.ComputeClient.CreateDisk(cd.Project, cd.Zone, &cd.Disk) } if err != nil { e <- newErr("failed to create disk", err) return } } cd.createdInWorkflow = true }(d) } go func() { wg.Wait() e <- nil }() select { case err := <-e: return err case <-w.Cancel: // Wait so disks being created now can be deleted. wg.Wait() return nil } } var operationErrorCodeRegex = regexp.MustCompile(fmt.Sprintf("(?m)^"+compute.OperationErrorCodeFormat+"$", "QUOTA_EXCEEDED")) func isQuotaExceeded(err error) bool { return operationErrorCodeRegex.FindIndex([]byte(err.Error())) != nil }
1
10,371
Does the unit test need an update? See the test sketch after this record.
GoogleCloudPlatform-compute-image-tools
go
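A minimal, self-contained sketch of the behavior the reviewer suggests covering with a unit test: when a SourceSnapshot name is known to the workflow registry, it should be replaced by the snapshot's link, and unknown names should pass through unchanged. The registry and resolver below are hypothetical stand-ins, not daisy's actual types.

```go
package resolver

import "testing"

type fakeSnapshot struct{ link string }

type fakeRegistry map[string]fakeSnapshot

// resolveSourceSnapshot mirrors the diff's lookup-and-substitute pattern.
func resolveSourceSnapshot(reg fakeRegistry, src string) string {
	if s, ok := reg[src]; ok {
		return s.link
	}
	return src
}

func TestResolveSourceSnapshot(t *testing.T) {
	reg := fakeRegistry{"snap-1": {link: "projects/p/global/snapshots/snap-1"}}

	if got := resolveSourceSnapshot(reg, "snap-1"); got != "projects/p/global/snapshots/snap-1" {
		t.Errorf("expected resolved link, got %q", got)
	}
	if got := resolveSourceSnapshot(reg, "unknown"); got != "unknown" {
		t.Errorf("expected passthrough for unknown snapshot, got %q", got)
	}
}
```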
@@ -12,7 +12,6 @@ module.exports = class Provider extends RequestClient { super(uppy, opts) this.provider = opts.provider this.id = this.provider - this.authProvider = opts.authProvider || this.provider this.name = this.opts.name || _getName(this.id) this.pluginId = this.opts.pluginId this.tokenKey = `companion-${this.pluginId}-auth-token`
1
'use strict' const RequestClient = require('./RequestClient') const tokenStorage = require('./tokenStorage') const _getName = (id) => { return id.split('-').map((s) => s.charAt(0).toUpperCase() + s.slice(1)).join(' ') } module.exports = class Provider extends RequestClient { constructor (uppy, opts) { super(uppy, opts) this.provider = opts.provider this.id = this.provider this.authProvider = opts.authProvider || this.provider this.name = this.opts.name || _getName(this.id) this.pluginId = this.opts.pluginId this.tokenKey = `companion-${this.pluginId}-auth-token` } headers () { return new Promise((resolve, reject) => { super.headers().then((headers) => { this.getAuthToken().then((token) => { resolve(Object.assign({}, headers, { 'uppy-auth-token': token })) }) }).catch(reject) }) } onReceiveResponse (response) { response = super.onReceiveResponse(response) const plugin = this.uppy.getPlugin(this.pluginId) const oldAuthenticated = plugin.getPluginState().authenticated const authenticated = oldAuthenticated ? response.status !== 401 : response.status < 400 plugin.setPluginState({ authenticated }) return response } // @todo(i.olarewaju) consider whether or not this method should be exposed setAuthToken (token) { return this.uppy.getPlugin(this.pluginId).storage.setItem(this.tokenKey, token) } getAuthToken () { return this.uppy.getPlugin(this.pluginId).storage.getItem(this.tokenKey) } authUrl () { return `${this.hostname}/${this.id}/connect` } fileUrl (id) { return `${this.hostname}/${this.id}/get/${id}` } list (directory) { return this.get(`${this.id}/list/${directory || ''}`) } logout () { return new Promise((resolve, reject) => { this.get(`${this.id}/logout`) .then((res) => { this.uppy.getPlugin(this.pluginId).storage.removeItem(this.tokenKey) .then(() => resolve(res)) .catch(reject) }).catch(reject) }) } static initPlugin (plugin, opts, defaultOpts) { plugin.type = 'acquirer' plugin.files = [] if (defaultOpts) { plugin.opts = Object.assign({}, defaultOpts, opts) } if (opts.serverUrl || opts.serverPattern) { throw new Error('`serverUrl` and `serverPattern` have been renamed to `companionUrl` and `companionAllowedHosts` respectively in the 0.30.5 release. Please consult the docs (for example, https://uppy.io/docs/instagram/ for the Instagram plugin) and use the updated options.`') } if (opts.companionAllowedHosts) { const pattern = opts.companionAllowedHosts // validate companionAllowedHosts param if (typeof pattern !== 'string' && !Array.isArray(pattern) && !(pattern instanceof RegExp)) { throw new TypeError(`${plugin.id}: the option "companionAllowedHosts" must be one of string, Array, RegExp`) } plugin.opts.companionAllowedHosts = pattern } else { // does not start with https:// if (/^(?!https?:\/\/).*$/i.test(opts.companionUrl)) { plugin.opts.companionAllowedHosts = `https://${opts.companionUrl.replace(/^\/\//, '')}` } else { plugin.opts.companionAllowedHosts = opts.companionUrl } } plugin.storage = plugin.opts.storage || tokenStorage } }
1
13,256
I realised that authProvider is not used anywhere.
transloadit-uppy
js
@@ -187,6 +187,7 @@ func watchConfig(cfg *config.Config, stopCh <-chan struct{}) { case <-stopCh: close(sigChan) return + default: } } }()
1
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "flag" "os" "time" chaosmeshv1alpha1 "github.com/pingcap/chaos-mesh/api/v1alpha1" apiWebhook "github.com/pingcap/chaos-mesh/api/webhook" "github.com/pingcap/chaos-mesh/controllers" "github.com/pingcap/chaos-mesh/pkg/flags" "github.com/pingcap/chaos-mesh/pkg/utils" "github.com/pingcap/chaos-mesh/pkg/version" "github.com/pingcap/chaos-mesh/pkg/webhook/config" "github.com/pingcap/chaos-mesh/pkg/webhook/config/watcher" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/webhook" // +kubebuilder:scaffold:imports ) var ( scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") // EventCoalesceWindow is the window for coalescing events from ConfigMapWatcher EventCoalesceWindow = time.Second * 3 ) var ( metricsAddr string enableLeaderElection bool certsDir string configDir string printVersion bool cmWatcherLabels = flags.NewMapStringStringFlag() watcherConfig = watcher.NewConfig() ) func init() { _ = clientgoscheme.AddToScheme(scheme) _ = chaosmeshv1alpha1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } func parseFlags() { flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") flag.StringVar(&certsDir, "certs", "/etc/webhook/certs", "The directory for storing certs key file and cert file") flag.StringVar(&configDir, "conf", "/etc/webhook/conf", "The directory for storing webhook config files") flag.StringVar(&watcherConfig.Namespace, "configmap-namespace", "", "Namespace to search for ConfigMaps to load Injection Configs from (default: current namespace)") flag.Var(&cmWatcherLabels, "configmap-labels", "Label pairs used to discover ConfigMaps in Kubernetes. 
These should be key1=value[,key2=val2,...]") flag.BoolVar(&printVersion, "version", false, "print version information and exit") flag.Parse() } func main() { parseFlags() version.PrintVersionInfo("Controller manager") if printVersion { os.Exit(0) } ctrl.SetLogger(zap.Logger(true)) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, MetricsBindAddress: metricsAddr, LeaderElection: enableLeaderElection, Port: 9443, }) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) } if err = (&controllers.PodChaosReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("PodChaos"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "PodChaos") os.Exit(1) } if err = (&controllers.NetworkChaosReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("NetworkChaos"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "NetworkChaos") os.Exit(1) } if err = (&controllers.IoChaosReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("IoChaos"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "IoChaos") os.Exit(1) } setupLog.Info("setting up webhook server") hookServer := mgr.GetWebhookServer() hookServer.CertDir = certsDir webhookConfig, err := config.LoadConfigDirectory(configDir) if err != nil { setupLog.Error(err, "load webhook config error") os.Exit(1) } stopCh := ctrl.SetupSignalHandler() watchConfig(webhookConfig, stopCh) hookServer.Register("/inject-v1-pod", &webhook.Admission{Handler: &apiWebhook.PodInjector{ Config: webhookConfig, }}) // +kubebuilder:scaffold:builder setupLog.Info("Starting manager") if err := mgr.Start(stopCh); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) } } func watchConfig(cfg *config.Config, stopCh <-chan struct{}) { watcherConfig.ConfigMapLabels = cmWatcherLabels.ToMapStringString() // start up the watcher, and get the first batch of ConfigMaps // to set in the config. 
// make sure to union this with any file configs we loaded from disk configWatcher, err := watcher.New(*watcherConfig) if err != nil { setupLog.Error(err, "unable to create ConfigMap watchers") os.Exit(1) } go func() { // watch for reconciliation signals, and grab configmaps, then update the running configuration // for the server sigChan := make(chan interface{}, 10) //debouncedChan := make(chan interface{}, 10) // debounce events from sigChan, so we dont hammer apiserver on reconciliation eventsCh := utils.Coalescer(EventCoalesceWindow, sigChan, stopCh) go func() { for { setupLog.Info("Launching watcher for ConfigMaps") err := configWatcher.Watch(sigChan, stopCh) if err != nil { switch err { case watcher.ErrWatchChannelClosed: setupLog.Error(err, "watcher got error, try to restart watcher") default: setupLog.Error(err, "unable to watch new ConfigMaps") } } select { case <-stopCh: close(sigChan) return } } }() for { select { case <-eventsCh: setupLog.Info("Triggering ConfigMap reconciliation") updatedInjectionConfigs, err := configWatcher.Get() if err != nil { setupLog.Error(err, "unable to get ConfigMaps") continue } if len(updatedInjectionConfigs) == 0 { setupLog.Info("No updated injection configs") continue } setupLog.Info("Got updated InjectionConfigs from reconciliation", "updated config count", len(updatedInjectionConfigs)) newInjectionConfigs := make([]*config.InjectionConfig, len(updatedInjectionConfigs)+len(cfg.Injections)) { i := 0 for k := range cfg.Injections { newInjectionConfigs[i] = cfg.Injections[k] i++ } for i, watched := range updatedInjectionConfigs { newInjectionConfigs[i+len(cfg.Injections)] = watched } } setupLog.Info("Updating server with newly loaded configurations", "origin configs count", len(cfg.Injections), "updated configs count", len(updatedInjectionConfigs)) cfg.ReplaceInjectionConfigs(newInjectionConfigs) setupLog.Info("Configuration replaced") case <-stopCh: break } } }() }
1
12,636
Why add the default branch? Could this cause multiple configWatchers to exist and notify the eventsCh at the same time? See the sketch after this record.
chaos-mesh-chaos-mesh
go
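A minimal sketch of what the added default branch changes: without it, the select blocks until stopCh is closed; with it, the select falls through immediately and the enclosing loop relaunches the watcher right away, which is the behavior the reviewer is questioning. All names here are illustrative, not chaos-mesh's actual code.

```go
package main

import "fmt"

// watchLoop repeatedly calls launch (e.g. a Watch that may return on error),
// then checks stopCh. The default case makes the check non-blocking, so the
// loop restarts launch immediately instead of waiting for shutdown.
func watchLoop(stopCh <-chan struct{}, launch func()) {
	for {
		launch()
		select {
		case <-stopCh:
			return // shut down cleanly
		default:
			// fall through: relaunch on the next iteration
		}
	}
}

func main() {
	stopCh := make(chan struct{})
	count := 0
	watchLoop(stopCh, func() {
		count++
		fmt.Println("watcher launched", count)
		if count == 3 {
			close(stopCh) // stop after a few restarts for the demo
		}
	})
}
```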
@@ -1477,6 +1477,8 @@ func handleUSB(intr interrupt.Interrupt) { // clear stall request setEPINTENCLR(0, sam.USB_DEVICE_EPINTENCLR_STALL1) } + } else { + sendZlp() } // Now the actual transfer handlers, ignore endpoint number 0 (setup)
1
// +build sam,atsamd21 // Peripheral abstraction layer for the atsamd21. // // Datasheet: // http://ww1.microchip.com/downloads/en/DeviceDoc/SAMD21-Family-DataSheet-DS40001882D.pdf // package machine import ( "device/arm" "device/sam" "errors" "runtime/interrupt" "unsafe" ) type PinMode uint8 const ( PinAnalog PinMode = 1 PinSERCOM PinMode = 2 PinSERCOMAlt PinMode = 3 PinTimer PinMode = 4 PinTimerAlt PinMode = 5 PinCom PinMode = 6 //PinAC_CLK PinMode = 7 PinDigital PinMode = 8 PinInput PinMode = 9 PinInputPullup PinMode = 10 PinOutput PinMode = 11 PinPWM PinMode = PinTimer PinPWMAlt PinMode = PinTimerAlt PinInputPulldown PinMode = 12 ) const ( pinPadMapSERCOM0Pad0 byte = (0x10 << 1) | 0x00 pinPadMapSERCOM1Pad0 byte = (0x20 << 1) | 0x00 pinPadMapSERCOM2Pad0 byte = (0x30 << 1) | 0x00 pinPadMapSERCOM3Pad0 byte = (0x40 << 1) | 0x00 pinPadMapSERCOM4Pad0 byte = (0x50 << 1) | 0x00 pinPadMapSERCOM5Pad0 byte = (0x60 << 1) | 0x00 pinPadMapSERCOM0Pad2 byte = (0x10 << 1) | 0x10 pinPadMapSERCOM1Pad2 byte = (0x20 << 1) | 0x10 pinPadMapSERCOM2Pad2 byte = (0x30 << 1) | 0x10 pinPadMapSERCOM3Pad2 byte = (0x40 << 1) | 0x10 pinPadMapSERCOM4Pad2 byte = (0x50 << 1) | 0x10 pinPadMapSERCOM5Pad2 byte = (0x60 << 1) | 0x10 pinPadMapSERCOM0AltPad0 byte = (0x01 << 1) | 0x00 pinPadMapSERCOM1AltPad0 byte = (0x02 << 1) | 0x00 pinPadMapSERCOM2AltPad0 byte = (0x03 << 1) | 0x00 pinPadMapSERCOM3AltPad0 byte = (0x04 << 1) | 0x00 pinPadMapSERCOM4AltPad0 byte = (0x05 << 1) | 0x00 pinPadMapSERCOM5AltPad0 byte = (0x06 << 1) | 0x00 pinPadMapSERCOM0AltPad2 byte = (0x01 << 1) | 0x01 pinPadMapSERCOM1AltPad2 byte = (0x02 << 1) | 0x01 pinPadMapSERCOM2AltPad2 byte = (0x03 << 1) | 0x01 pinPadMapSERCOM3AltPad2 byte = (0x04 << 1) | 0x01 pinPadMapSERCOM4AltPad2 byte = (0x05 << 1) | 0x01 pinPadMapSERCOM5AltPad2 byte = (0x06 << 1) | 0x01 ) // pinPadMapping lists which pins have which SERCOMs attached to them. // The encoding is rather dense, with each byte encoding two pins and both // SERCOM and SERCOM-ALT. // // Observations: // * There are six SERCOMs. Those SERCOM numbers can be encoded in 3 bits. // * Even pad numbers are always on even pins, and odd pad numbers are always on // odd pins. // * Pin pads come in pairs. If PA00 has pad 0, then PA01 has pad 1. // With this information, we can encode SERCOM pin/pad numbers much more // efficiently. First of all, due to pads coming in pairs, we can ignore half // the pins: the information for an odd pin can be calculated easily from the // preceding even pin. And second, if odd pads are always on odd pins and even // pads on even pins, we can drop a single bit from the pad number. // // Each byte below is split in two nibbles. The 4 high bits are for SERCOM and // the 4 low bits are for SERCOM-ALT. Of each nibble, the 3 high bits encode the // SERCOM + 1 while the low bit encodes whether this is PAD0 or PAD2 (0 means // PAD0, 1 means PAD2). It encodes SERCOM + 1 instead of just the SERCOM number, // to make it easy to check whether a nibble is set at all. 
var pinPadMapping = [32]byte{ // page 21 PA00 / 2: 0 | pinPadMapSERCOM1AltPad0, PB08 / 2: 0 | pinPadMapSERCOM4AltPad0, PA04 / 2: 0 | pinPadMapSERCOM0AltPad0, PA06 / 2: 0 | pinPadMapSERCOM0AltPad2, PA08 / 2: pinPadMapSERCOM0Pad0 | pinPadMapSERCOM2AltPad0, PA10 / 2: pinPadMapSERCOM0Pad2 | pinPadMapSERCOM2AltPad2, // page 22 PB10 / 2: 0 | pinPadMapSERCOM4AltPad2, PB12 / 2: pinPadMapSERCOM4Pad0 | 0, PB14 / 2: pinPadMapSERCOM4Pad2 | 0, PA12 / 2: pinPadMapSERCOM2Pad0 | pinPadMapSERCOM4AltPad0, PA14 / 2: pinPadMapSERCOM2Pad2 | pinPadMapSERCOM4AltPad2, PA16 / 2: pinPadMapSERCOM1Pad0 | pinPadMapSERCOM3AltPad0, PA18 / 2: pinPadMapSERCOM1Pad2 | pinPadMapSERCOM3AltPad2, PB16 / 2: pinPadMapSERCOM5Pad0 | 0, PA20 / 2: pinPadMapSERCOM5Pad2 | pinPadMapSERCOM3AltPad2, PA22 / 2: pinPadMapSERCOM3Pad0 | pinPadMapSERCOM5AltPad0, PA24 / 2: pinPadMapSERCOM3Pad2 | pinPadMapSERCOM5AltPad2, // page 23 PB22 / 2: 0 | pinPadMapSERCOM5AltPad2, PA30 / 2: 0 | pinPadMapSERCOM1AltPad2, PB30 / 2: 0 | pinPadMapSERCOM5AltPad0, PB00 / 2: 0 | pinPadMapSERCOM5AltPad2, PB02 / 2: 0 | pinPadMapSERCOM5AltPad0, } // findPinPadMapping looks up the pad number and the pinmode for a given pin, // given a SERCOM number. The result can either be SERCOM, SERCOM-ALT, or "not // found" (indicated by returning ok=false). The pad number is returned to // calculate the DOPO/DIPO bitfields of the various serial peripherals. func findPinPadMapping(sercom uint8, pin Pin) (pinMode PinMode, pad uint32, ok bool) { nibbles := pinPadMapping[pin/2] upper := nibbles >> 4 lower := nibbles & 0xf if upper != 0 { // SERCOM if (upper>>1)-1 == sercom { pinMode = PinSERCOM pad |= uint32((upper & 1) << 1) ok = true } } if lower != 0 { // SERCOM-ALT if (lower>>1)-1 == sercom { pinMode = PinSERCOMAlt pad |= uint32((lower & 1) << 1) ok = true } } if ok { // The lower bit of the pad is the same as the lower bit of the pin number. pad |= uint32(pin & 1) } return } // InitADC initializes the ADC. 
func InitADC() { // ADC Bias Calibration // #define ADC_FUSES_BIASCAL_ADDR (NVMCTRL_OTP4 + 4) // #define ADC_FUSES_BIASCAL_Pos 3 /**< \brief (NVMCTRL_OTP4) ADC Bias Calibration */ // #define ADC_FUSES_BIASCAL_Msk (0x7u << ADC_FUSES_BIASCAL_Pos) // #define ADC_FUSES_BIASCAL(value) ((ADC_FUSES_BIASCAL_Msk & ((value) << ADC_FUSES_BIASCAL_Pos))) // #define ADC_FUSES_LINEARITY_0_ADDR NVMCTRL_OTP4 // #define ADC_FUSES_LINEARITY_0_Pos 27 /**< \brief (NVMCTRL_OTP4) ADC Linearity bits 4:0 */ // #define ADC_FUSES_LINEARITY_0_Msk (0x1Fu << ADC_FUSES_LINEARITY_0_Pos) // #define ADC_FUSES_LINEARITY_0(value) ((ADC_FUSES_LINEARITY_0_Msk & ((value) << ADC_FUSES_LINEARITY_0_Pos))) // #define ADC_FUSES_LINEARITY_1_ADDR (NVMCTRL_OTP4 + 4) // #define ADC_FUSES_LINEARITY_1_Pos 0 /**< \brief (NVMCTRL_OTP4) ADC Linearity bits 7:5 */ // #define ADC_FUSES_LINEARITY_1_Msk (0x7u << ADC_FUSES_LINEARITY_1_Pos) // #define ADC_FUSES_LINEARITY_1(value) ((ADC_FUSES_LINEARITY_1_Msk & ((value) << ADC_FUSES_LINEARITY_1_Pos))) biasFuse := *(*uint32)(unsafe.Pointer(uintptr(0x00806020) + 4)) bias := uint16(biasFuse>>3) & uint16(0x7) // ADC Linearity bits 4:0 linearity0Fuse := *(*uint32)(unsafe.Pointer(uintptr(0x00806020))) linearity := uint16(linearity0Fuse>>27) & uint16(0x1f) // ADC Linearity bits 7:5 linearity1Fuse := *(*uint32)(unsafe.Pointer(uintptr(0x00806020) + 4)) linearity |= uint16(linearity1Fuse) & uint16(0x7) << 5 // set calibration sam.ADC.CALIB.Set((bias << 8) | linearity) // Wait for synchronization waitADCSync() // Divide Clock by 32 with 12 bits resolution as default sam.ADC.CTRLB.Set((sam.ADC_CTRLB_PRESCALER_DIV32 << sam.ADC_CTRLB_PRESCALER_Pos) | (sam.ADC_CTRLB_RESSEL_12BIT << sam.ADC_CTRLB_RESSEL_Pos)) // Sampling Time Length sam.ADC.SAMPCTRL.Set(5) // Wait for synchronization waitADCSync() // Use internal ground sam.ADC.INPUTCTRL.Set(sam.ADC_INPUTCTRL_MUXNEG_GND << sam.ADC_INPUTCTRL_MUXNEG_Pos) // Averaging (see datasheet table in AVGCTRL register description) sam.ADC.AVGCTRL.Set((sam.ADC_AVGCTRL_SAMPLENUM_1 << sam.ADC_AVGCTRL_SAMPLENUM_Pos) | (0x0 << sam.ADC_AVGCTRL_ADJRES_Pos)) // Analog Reference is AREF pin (3.3v) sam.ADC.INPUTCTRL.SetBits(sam.ADC_INPUTCTRL_GAIN_DIV2 << sam.ADC_INPUTCTRL_GAIN_Pos) // 1/2 VDDANA = 0.5 * 3V3 = 1.65V sam.ADC.REFCTRL.SetBits(sam.ADC_REFCTRL_REFSEL_INTVCC1 << sam.ADC_REFCTRL_REFSEL_Pos) } // Configure configures a ADCPin to be able to be used to read data. func (a ADC) Configure() { a.Pin.Configure(PinConfig{Mode: PinAnalog}) return } // Get returns the current value of a ADC pin, in the range 0..0xffff. func (a ADC) Get() uint16 { ch := a.getADCChannel() // Selection for the positive ADC input sam.ADC.INPUTCTRL.ClearBits(sam.ADC_INPUTCTRL_MUXPOS_Msk) waitADCSync() sam.ADC.INPUTCTRL.SetBits(uint32(ch << sam.ADC_INPUTCTRL_MUXPOS_Pos)) waitADCSync() // Select internal ground for ADC input sam.ADC.INPUTCTRL.ClearBits(sam.ADC_INPUTCTRL_MUXNEG_Msk) waitADCSync() sam.ADC.INPUTCTRL.SetBits(sam.ADC_INPUTCTRL_MUXNEG_GND << sam.ADC_INPUTCTRL_MUXNEG_Pos) waitADCSync() // Enable ADC sam.ADC.CTRLA.SetBits(sam.ADC_CTRLA_ENABLE) waitADCSync() // Start conversion sam.ADC.SWTRIG.SetBits(sam.ADC_SWTRIG_START) waitADCSync() // wait for first conversion to finish to fix same issue as // https://github.com/arduino/ArduinoCore-samd/issues/446 for !sam.ADC.INTFLAG.HasBits(sam.ADC_INTFLAG_RESRDY) { } // Clear the Data Ready flag sam.ADC.INTFLAG.SetBits(sam.ADC_INTFLAG_RESRDY) waitADCSync() // Start conversion again, since first conversion after reference voltage changed is invalid. 
sam.ADC.SWTRIG.SetBits(sam.ADC_SWTRIG_START) waitADCSync() // Waiting for conversion to complete for !sam.ADC.INTFLAG.HasBits(sam.ADC_INTFLAG_RESRDY) { } val := sam.ADC.RESULT.Get() // Disable ADC sam.ADC.CTRLA.ClearBits(sam.ADC_CTRLA_ENABLE) waitADCSync() return uint16(val) << 4 // scales from 12 to 16-bit result } func (a ADC) getADCChannel() uint8 { switch a.Pin { case PA02: return 0 case PB08: return 2 case PB09: return 3 case PA04: return 4 case PA05: return 5 case PA06: return 6 case PA07: return 7 case PB02: return 10 case PB03: return 11 case PA09: return 17 case PA11: return 19 default: return 0 } } func waitADCSync() { for sam.ADC.STATUS.HasBits(sam.ADC_STATUS_SYNCBUSY) { } } // UART on the SAMD21. type UART struct { Buffer *RingBuffer Bus *sam.SERCOM_USART_Type SERCOM uint8 Interrupt interrupt.Interrupt } var ( // UART0 is actually a USB CDC interface. UART0 = USBCDC{Buffer: NewRingBuffer()} ) const ( sampleRate16X = 16 lsbFirst = 1 ) // Configure the UART. func (uart UART) Configure(config UARTConfig) error { // Default baud rate to 115200. if config.BaudRate == 0 { config.BaudRate = 115200 } // Use default pins if pins are not set. if config.TX == 0 && config.RX == 0 { // use default pins config.TX = UART_TX_PIN config.RX = UART_RX_PIN } // Determine transmit pinout. txPinMode, txPad, ok := findPinPadMapping(uart.SERCOM, config.TX) if !ok { return ErrInvalidOutputPin } var txPinOut uint32 // See table 25-9 of the datasheet (page 459) for how pads are mapped to // pinout values. switch txPad { case 0: txPinOut = 0 case 2: txPinOut = 1 default: // TODO: flow control (RTS/CTS) return ErrInvalidOutputPin } // Determine receive pinout. rxPinMode, rxPad, ok := findPinPadMapping(uart.SERCOM, config.RX) if !ok { return ErrInvalidInputPin } // As you can see in table 25-8 on page 459 of the datasheet, input pins // are mapped directly. rxPinOut := rxPad // configure pins config.TX.Configure(PinConfig{Mode: txPinMode}) config.RX.Configure(PinConfig{Mode: rxPinMode}) // reset SERCOM0 uart.Bus.CTRLA.SetBits(sam.SERCOM_USART_CTRLA_SWRST) for uart.Bus.CTRLA.HasBits(sam.SERCOM_USART_CTRLA_SWRST) || uart.Bus.SYNCBUSY.HasBits(sam.SERCOM_USART_SYNCBUSY_SWRST) { } // set UART mode/sample rate // SERCOM_USART_CTRLA_MODE(mode) | // SERCOM_USART_CTRLA_SAMPR(sampleRate); uart.Bus.CTRLA.Set((sam.SERCOM_USART_CTRLA_MODE_USART_INT_CLK << sam.SERCOM_USART_CTRLA_MODE_Pos) | (1 << sam.SERCOM_USART_CTRLA_SAMPR_Pos)) // sample rate of 16x // Set baud rate uart.SetBaudRate(config.BaudRate) // setup UART frame // SERCOM_USART_CTRLA_FORM( (parityMode == SERCOM_NO_PARITY ? 0 : 1) ) | // dataOrder << SERCOM_USART_CTRLA_DORD_Pos; uart.Bus.CTRLA.SetBits((0 << sam.SERCOM_USART_CTRLA_FORM_Pos) | // no parity (lsbFirst << sam.SERCOM_USART_CTRLA_DORD_Pos)) // data order // set UART stop bits/parity // SERCOM_USART_CTRLB_CHSIZE(charSize) | // nbStopBits << SERCOM_USART_CTRLB_SBMODE_Pos | // (parityMode == SERCOM_NO_PARITY ? 0 : parityMode) << SERCOM_USART_CTRLB_PMODE_Pos; //If no parity use default value uart.Bus.CTRLB.SetBits((0 << sam.SERCOM_USART_CTRLB_CHSIZE_Pos) | // 8 bits is 0 (0 << sam.SERCOM_USART_CTRLB_SBMODE_Pos) | // 1 stop bit is zero (0 << sam.SERCOM_USART_CTRLB_PMODE_Pos)) // no parity // set UART pads. This is not same as pins... 
// SERCOM_USART_CTRLA_TXPO(txPad) | // SERCOM_USART_CTRLA_RXPO(rxPad); uart.Bus.CTRLA.SetBits((txPinOut << sam.SERCOM_USART_CTRLA_TXPO_Pos) | (rxPinOut << sam.SERCOM_USART_CTRLA_RXPO_Pos)) // Enable Transceiver and Receiver //sercom->USART.CTRLB.reg |= SERCOM_USART_CTRLB_TXEN | SERCOM_USART_CTRLB_RXEN ; uart.Bus.CTRLB.SetBits(sam.SERCOM_USART_CTRLB_TXEN | sam.SERCOM_USART_CTRLB_RXEN) // Enable USART1 port. // sercom->USART.CTRLA.bit.ENABLE = 0x1u; uart.Bus.CTRLA.SetBits(sam.SERCOM_USART_CTRLA_ENABLE) for uart.Bus.SYNCBUSY.HasBits(sam.SERCOM_USART_SYNCBUSY_ENABLE) { } // setup interrupt on receive uart.Bus.INTENSET.Set(sam.SERCOM_USART_INTENSET_RXC) // Enable RX IRQ. uart.Interrupt.Enable() return nil } // SetBaudRate sets the communication speed for the UART. func (uart UART) SetBaudRate(br uint32) { // Asynchronous fractional mode (Table 24-2 in datasheet) // BAUD = fref / (sampleRateValue * fbaud) // (multiply by 8, to calculate fractional piece) // uint32_t baudTimes8 = (SystemCoreClock * 8) / (16 * baudrate); baud := (CPUFrequency() * 8) / (sampleRate16X * br) // sercom->USART.BAUD.FRAC.FP = (baudTimes8 % 8); // sercom->USART.BAUD.FRAC.BAUD = (baudTimes8 / 8); uart.Bus.BAUD.Set(uint16(((baud % 8) << sam.SERCOM_USART_BAUD_FRAC_MODE_FP_Pos) | ((baud / 8) << sam.SERCOM_USART_BAUD_FRAC_MODE_BAUD_Pos))) } // WriteByte writes a byte of data to the UART. func (uart UART) WriteByte(c byte) error { // wait until ready to receive for !uart.Bus.INTFLAG.HasBits(sam.SERCOM_USART_INTFLAG_DRE) { } uart.Bus.DATA.Set(uint16(c)) return nil } // handleInterrupt should be called from the appropriate interrupt handler for // this UART instance. func (uart *UART) handleInterrupt(interrupt.Interrupt) { // should reset IRQ uart.Receive(byte((uart.Bus.DATA.Get() & 0xFF))) uart.Bus.INTFLAG.SetBits(sam.SERCOM_USART_INTFLAG_RXC) } // I2C on the SAMD21. type I2C struct { Bus *sam.SERCOM_I2CM_Type SERCOM uint8 } // I2CConfig is used to store config info for I2C. type I2CConfig struct { Frequency uint32 SCL Pin SDA Pin } const ( // Default rise time in nanoseconds, based on 4.7K ohm pull up resistors riseTimeNanoseconds = 125 // wire bus states wireUnknownState = 0 wireIdleState = 1 wireOwnerState = 2 wireBusyState = 3 // wire commands wireCmdNoAction = 0 wireCmdRepeatStart = 1 wireCmdRead = 2 wireCmdStop = 3 ) const i2cTimeout = 1000 // Configure is intended to setup the I2C interface. func (i2c I2C) Configure(config I2CConfig) error { // Default I2C bus speed is 100 kHz. if config.Frequency == 0 { config.Frequency = TWI_FREQ_100KHZ } if config.SDA == 0 && config.SCL == 0 { config.SDA = SDA_PIN config.SCL = SCL_PIN } sclPinMode, sclPad, ok := findPinPadMapping(i2c.SERCOM, config.SCL) if !ok || sclPad != 1 { // SCL must be on pad 1, according to section 27.5 of the datasheet. // Note: this is not an exhaustive test for I2C support on the pin: not // all pins support I2C. return ErrInvalidClockPin } sdaPinMode, sdaPad, ok := findPinPadMapping(i2c.SERCOM, config.SDA) if !ok || sdaPad != 0 { // SDA must be on pad 0, according to section 27.5 of the datasheet. // Note: this is not an exhaustive test for I2C support on the pin: not // all pins support I2C. 
return ErrInvalidDataPin } // reset SERCOM i2c.Bus.CTRLA.SetBits(sam.SERCOM_I2CM_CTRLA_SWRST) for i2c.Bus.CTRLA.HasBits(sam.SERCOM_I2CM_CTRLA_SWRST) || i2c.Bus.SYNCBUSY.HasBits(sam.SERCOM_I2CM_SYNCBUSY_SWRST) { } // Set i2c master mode //SERCOM_I2CM_CTRLA_MODE( I2C_MASTER_OPERATION ) i2c.Bus.CTRLA.Set(sam.SERCOM_I2CM_CTRLA_MODE_I2C_MASTER << sam.SERCOM_I2CM_CTRLA_MODE_Pos) // | i2c.SetBaudRate(config.Frequency) // Enable I2CM port. // sercom->USART.CTRLA.bit.ENABLE = 0x1u; i2c.Bus.CTRLA.SetBits(sam.SERCOM_I2CM_CTRLA_ENABLE) for i2c.Bus.SYNCBUSY.HasBits(sam.SERCOM_I2CM_SYNCBUSY_ENABLE) { } // set bus idle mode i2c.Bus.STATUS.SetBits(wireIdleState << sam.SERCOM_I2CM_STATUS_BUSSTATE_Pos) for i2c.Bus.SYNCBUSY.HasBits(sam.SERCOM_I2CM_SYNCBUSY_SYSOP) { } // enable pins config.SDA.Configure(PinConfig{Mode: sdaPinMode}) config.SCL.Configure(PinConfig{Mode: sclPinMode}) return nil } // SetBaudRate sets the communication speed for the I2C. func (i2c I2C) SetBaudRate(br uint32) { // Synchronous arithmetic baudrate, via Arduino SAMD implementation: // SystemCoreClock / ( 2 * baudrate) - 5 - (((SystemCoreClock / 1000000) * WIRE_RISE_TIME_NANOSECONDS) / (2 * 1000)); baud := CPUFrequency()/(2*br) - 5 - (((CPUFrequency() / 1000000) * riseTimeNanoseconds) / (2 * 1000)) i2c.Bus.BAUD.Set(baud) } // Tx does a single I2C transaction at the specified address. // It clocks out the given address, writes the bytes in w, reads back len(r) // bytes and stores them in r, and generates a stop condition on the bus. func (i2c I2C) Tx(addr uint16, w, r []byte) error { var err error if len(w) != 0 { // send start/address for write i2c.sendAddress(addr, true) // wait until transmission complete timeout := i2cTimeout for !i2c.Bus.INTFLAG.HasBits(sam.SERCOM_I2CM_INTFLAG_MB) { timeout-- if timeout == 0 { return errors.New("I2C timeout on ready to write data") } } // ACK received (0: ACK, 1: NACK) if i2c.Bus.STATUS.HasBits(sam.SERCOM_I2CM_STATUS_RXNACK) { return errors.New("I2C write error: expected ACK not NACK") } // write data for _, b := range w { err = i2c.WriteByte(b) if err != nil { return err } } err = i2c.signalStop() if err != nil { return err } } if len(r) != 0 { // send start/address for read i2c.sendAddress(addr, false) // wait transmission complete for !i2c.Bus.INTFLAG.HasBits(sam.SERCOM_I2CM_INTFLAG_SB) { // If the slave NACKS the address, the MB bit will be set. // In that case, send a stop condition and return error. if i2c.Bus.INTFLAG.HasBits(sam.SERCOM_I2CM_INTFLAG_MB) { i2c.Bus.CTRLB.SetBits(wireCmdStop << sam.SERCOM_I2CM_CTRLB_CMD_Pos) // Stop condition return errors.New("I2C read error: expected ACK not NACK") } } // ACK received (0: ACK, 1: NACK) if i2c.Bus.STATUS.HasBits(sam.SERCOM_I2CM_STATUS_RXNACK) { return errors.New("I2C read error: expected ACK not NACK") } // read first byte r[0] = i2c.readByte() for i := 1; i < len(r); i++ { // Send an ACK i2c.Bus.CTRLB.ClearBits(sam.SERCOM_I2CM_CTRLB_ACKACT) i2c.signalRead() // Read data and send the ACK r[i] = i2c.readByte() } // Send NACK to end transmission i2c.Bus.CTRLB.SetBits(sam.SERCOM_I2CM_CTRLB_ACKACT) err = i2c.signalStop() if err != nil { return err } } return nil } // WriteByte writes a single byte to the I2C bus. 
func (i2c I2C) WriteByte(data byte) error { // Send data byte i2c.Bus.DATA.Set(data) // wait until transmission successful timeout := i2cTimeout for !i2c.Bus.INTFLAG.HasBits(sam.SERCOM_I2CM_INTFLAG_MB) { // check for bus error if sam.SERCOM3_I2CM.STATUS.HasBits(sam.SERCOM_I2CM_STATUS_BUSERR) { return errors.New("I2C bus error") } timeout-- if timeout == 0 { return errors.New("I2C timeout on write data") } } if i2c.Bus.STATUS.HasBits(sam.SERCOM_I2CM_STATUS_RXNACK) { return errors.New("I2C write error: expected ACK not NACK") } return nil } // sendAddress sends the address and start signal func (i2c I2C) sendAddress(address uint16, write bool) error { data := (address << 1) if !write { data |= 1 // set read flag } // wait until bus ready timeout := i2cTimeout for !i2c.Bus.STATUS.HasBits(wireIdleState<<sam.SERCOM_I2CM_STATUS_BUSSTATE_Pos) && !i2c.Bus.STATUS.HasBits(wireOwnerState<<sam.SERCOM_I2CM_STATUS_BUSSTATE_Pos) { timeout-- if timeout == 0 { return errors.New("I2C timeout on bus ready") } } i2c.Bus.ADDR.Set(uint32(data)) return nil } func (i2c I2C) signalStop() error { i2c.Bus.CTRLB.SetBits(wireCmdStop << sam.SERCOM_I2CM_CTRLB_CMD_Pos) // Stop command timeout := i2cTimeout for i2c.Bus.SYNCBUSY.HasBits(sam.SERCOM_I2CM_SYNCBUSY_SYSOP) { timeout-- if timeout == 0 { return errors.New("I2C timeout on signal stop") } } return nil } func (i2c I2C) signalRead() error { i2c.Bus.CTRLB.SetBits(wireCmdRead << sam.SERCOM_I2CM_CTRLB_CMD_Pos) // Read command timeout := i2cTimeout for i2c.Bus.SYNCBUSY.HasBits(sam.SERCOM_I2CM_SYNCBUSY_SYSOP) { timeout-- if timeout == 0 { return errors.New("I2C timeout on signal read") } } return nil } func (i2c I2C) readByte() byte { for !i2c.Bus.INTFLAG.HasBits(sam.SERCOM_I2CM_INTFLAG_SB) { } return byte(i2c.Bus.DATA.Get()) } // I2S on the SAMD21. // I2S type I2S struct { Bus *sam.I2S_Type } // Configure is used to configure the I2S interface. You must call this // before you can use the I2S bus. func (i2s I2S) Configure(config I2SConfig) { // handle defaults if config.SCK == 0 { config.SCK = I2S_SCK_PIN config.WS = I2S_WS_PIN config.SD = I2S_SD_PIN } if config.AudioFrequency == 0 { config.AudioFrequency = 48000 } if config.DataFormat == I2SDataFormatDefault { if config.Stereo { config.DataFormat = I2SDataFormat16bit } else { config.DataFormat = I2SDataFormat32bit } } // Turn on clock for I2S sam.PM.APBCMASK.SetBits(sam.PM_APBCMASK_I2S_) // setting clock rate for sample. division_factor := CPUFrequency() / (config.AudioFrequency * uint32(config.DataFormat)) // Switch Generic Clock Generator 3 to DFLL48M. sam.GCLK.GENDIV.Set((sam.GCLK_CLKCTRL_GEN_GCLK3 << sam.GCLK_GENDIV_ID_Pos) | (division_factor << sam.GCLK_GENDIV_DIV_Pos)) waitForSync() sam.GCLK.GENCTRL.Set((sam.GCLK_CLKCTRL_GEN_GCLK3 << sam.GCLK_GENCTRL_ID_Pos) | (sam.GCLK_GENCTRL_SRC_DFLL48M << sam.GCLK_GENCTRL_SRC_Pos) | sam.GCLK_GENCTRL_IDC | sam.GCLK_GENCTRL_GENEN) waitForSync() // Use Generic Clock Generator 3 as source for I2S. 
sam.GCLK.CLKCTRL.Set((sam.GCLK_CLKCTRL_ID_I2S_0 << sam.GCLK_CLKCTRL_ID_Pos) | (sam.GCLK_CLKCTRL_GEN_GCLK3 << sam.GCLK_CLKCTRL_GEN_Pos) | sam.GCLK_CLKCTRL_CLKEN) waitForSync() // reset the device i2s.Bus.CTRLA.SetBits(sam.I2S_CTRLA_SWRST) for i2s.Bus.SYNCBUSY.HasBits(sam.I2S_SYNCBUSY_SWRST) { } // disable device before continuing for i2s.Bus.SYNCBUSY.HasBits(sam.I2S_SYNCBUSY_ENABLE) { } i2s.Bus.CTRLA.ClearBits(sam.I2S_CTRLA_ENABLE) // setup clock if config.ClockSource == I2SClockSourceInternal { // TODO: make sure correct for I2S output // set serial clock select pin i2s.Bus.CLKCTRL0.SetBits(sam.I2S_CLKCTRL_SCKSEL) // set frame select pin i2s.Bus.CLKCTRL0.SetBits(sam.I2S_CLKCTRL_FSSEL) } else { // Configure FS generation from SCK clock. i2s.Bus.CLKCTRL0.ClearBits(sam.I2S_CLKCTRL_FSSEL) } if config.Standard == I2StandardPhilips { // set 1-bit delay i2s.Bus.CLKCTRL0.SetBits(sam.I2S_CLKCTRL_BITDELAY) } else { // set 0-bit delay i2s.Bus.CLKCTRL0.ClearBits(sam.I2S_CLKCTRL_BITDELAY) } // set number of slots. if config.Stereo { i2s.Bus.CLKCTRL0.SetBits(1 << sam.I2S_CLKCTRL_NBSLOTS_Pos) } else { i2s.Bus.CLKCTRL0.ClearBits(1 << sam.I2S_CLKCTRL_NBSLOTS_Pos) } // set slot size switch config.DataFormat { case I2SDataFormat8bit: i2s.Bus.CLKCTRL0.SetBits(sam.I2S_CLKCTRL_SLOTSIZE_8) case I2SDataFormat16bit: i2s.Bus.CLKCTRL0.SetBits(sam.I2S_CLKCTRL_SLOTSIZE_16) case I2SDataFormat24bit: i2s.Bus.CLKCTRL0.SetBits(sam.I2S_CLKCTRL_SLOTSIZE_24) case I2SDataFormat32bit: i2s.Bus.CLKCTRL0.SetBits(sam.I2S_CLKCTRL_SLOTSIZE_32) } // configure pin for clock config.SCK.Configure(PinConfig{Mode: PinCom}) // configure pin for WS, if needed if config.WS != NoPin { config.WS.Configure(PinConfig{Mode: PinCom}) } // now set serializer data size. switch config.DataFormat { case I2SDataFormat8bit: i2s.Bus.SERCTRL1.SetBits(sam.I2S_SERCTRL_DATASIZE_8 << sam.I2S_SERCTRL_DATASIZE_Pos) case I2SDataFormat16bit: i2s.Bus.SERCTRL1.SetBits(sam.I2S_SERCTRL_DATASIZE_16 << sam.I2S_SERCTRL_DATASIZE_Pos) case I2SDataFormat24bit: i2s.Bus.SERCTRL1.SetBits(sam.I2S_SERCTRL_DATASIZE_24 << sam.I2S_SERCTRL_DATASIZE_Pos) case I2SDataFormat32bit: case I2SDataFormatDefault: i2s.Bus.SERCTRL1.SetBits(sam.I2S_SERCTRL_DATASIZE_32 << sam.I2S_SERCTRL_DATASIZE_Pos) } // set serializer slot adjustment if config.Standard == I2SStandardLSB { // adjust right i2s.Bus.SERCTRL1.ClearBits(sam.I2S_SERCTRL_SLOTADJ) // transfer LSB first i2s.Bus.SERCTRL1.SetBits(sam.I2S_SERCTRL_BITREV) } else { // adjust left i2s.Bus.SERCTRL1.SetBits(sam.I2S_SERCTRL_SLOTADJ) } // set serializer mode. if config.Mode == I2SModePDM { i2s.Bus.SERCTRL1.SetBits(sam.I2S_SERCTRL_SERMODE_PDM2) } else { i2s.Bus.SERCTRL1.SetBits(sam.I2S_SERCTRL_SERMODE_RX) } // configure data pin config.SD.Configure(PinConfig{Mode: PinCom}) // re-enable i2s.Bus.CTRLA.SetBits(sam.I2S_CTRLA_ENABLE) for i2s.Bus.SYNCBUSY.HasBits(sam.I2S_SYNCBUSY_ENABLE) { } // enable i2s clock i2s.Bus.CTRLA.SetBits(sam.I2S_CTRLA_CKEN0) for i2s.Bus.SYNCBUSY.HasBits(sam.I2S_SYNCBUSY_CKEN0) { } // enable i2s serializer i2s.Bus.CTRLA.SetBits(sam.I2S_CTRLA_SEREN1) for i2s.Bus.SYNCBUSY.HasBits(sam.I2S_SYNCBUSY_SEREN1) { } } // Read data from the I2S bus into the provided slice. // The I2S bus must already have been configured correctly. 
func (i2s I2S) Read(p []uint32) (n int, err error) { i := 0 for i = 0; i < len(p); i++ { // Wait until ready for !i2s.Bus.INTFLAG.HasBits(sam.I2S_INTFLAG_RXRDY1) { } for i2s.Bus.SYNCBUSY.HasBits(sam.I2S_SYNCBUSY_DATA1) { } // read data p[i] = i2s.Bus.DATA1.Get() // indicate read complete i2s.Bus.INTFLAG.Set(sam.I2S_INTFLAG_RXRDY1) } return i, nil } // Write data to the I2S bus from the provided slice. // The I2S bus must already have been configured correctly. func (i2s I2S) Write(p []uint32) (n int, err error) { i := 0 for i = 0; i < len(p); i++ { // Wait until ready for !i2s.Bus.INTFLAG.HasBits(sam.I2S_INTFLAG_TXRDY1) { } for i2s.Bus.SYNCBUSY.HasBits(sam.I2S_SYNCBUSY_DATA1) { } // write data i2s.Bus.DATA1.Set(p[i]) // indicate write complete i2s.Bus.INTFLAG.Set(sam.I2S_INTFLAG_TXRDY1) } return i, nil } // Close the I2S bus. func (i2s I2S) Close() error { // Sync wait for i2s.Bus.SYNCBUSY.HasBits(sam.I2S_SYNCBUSY_ENABLE) { } // disable I2S i2s.Bus.CTRLA.ClearBits(sam.I2S_CTRLA_ENABLE) return nil } func waitForSync() { for sam.GCLK.STATUS.HasBits(sam.GCLK_STATUS_SYNCBUSY) { } } // SPI type SPI struct { Bus *sam.SERCOM_SPI_Type SERCOM uint8 } // SPIConfig is used to store config info for SPI. type SPIConfig struct { Frequency uint32 SCK Pin MOSI Pin MISO Pin LSBFirst bool Mode uint8 } // Configure is intended to setup the SPI interface. func (spi SPI) Configure(config SPIConfig) error { // Use default pins if not set. if config.SCK == 0 && config.MOSI == 0 && config.MISO == 0 { config.SCK = SPI0_SCK_PIN config.MOSI = SPI0_MOSI_PIN config.MISO = SPI0_MISO_PIN } // set default frequency if config.Frequency == 0 { config.Frequency = 4000000 } // Determine the input pinout (for MISO). misoPinMode, misoPad, ok := findPinPadMapping(spi.SERCOM, config.MISO) if !ok { return ErrInvalidInputPin } dataInPinout := misoPad // mapped directly // Determine the output pinout (for MOSI/SCK). // See table 26-7 on page 494 of the datasheet. var dataOutPinout uint32 sckPinMode, sckPad, ok := findPinPadMapping(spi.SERCOM, config.SCK) if !ok { return ErrInvalidOutputPin } mosiPinMode, mosiPad, ok := findPinPadMapping(spi.SERCOM, config.MOSI) if !ok { return ErrInvalidOutputPin } switch sckPad { case 1: switch mosiPad { case 0: dataOutPinout = 0x0 case 3: dataOutPinout = 0x2 default: return ErrInvalidOutputPin } case 3: switch mosiPad { case 2: dataOutPinout = 0x1 case 0: dataOutPinout = 0x3 default: return ErrInvalidOutputPin } default: return ErrInvalidOutputPin } // Disable SPI port. 
spi.Bus.CTRLA.ClearBits(sam.SERCOM_SPI_CTRLA_ENABLE) for spi.Bus.SYNCBUSY.HasBits(sam.SERCOM_SPI_SYNCBUSY_ENABLE) { } // enable pins config.SCK.Configure(PinConfig{Mode: sckPinMode}) config.MOSI.Configure(PinConfig{Mode: mosiPinMode}) config.MISO.Configure(PinConfig{Mode: misoPinMode}) // reset SERCOM spi.Bus.CTRLA.SetBits(sam.SERCOM_SPI_CTRLA_SWRST) for spi.Bus.CTRLA.HasBits(sam.SERCOM_SPI_CTRLA_SWRST) || spi.Bus.SYNCBUSY.HasBits(sam.SERCOM_SPI_SYNCBUSY_SWRST) { } // set bit transfer order dataOrder := uint32(0) if config.LSBFirst { dataOrder = 1 } // Set SPI master spi.Bus.CTRLA.Set((sam.SERCOM_SPI_CTRLA_MODE_SPI_MASTER << sam.SERCOM_SPI_CTRLA_MODE_Pos) | (dataOutPinout << sam.SERCOM_SPI_CTRLA_DOPO_Pos) | (dataInPinout << sam.SERCOM_SPI_CTRLA_DIPO_Pos) | (dataOrder << sam.SERCOM_SPI_CTRLA_DORD_Pos)) spi.Bus.CTRLB.SetBits((0 << sam.SERCOM_SPI_CTRLB_CHSIZE_Pos) | // 8bit char size sam.SERCOM_SPI_CTRLB_RXEN) // receive enable for spi.Bus.SYNCBUSY.HasBits(sam.SERCOM_SPI_SYNCBUSY_CTRLB) { } // set mode switch config.Mode { case 0: spi.Bus.CTRLA.ClearBits(sam.SERCOM_SPI_CTRLA_CPHA) spi.Bus.CTRLA.ClearBits(sam.SERCOM_SPI_CTRLA_CPOL) case 1: spi.Bus.CTRLA.SetBits(sam.SERCOM_SPI_CTRLA_CPHA) spi.Bus.CTRLA.ClearBits(sam.SERCOM_SPI_CTRLA_CPOL) case 2: spi.Bus.CTRLA.ClearBits(sam.SERCOM_SPI_CTRLA_CPHA) spi.Bus.CTRLA.SetBits(sam.SERCOM_SPI_CTRLA_CPOL) case 3: spi.Bus.CTRLA.SetBits(sam.SERCOM_SPI_CTRLA_CPHA | sam.SERCOM_SPI_CTRLA_CPOL) default: // to mode 0 spi.Bus.CTRLA.ClearBits(sam.SERCOM_SPI_CTRLA_CPHA) spi.Bus.CTRLA.ClearBits(sam.SERCOM_SPI_CTRLA_CPOL) } // Set synch speed for SPI baudRate := (CPUFrequency() / (2 * config.Frequency)) - 1 spi.Bus.BAUD.Set(uint8(baudRate)) // Enable SPI port. spi.Bus.CTRLA.SetBits(sam.SERCOM_SPI_CTRLA_ENABLE) for spi.Bus.SYNCBUSY.HasBits(sam.SERCOM_SPI_SYNCBUSY_ENABLE) { } return nil } // Transfer writes/reads a single byte using the SPI interface. func (spi SPI) Transfer(w byte) (byte, error) { // write data spi.Bus.DATA.Set(uint32(w)) // wait for receive for !spi.Bus.INTFLAG.HasBits(sam.SERCOM_SPI_INTFLAG_RXC) { } // return data return byte(spi.Bus.DATA.Get()), nil } // PWM const period = 0xFFFF // InitPWM initializes the PWM interface. func InitPWM() { // turn on timer clocks used for PWM sam.PM.APBCMASK.SetBits(sam.PM_APBCMASK_TCC0_ | sam.PM_APBCMASK_TCC1_ | sam.PM_APBCMASK_TCC2_) // Use GCLK0 for TCC0/TCC1 sam.GCLK.CLKCTRL.Set((sam.GCLK_CLKCTRL_ID_TCC0_TCC1 << sam.GCLK_CLKCTRL_ID_Pos) | (sam.GCLK_CLKCTRL_GEN_GCLK0 << sam.GCLK_CLKCTRL_GEN_Pos) | sam.GCLK_CLKCTRL_CLKEN) for sam.GCLK.STATUS.HasBits(sam.GCLK_STATUS_SYNCBUSY) { } // Use GCLK0 for TCC2/TC3 sam.GCLK.CLKCTRL.Set((sam.GCLK_CLKCTRL_ID_TCC2_TC3 << sam.GCLK_CLKCTRL_ID_Pos) | (sam.GCLK_CLKCTRL_GEN_GCLK0 << sam.GCLK_CLKCTRL_GEN_Pos) | sam.GCLK_CLKCTRL_CLKEN) for sam.GCLK.STATUS.HasBits(sam.GCLK_STATUS_SYNCBUSY) { } } // Configure configures a PWM pin for output. 
func (pwm PWM) Configure() { // figure out which TCCX timer for this pin timer := pwm.getTimer() // disable timer timer.CTRLA.ClearBits(sam.TCC_CTRLA_ENABLE) // Wait for synchronization for timer.SYNCBUSY.HasBits(sam.TCC_SYNCBUSY_ENABLE) { } // Use "Normal PWM" (single-slope PWM) timer.WAVE.SetBits(sam.TCC_WAVE_WAVEGEN_NPWM) // Wait for synchronization for timer.SYNCBUSY.HasBits(sam.TCC_SYNCBUSY_WAVE) { } // Set the period (the number to count to (TOP) before resetting timer) //TCC0->PER.reg = period; timer.PER.Set(period) // Wait for synchronization for timer.SYNCBUSY.HasBits(sam.TCC_SYNCBUSY_PER) { } // Set pin as output sam.PORT.DIRSET0.Set(1 << uint8(pwm.Pin)) // Set pin to low sam.PORT.OUTCLR0.Set(1 << uint8(pwm.Pin)) // Enable the port multiplexer for pin pwm.setPinCfg(sam.PORT_PINCFG0_PMUXEN) // Connect TCCX timer to pin. // we normally use the F channel aka ALT pwmConfig := PinPWMAlt // in the case of PA6 or PA7 we have to use E channel if pwm.Pin == 6 || pwm.Pin == 7 { pwmConfig = PinPWM } if pwm.Pin&1 > 0 { // odd pin, so save the even pins val := pwm.getPMux() & sam.PORT_PMUX0_PMUXE_Msk pwm.setPMux(val | uint8(pwmConfig<<sam.PORT_PMUX0_PMUXO_Pos)) } else { // even pin, so save the odd pins val := pwm.getPMux() & sam.PORT_PMUX0_PMUXO_Msk pwm.setPMux(val | uint8(pwmConfig<<sam.PORT_PMUX0_PMUXE_Pos)) } } // Set turns on the duty cycle for a PWM pin using the provided value. func (pwm PWM) Set(value uint16) { // figure out which TCCX timer for this pin timer := pwm.getTimer() // disable output timer.CTRLA.ClearBits(sam.TCC_CTRLA_ENABLE) // Wait for synchronization for timer.SYNCBUSY.HasBits(sam.TCC_SYNCBUSY_ENABLE) { } // Set PWM signal to output duty cycle pwm.setChannel(uint32(value)) // Wait for synchronization on all channels for timer.SYNCBUSY.HasBits(sam.TCC_SYNCBUSY_CC0 | sam.TCC_SYNCBUSY_CC1 | sam.TCC_SYNCBUSY_CC2 | sam.TCC_SYNCBUSY_CC3) { } // enable timer.CTRLA.SetBits(sam.TCC_CTRLA_ENABLE) // Wait for synchronization for timer.SYNCBUSY.HasBits(sam.TCC_SYNCBUSY_ENABLE) { } } // getPMux returns the value for the correct PMUX register for this pin. func (pwm PWM) getPMux() uint8 { return pwm.Pin.getPMux() } // setPMux sets the value for the correct PMUX register for this pin. func (pwm PWM) setPMux(val uint8) { pwm.Pin.setPMux(val) } // getPinCfg returns the value for the correct PINCFG register for this pin. func (pwm PWM) getPinCfg() uint8 { return pwm.Pin.getPinCfg() } // setPinCfg sets the value for the correct PINCFG register for this pin. 
func (pwm PWM) setPinCfg(val uint8) { pwm.Pin.setPinCfg(val) } // getTimer returns the timer to be used for PWM on this pin func (pwm PWM) getTimer() *sam.TCC_Type { switch pwm.Pin { case 6: return sam.TCC1 case 7: return sam.TCC1 case 8: return sam.TCC1 case 9: return sam.TCC1 case 14: return sam.TCC0 case 15: return sam.TCC0 case 16: return sam.TCC0 case 17: return sam.TCC0 case 18: return sam.TCC0 case 19: return sam.TCC0 case 20: return sam.TCC0 case 21: return sam.TCC0 default: return nil // not supported on this pin } } // setChannel sets the value for the correct channel for PWM on this pin func (pwm PWM) setChannel(val uint32) { switch pwm.Pin { case 6: pwm.getTimer().CC0.Set(val) case 7: pwm.getTimer().CC1.Set(val) case 8: pwm.getTimer().CC0.Set(val) case 9: pwm.getTimer().CC1.Set(val) case 14: pwm.getTimer().CC0.Set(val) case 15: pwm.getTimer().CC1.Set(val) case 16: pwm.getTimer().CC2.Set(val) case 17: pwm.getTimer().CC3.Set(val) case 18: pwm.getTimer().CC2.Set(val) case 19: pwm.getTimer().CC3.Set(val) case 20: pwm.getTimer().CC2.Set(val) case 21: pwm.getTimer().CC3.Set(val) default: return // not supported on this pin } } // USBCDC is the USB CDC aka serial over USB interface on the SAMD21. type USBCDC struct { Buffer *RingBuffer } // WriteByte writes a byte of data to the USB CDC interface. func (usbcdc USBCDC) WriteByte(c byte) error { // Supposedly to handle problem with Windows USB serial ports? if usbLineInfo.lineState > 0 { // set the data udd_ep_in_cache_buffer[usb_CDC_ENDPOINT_IN][0] = c usbEndpointDescriptors[usb_CDC_ENDPOINT_IN].DeviceDescBank[1].ADDR.Set(uint32(uintptr(unsafe.Pointer(&udd_ep_in_cache_buffer[usb_CDC_ENDPOINT_IN])))) // clean multi packet size of bytes already sent usbEndpointDescriptors[usb_CDC_ENDPOINT_IN].DeviceDescBank[1].PCKSIZE.ClearBits(usb_DEVICE_PCKSIZE_MULTI_PACKET_SIZE_Mask << usb_DEVICE_PCKSIZE_MULTI_PACKET_SIZE_Pos) // set count of bytes to be sent usbEndpointDescriptors[usb_CDC_ENDPOINT_IN].DeviceDescBank[1].PCKSIZE.SetBits((1 & usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask) << usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos) // clear transfer complete flag setEPINTFLAG(usb_CDC_ENDPOINT_IN, sam.USB_DEVICE_EPINTFLAG_TRCPT1) // send data by setting bank ready setEPSTATUSSET(usb_CDC_ENDPOINT_IN, sam.USB_DEVICE_EPSTATUSSET_BK1RDY) // wait for transfer to complete timeout := 3000 for (getEPINTFLAG(usb_CDC_ENDPOINT_IN) & sam.USB_DEVICE_EPINTFLAG_TRCPT1) == 0 { timeout-- if timeout == 0 { return errors.New("USBCDC write byte timeout") } } } return nil } func (usbcdc USBCDC) DTR() bool { return (usbLineInfo.lineState & usb_CDC_LINESTATE_DTR) > 0 } func (usbcdc USBCDC) RTS() bool { return (usbLineInfo.lineState & usb_CDC_LINESTATE_RTS) > 0 } const ( // these are SAMD21 specific. usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos = 0 usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask = 0x3FFF usb_DEVICE_PCKSIZE_SIZE_Pos = 28 usb_DEVICE_PCKSIZE_SIZE_Mask = 0x7 usb_DEVICE_PCKSIZE_MULTI_PACKET_SIZE_Pos = 14 usb_DEVICE_PCKSIZE_MULTI_PACKET_SIZE_Mask = 0x3FFF ) var ( usbEndpointDescriptors [8]usbDeviceDescriptor udd_ep_in_cache_buffer [7][128]uint8 udd_ep_out_cache_buffer [7][128]uint8 isEndpointHalt = false isRemoteWakeUpEnabled = false endPoints = []uint32{usb_ENDPOINT_TYPE_CONTROL, (usb_ENDPOINT_TYPE_INTERRUPT | usbEndpointIn), (usb_ENDPOINT_TYPE_BULK | usbEndpointOut), (usb_ENDPOINT_TYPE_BULK | usbEndpointIn)} usbConfiguration uint8 usbSetInterface uint8 usbLineInfo = cdcLineInfo{115200, 0x00, 0x00, 0x08, 0x00} ) // Configure the USB CDC interface. 
The config is here for compatibility with the UART interface. func (usbcdc USBCDC) Configure(config UARTConfig) { // reset USB interface sam.USB_DEVICE.CTRLA.SetBits(sam.USB_DEVICE_CTRLA_SWRST) for sam.USB_DEVICE.SYNCBUSY.HasBits(sam.USB_DEVICE_SYNCBUSY_SWRST) || sam.USB_DEVICE.SYNCBUSY.HasBits(sam.USB_DEVICE_SYNCBUSY_ENABLE) { } sam.USB_DEVICE.DESCADD.Set(uint32(uintptr(unsafe.Pointer(&usbEndpointDescriptors)))) // configure pins USBCDC_DM_PIN.Configure(PinConfig{Mode: PinCom}) USBCDC_DP_PIN.Configure(PinConfig{Mode: PinCom}) // performs pad calibration from store fuses handlePadCalibration() // run in standby sam.USB_DEVICE.CTRLA.SetBits(sam.USB_DEVICE_CTRLA_RUNSTDBY) // set full speed sam.USB_DEVICE.CTRLB.SetBits(sam.USB_DEVICE_CTRLB_SPDCONF_FS << sam.USB_DEVICE_CTRLB_SPDCONF_Pos) // attach sam.USB_DEVICE.CTRLB.ClearBits(sam.USB_DEVICE_CTRLB_DETACH) // enable interrupt for end of reset sam.USB_DEVICE.INTENSET.SetBits(sam.USB_DEVICE_INTENSET_EORST) // enable interrupt for start of frame sam.USB_DEVICE.INTENSET.SetBits(sam.USB_DEVICE_INTENSET_SOF) // enable USB sam.USB_DEVICE.CTRLA.SetBits(sam.USB_DEVICE_CTRLA_ENABLE) // enable IRQ intr := interrupt.New(sam.IRQ_USB, handleUSB) intr.Enable() } func handlePadCalibration() { // Load Pad Calibration data from non-volatile memory // This requires registers that are not included in the SVD file. // Modeled after defines from samd21g18a.h and nvmctrl.h: // // #define NVMCTRL_OTP4 0x00806020 // // #define USB_FUSES_TRANSN_ADDR (NVMCTRL_OTP4 + 4) // #define USB_FUSES_TRANSN_Pos 13 /**< \brief (NVMCTRL_OTP4) USB pad Transn calibration */ // #define USB_FUSES_TRANSN_Msk (0x1Fu << USB_FUSES_TRANSN_Pos) // #define USB_FUSES_TRANSN(value) ((USB_FUSES_TRANSN_Msk & ((value) << USB_FUSES_TRANSN_Pos))) // #define USB_FUSES_TRANSP_ADDR (NVMCTRL_OTP4 + 4) // #define USB_FUSES_TRANSP_Pos 18 /**< \brief (NVMCTRL_OTP4) USB pad Transp calibration */ // #define USB_FUSES_TRANSP_Msk (0x1Fu << USB_FUSES_TRANSP_Pos) // #define USB_FUSES_TRANSP(value) ((USB_FUSES_TRANSP_Msk & ((value) << USB_FUSES_TRANSP_Pos))) // #define USB_FUSES_TRIM_ADDR (NVMCTRL_OTP4 + 4) // #define USB_FUSES_TRIM_Pos 23 /**< \brief (NVMCTRL_OTP4) USB pad Trim calibration */ // #define USB_FUSES_TRIM_Msk (0x7u << USB_FUSES_TRIM_Pos) // #define USB_FUSES_TRIM(value) ((USB_FUSES_TRIM_Msk & ((value) << USB_FUSES_TRIM_Pos))) // fuse := *(*uint32)(unsafe.Pointer(uintptr(0x00806020) + 4)) calibTransN := uint16(fuse>>13) & uint16(0x1f) calibTransP := uint16(fuse>>18) & uint16(0x1f) calibTrim := uint16(fuse>>23) & uint16(0x7) if calibTransN == 0x1f { calibTransN = 5 } sam.USB_DEVICE.PADCAL.SetBits(calibTransN << sam.USB_DEVICE_PADCAL_TRANSN_Pos) if calibTransP == 0x1f { calibTransP = 29 } sam.USB_DEVICE.PADCAL.SetBits(calibTransP << sam.USB_DEVICE_PADCAL_TRANSP_Pos) if calibTrim == 0x7 { calibTransN = 3 } sam.USB_DEVICE.PADCAL.SetBits(calibTrim << sam.USB_DEVICE_PADCAL_TRIM_Pos) } func handleUSB(intr interrupt.Interrupt) { // reset all interrupt flags flags := sam.USB_DEVICE.INTFLAG.Get() sam.USB_DEVICE.INTFLAG.Set(flags) // End of reset if (flags & sam.USB_DEVICE_INTFLAG_EORST) > 0 { // Configure control endpoint initEndpoint(0, usb_ENDPOINT_TYPE_CONTROL) // Enable Setup-Received interrupt setEPINTENSET(0, sam.USB_DEVICE_EPINTENSET_RXSTP) usbConfiguration = 0 // ack the End-Of-Reset interrupt sam.USB_DEVICE.INTFLAG.Set(sam.USB_DEVICE_INTFLAG_EORST) } // Start of frame if (flags & sam.USB_DEVICE_INTFLAG_SOF) > 0 { // if you want to blink LED showing traffic, this would be the place... 
} // Endpoint 0 Setup interrupt if getEPINTFLAG(0)&sam.USB_DEVICE_EPINTFLAG_RXSTP > 0 { // ack setup received setEPINTFLAG(0, sam.USB_DEVICE_EPINTFLAG_RXSTP) // parse setup setup := newUSBSetup(udd_ep_out_cache_buffer[0][:]) // Clear the Bank 0 ready flag on Control OUT setEPSTATUSCLR(0, sam.USB_DEVICE_EPSTATUSCLR_BK0RDY) usbEndpointDescriptors[0].DeviceDescBank[0].PCKSIZE.ClearBits(usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask << usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos) ok := false if (setup.bmRequestType & usb_REQUEST_TYPE) == usb_REQUEST_STANDARD { // Standard Requests ok = handleStandardSetup(setup) } else { // Class Interface Requests if setup.wIndex == usb_CDC_ACM_INTERFACE { ok = cdcSetup(setup) } } if ok { // set Bank1 ready setEPSTATUSSET(0, sam.USB_DEVICE_EPSTATUSSET_BK1RDY) } else { // Stall endpoint setEPSTATUSSET(0, sam.USB_DEVICE_EPINTFLAG_STALL1) } if getEPINTFLAG(0)&sam.USB_DEVICE_EPINTFLAG_STALL1 > 0 { // ack the stall setEPINTFLAG(0, sam.USB_DEVICE_EPINTFLAG_STALL1) // clear stall request setEPINTENCLR(0, sam.USB_DEVICE_EPINTENCLR_STALL1) } } // Now the actual transfer handlers, ignore endpoint number 0 (setup) var i uint32 for i = 1; i < uint32(len(endPoints)); i++ { // Check if endpoint has a pending interrupt epFlags := getEPINTFLAG(i) if (epFlags&sam.USB_DEVICE_EPINTFLAG_TRCPT0) > 0 || (epFlags&sam.USB_DEVICE_EPINTFLAG_TRCPT1) > 0 { switch i { case usb_CDC_ENDPOINT_OUT: handleEndpoint(i) setEPINTFLAG(i, epFlags) case usb_CDC_ENDPOINT_IN, usb_CDC_ENDPOINT_ACM: setEPSTATUSCLR(i, sam.USB_DEVICE_EPSTATUSCLR_BK1RDY) setEPINTFLAG(i, sam.USB_DEVICE_EPINTFLAG_TRCPT1) } } } } func initEndpoint(ep, config uint32) { switch config { case usb_ENDPOINT_TYPE_INTERRUPT | usbEndpointIn: // set packet size usbEndpointDescriptors[ep].DeviceDescBank[1].PCKSIZE.SetBits(epPacketSize(64) << usb_DEVICE_PCKSIZE_SIZE_Pos) // set data buffer address usbEndpointDescriptors[ep].DeviceDescBank[1].ADDR.Set(uint32(uintptr(unsafe.Pointer(&udd_ep_in_cache_buffer[ep])))) // set endpoint type setEPCFG(ep, ((usb_ENDPOINT_TYPE_INTERRUPT + 1) << sam.USB_DEVICE_EPCFG_EPTYPE1_Pos)) case usb_ENDPOINT_TYPE_BULK | usbEndpointOut: // set packet size usbEndpointDescriptors[ep].DeviceDescBank[0].PCKSIZE.SetBits(epPacketSize(64) << usb_DEVICE_PCKSIZE_SIZE_Pos) // set data buffer address usbEndpointDescriptors[ep].DeviceDescBank[0].ADDR.Set(uint32(uintptr(unsafe.Pointer(&udd_ep_out_cache_buffer[ep])))) // set endpoint type setEPCFG(ep, ((usb_ENDPOINT_TYPE_BULK + 1) << sam.USB_DEVICE_EPCFG_EPTYPE0_Pos)) // receive interrupts when current transfer complete setEPINTENSET(ep, sam.USB_DEVICE_EPINTENSET_TRCPT0) // set byte count to zero, we have not received anything yet usbEndpointDescriptors[ep].DeviceDescBank[0].PCKSIZE.ClearBits(usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask << usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos) // ready for next transfer setEPSTATUSCLR(ep, sam.USB_DEVICE_EPSTATUSCLR_BK0RDY) case usb_ENDPOINT_TYPE_INTERRUPT | usbEndpointOut: // TODO: not really anything, seems like... case usb_ENDPOINT_TYPE_BULK | usbEndpointIn: // set packet size usbEndpointDescriptors[ep].DeviceDescBank[1].PCKSIZE.SetBits(epPacketSize(64) << usb_DEVICE_PCKSIZE_SIZE_Pos) // set data buffer address usbEndpointDescriptors[ep].DeviceDescBank[1].ADDR.Set(uint32(uintptr(unsafe.Pointer(&udd_ep_in_cache_buffer[ep])))) // set endpoint type setEPCFG(ep, ((usb_ENDPOINT_TYPE_BULK + 1) << sam.USB_DEVICE_EPCFG_EPTYPE1_Pos)) // NAK on endpoint IN, the bank is not yet filled in. 
setEPSTATUSCLR(ep, sam.USB_DEVICE_EPSTATUSCLR_BK1RDY) case usb_ENDPOINT_TYPE_CONTROL: // Control OUT // set packet size usbEndpointDescriptors[ep].DeviceDescBank[0].PCKSIZE.SetBits(epPacketSize(64) << usb_DEVICE_PCKSIZE_SIZE_Pos) // set data buffer address usbEndpointDescriptors[ep].DeviceDescBank[0].ADDR.Set(uint32(uintptr(unsafe.Pointer(&udd_ep_out_cache_buffer[ep])))) // set endpoint type setEPCFG(ep, getEPCFG(ep)|((usb_ENDPOINT_TYPE_CONTROL+1)<<sam.USB_DEVICE_EPCFG_EPTYPE0_Pos)) // Control IN // set packet size usbEndpointDescriptors[ep].DeviceDescBank[1].PCKSIZE.SetBits(epPacketSize(64) << usb_DEVICE_PCKSIZE_SIZE_Pos) // set data buffer address usbEndpointDescriptors[ep].DeviceDescBank[1].ADDR.Set(uint32(uintptr(unsafe.Pointer(&udd_ep_in_cache_buffer[ep])))) // set endpoint type setEPCFG(ep, getEPCFG(ep)|((usb_ENDPOINT_TYPE_CONTROL+1)<<sam.USB_DEVICE_EPCFG_EPTYPE1_Pos)) // Prepare OUT endpoint for receive // set multi packet size for expected number of receive bytes on control OUT usbEndpointDescriptors[ep].DeviceDescBank[0].PCKSIZE.SetBits(64 << usb_DEVICE_PCKSIZE_MULTI_PACKET_SIZE_Pos) // set byte count to zero, we have not received anything yet usbEndpointDescriptors[ep].DeviceDescBank[0].PCKSIZE.ClearBits(usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask << usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos) // NAK on endpoint OUT to show we are ready to receive control data setEPSTATUSSET(ep, sam.USB_DEVICE_EPSTATUSSET_BK0RDY) } } func handleStandardSetup(setup usbSetup) bool { switch setup.bRequest { case usb_GET_STATUS: buf := []byte{0, 0} if setup.bmRequestType != 0 { // endpoint // TODO: actually check if the endpoint in question is currently halted if isEndpointHalt { buf[0] = 1 } } sendUSBPacket(0, buf) return true case usb_CLEAR_FEATURE: if setup.wValueL == 1 { // DEVICEREMOTEWAKEUP isRemoteWakeUpEnabled = false } else if setup.wValueL == 0 { // ENDPOINTHALT isEndpointHalt = false } sendZlp(0) return true case usb_SET_FEATURE: if setup.wValueL == 1 { // DEVICEREMOTEWAKEUP isRemoteWakeUpEnabled = true } else if setup.wValueL == 0 { // ENDPOINTHALT isEndpointHalt = true } sendZlp(0) return true case usb_SET_ADDRESS: // set packet size 64 with auto Zlp after transfer usbEndpointDescriptors[0].DeviceDescBank[1].PCKSIZE.Set((epPacketSize(64) << usb_DEVICE_PCKSIZE_SIZE_Pos) | uint32(1<<31)) // autozlp // ack the transfer is complete from the request setEPINTFLAG(0, sam.USB_DEVICE_EPINTFLAG_TRCPT1) // set bank ready for data setEPSTATUSSET(0, sam.USB_DEVICE_EPSTATUSSET_BK1RDY) // wait for transfer to complete timeout := 3000 for (getEPINTFLAG(0) & sam.USB_DEVICE_EPINTFLAG_TRCPT1) == 0 { timeout-- if timeout == 0 { return true } } // last, set the device address to that requested by host sam.USB_DEVICE.DADD.SetBits(setup.wValueL) sam.USB_DEVICE.DADD.SetBits(sam.USB_DEVICE_DADD_ADDEN) return true case usb_GET_DESCRIPTOR: sendDescriptor(setup) return true case usb_SET_DESCRIPTOR: return false case usb_GET_CONFIGURATION: buff := []byte{usbConfiguration} sendUSBPacket(0, buff) return true case usb_SET_CONFIGURATION: if setup.bmRequestType&usb_REQUEST_RECIPIENT == usb_REQUEST_DEVICE { for i := 1; i < len(endPoints); i++ { initEndpoint(uint32(i), endPoints[i]) } usbConfiguration = setup.wValueL // Enable interrupt for CDC control messages from host (OUT packet) setEPINTENSET(usb_CDC_ENDPOINT_ACM, sam.USB_DEVICE_EPINTENSET_TRCPT1) // Enable interrupt for CDC data messages from host setEPINTENSET(usb_CDC_ENDPOINT_OUT, sam.USB_DEVICE_EPINTENSET_TRCPT0) sendZlp(0) return true } else { return false } case 
usb_GET_INTERFACE: buff := []byte{usbSetInterface} sendUSBPacket(0, buff) return true case usb_SET_INTERFACE: usbSetInterface = setup.wValueL sendZlp(0) return true default: return true } } func cdcSetup(setup usbSetup) bool { if setup.bmRequestType == usb_REQUEST_DEVICETOHOST_CLASS_INTERFACE { if setup.bRequest == usb_CDC_GET_LINE_CODING { b := make([]byte, 7) b[0] = byte(usbLineInfo.dwDTERate) b[1] = byte(usbLineInfo.dwDTERate >> 8) b[2] = byte(usbLineInfo.dwDTERate >> 16) b[3] = byte(usbLineInfo.dwDTERate >> 24) b[4] = byte(usbLineInfo.bCharFormat) b[5] = byte(usbLineInfo.bParityType) b[6] = byte(usbLineInfo.bDataBits) sendUSBPacket(0, b) return true } } if setup.bmRequestType == usb_REQUEST_HOSTTODEVICE_CLASS_INTERFACE { if setup.bRequest == usb_CDC_SET_LINE_CODING { b := receiveUSBControlPacket() usbLineInfo.dwDTERate = uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 usbLineInfo.bCharFormat = b[4] usbLineInfo.bParityType = b[5] usbLineInfo.bDataBits = b[6] } if setup.bRequest == usb_CDC_SET_CONTROL_LINE_STATE { usbLineInfo.lineState = setup.wValueL } if setup.bRequest == usb_CDC_SET_LINE_CODING || setup.bRequest == usb_CDC_SET_CONTROL_LINE_STATE { // auto-reset into the bootloader if usbLineInfo.dwDTERate == 1200 && usbLineInfo.lineState&usb_CDC_LINESTATE_DTR == 0 { ResetProcessor() } else { // TODO: cancel any reset } sendZlp(0) } if setup.bRequest == usb_CDC_SEND_BREAK { // TODO: something with this value? // breakValue = ((uint16_t)setup.wValueH << 8) | setup.wValueL; // return false; sendZlp(0) } return true } return false } //go:noinline func sendUSBPacket(ep uint32, data []byte) { copy(udd_ep_in_cache_buffer[ep][:], data) // Set endpoint address for sending data usbEndpointDescriptors[ep].DeviceDescBank[1].ADDR.Set(uint32(uintptr(unsafe.Pointer(&udd_ep_in_cache_buffer[ep])))) // clear multi-packet size which is total bytes already sent usbEndpointDescriptors[ep].DeviceDescBank[1].PCKSIZE.ClearBits(usb_DEVICE_PCKSIZE_MULTI_PACKET_SIZE_Mask << usb_DEVICE_PCKSIZE_MULTI_PACKET_SIZE_Pos) // set byte count, which is total number of bytes to be sent usbEndpointDescriptors[ep].DeviceDescBank[1].PCKSIZE.SetBits(uint32((len(data) & usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask) << usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos)) } func receiveUSBControlPacket() []byte { // address usbEndpointDescriptors[0].DeviceDescBank[0].ADDR.Set(uint32(uintptr(unsafe.Pointer(&udd_ep_out_cache_buffer[0])))) // set byte count to zero usbEndpointDescriptors[0].DeviceDescBank[0].PCKSIZE.ClearBits(usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask << usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos) // set ready for next data setEPSTATUSCLR(0, sam.USB_DEVICE_EPSTATUSCLR_BK0RDY) // Wait until OUT transfer is ready. timeout := 300000 for (getEPSTATUS(0) & sam.USB_DEVICE_EPSTATUS_BK0RDY) == 0 { timeout-- if timeout == 0 { return []byte{} } } // Wait until OUT transfer is completed. timeout = 300000 for (getEPINTFLAG(0) & sam.USB_DEVICE_EPINTFLAG_TRCPT0) == 0 { timeout-- if timeout == 0 { return []byte{} } } // get data bytesread := uint32((usbEndpointDescriptors[0].DeviceDescBank[0].PCKSIZE.Get() >> usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos) & usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask) data := make([]byte, bytesread) copy(data, udd_ep_out_cache_buffer[0][:]) return data } // sendDescriptor creates and sends the various USB descriptor types that // can be requested by the host. 
func sendDescriptor(setup usbSetup) { switch setup.wValueH { case usb_CONFIGURATION_DESCRIPTOR_TYPE: sendConfiguration(setup) return case usb_DEVICE_DESCRIPTOR_TYPE: if setup.wLength == 8 { // composite descriptor requested, so only send 8 bytes dd := NewDeviceDescriptor(0xEF, 0x02, 0x01, 64, usb_VID, usb_PID, 0x100, usb_IMANUFACTURER, usb_IPRODUCT, usb_ISERIAL, 1) sendUSBPacket(0, dd.Bytes()[:8]) } else { // complete descriptor requested so send entire packet dd := NewDeviceDescriptor(0x02, 0x00, 0x00, 64, usb_VID, usb_PID, 0x100, usb_IMANUFACTURER, usb_IPRODUCT, usb_ISERIAL, 1) sendUSBPacket(0, dd.Bytes()) } return case usb_STRING_DESCRIPTOR_TYPE: switch setup.wValueL { case 0: b := make([]byte, 4) b[0] = 0x04 b[1] = 0x03 b[2] = 0x09 b[3] = 0x04 sendUSBPacket(0, b) return case usb_IPRODUCT: prod := []byte(usb_STRING_PRODUCT) b := make([]byte, len(prod)*2+2) b[0] = byte(len(prod)*2 + 2) b[1] = 0x03 for i, val := range prod { b[i*2+2] = val b[i*2+3] = 0 } sendUSBPacket(0, b) return case usb_IMANUFACTURER: prod := []byte(usb_STRING_MANUFACTURER) b := make([]byte, len(prod)*2+2) b[0] = byte(len(prod)*2 + 2) b[1] = 0x03 for i, val := range prod { b[i*2+2] = val b[i*2+3] = 0 } sendUSBPacket(0, b) return case usb_ISERIAL: // TODO: allow returning a product serial number sendZlp(0) } // send final zero length packet and return sendZlp(0) return } // do not know how to handle this message, so return zero sendZlp(0) return } // sendConfiguration creates and sends the configuration packet to the host. func sendConfiguration(setup usbSetup) { if setup.wLength == 9 { sz := uint16(configDescriptorSize + cdcSize) config := NewConfigDescriptor(sz, 2) sendUSBPacket(0, config.Bytes()) } else { iad := NewIADDescriptor(0, 2, usb_CDC_COMMUNICATION_INTERFACE_CLASS, usb_CDC_ABSTRACT_CONTROL_MODEL, 0) cif := NewInterfaceDescriptor(usb_CDC_ACM_INTERFACE, 1, usb_CDC_COMMUNICATION_INTERFACE_CLASS, usb_CDC_ABSTRACT_CONTROL_MODEL, 0) header := NewCDCCSInterfaceDescriptor(usb_CDC_HEADER, usb_CDC_V1_10&0xFF, (usb_CDC_V1_10>>8)&0x0FF) controlManagement := NewACMFunctionalDescriptor(usb_CDC_ABSTRACT_CONTROL_MANAGEMENT, 6) functionalDescriptor := NewCDCCSInterfaceDescriptor(usb_CDC_UNION, usb_CDC_ACM_INTERFACE, usb_CDC_DATA_INTERFACE) callManagement := NewCMFunctionalDescriptor(usb_CDC_CALL_MANAGEMENT, 1, 1) cifin := NewEndpointDescriptor((usb_CDC_ENDPOINT_ACM | usbEndpointIn), usb_ENDPOINT_TYPE_INTERRUPT, 0x10, 0x10) dif := NewInterfaceDescriptor(usb_CDC_DATA_INTERFACE, 2, usb_CDC_DATA_INTERFACE_CLASS, 0, 0) out := NewEndpointDescriptor((usb_CDC_ENDPOINT_OUT | usbEndpointOut), usb_ENDPOINT_TYPE_BULK, usbEndpointPacketSize, 0) in := NewEndpointDescriptor((usb_CDC_ENDPOINT_IN | usbEndpointIn), usb_ENDPOINT_TYPE_BULK, usbEndpointPacketSize, 0) cdc := NewCDCDescriptor(iad, cif, header, controlManagement, functionalDescriptor, callManagement, cifin, dif, out, in) sz := uint16(configDescriptorSize + cdcSize) config := NewConfigDescriptor(sz, 2) buf := make([]byte, 0, sz) buf = append(buf, config.Bytes()...) buf = append(buf, cdc.Bytes()...) 
sendUSBPacket(0, buf) } } func handleEndpoint(ep uint32) { // get data count := int((usbEndpointDescriptors[ep].DeviceDescBank[0].PCKSIZE.Get() >> usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos) & usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask) // move to ring buffer for i := 0; i < count; i++ { UART0.Receive(byte((udd_ep_out_cache_buffer[ep][i] & 0xFF))) } // set byte count to zero usbEndpointDescriptors[ep].DeviceDescBank[0].PCKSIZE.ClearBits(usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask << usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos) // set multi packet size to 64 usbEndpointDescriptors[ep].DeviceDescBank[0].PCKSIZE.SetBits(64 << usb_DEVICE_PCKSIZE_MULTI_PACKET_SIZE_Pos) // set ready for next data setEPSTATUSCLR(ep, sam.USB_DEVICE_EPSTATUSCLR_BK0RDY) } func sendZlp(ep uint32) { usbEndpointDescriptors[ep].DeviceDescBank[1].PCKSIZE.ClearBits(usb_DEVICE_PCKSIZE_BYTE_COUNT_Mask << usb_DEVICE_PCKSIZE_BYTE_COUNT_Pos) } func epPacketSize(size uint16) uint32 { switch size { case 8: return 0 case 16: return 1 case 32: return 2 case 64: return 3 case 128: return 4 case 256: return 5 case 512: return 6 case 1023: return 7 default: return 0 } } func getEPCFG(ep uint32) uint8 { switch ep { case 0: return sam.USB_DEVICE.EPCFG0.Get() case 1: return sam.USB_DEVICE.EPCFG1.Get() case 2: return sam.USB_DEVICE.EPCFG2.Get() case 3: return sam.USB_DEVICE.EPCFG3.Get() case 4: return sam.USB_DEVICE.EPCFG4.Get() case 5: return sam.USB_DEVICE.EPCFG5.Get() case 6: return sam.USB_DEVICE.EPCFG6.Get() case 7: return sam.USB_DEVICE.EPCFG7.Get() default: return 0 } } func setEPCFG(ep uint32, val uint8) { switch ep { case 0: sam.USB_DEVICE.EPCFG0.Set(val) case 1: sam.USB_DEVICE.EPCFG1.Set(val) case 2: sam.USB_DEVICE.EPCFG2.Set(val) case 3: sam.USB_DEVICE.EPCFG3.Set(val) case 4: sam.USB_DEVICE.EPCFG4.Set(val) case 5: sam.USB_DEVICE.EPCFG5.Set(val) case 6: sam.USB_DEVICE.EPCFG6.Set(val) case 7: sam.USB_DEVICE.EPCFG7.Set(val) default: return } } func setEPSTATUSCLR(ep uint32, val uint8) { switch ep { case 0: sam.USB_DEVICE.EPSTATUSCLR0.Set(val) case 1: sam.USB_DEVICE.EPSTATUSCLR1.Set(val) case 2: sam.USB_DEVICE.EPSTATUSCLR2.Set(val) case 3: sam.USB_DEVICE.EPSTATUSCLR3.Set(val) case 4: sam.USB_DEVICE.EPSTATUSCLR4.Set(val) case 5: sam.USB_DEVICE.EPSTATUSCLR5.Set(val) case 6: sam.USB_DEVICE.EPSTATUSCLR6.Set(val) case 7: sam.USB_DEVICE.EPSTATUSCLR7.Set(val) default: return } } func setEPSTATUSSET(ep uint32, val uint8) { switch ep { case 0: sam.USB_DEVICE.EPSTATUSSET0.Set(val) case 1: sam.USB_DEVICE.EPSTATUSSET1.Set(val) case 2: sam.USB_DEVICE.EPSTATUSSET2.Set(val) case 3: sam.USB_DEVICE.EPSTATUSSET3.Set(val) case 4: sam.USB_DEVICE.EPSTATUSSET4.Set(val) case 5: sam.USB_DEVICE.EPSTATUSSET5.Set(val) case 6: sam.USB_DEVICE.EPSTATUSSET6.Set(val) case 7: sam.USB_DEVICE.EPSTATUSSET7.Set(val) default: return } } func getEPSTATUS(ep uint32) uint8 { switch ep { case 0: return sam.USB_DEVICE.EPSTATUS0.Get() case 1: return sam.USB_DEVICE.EPSTATUS1.Get() case 2: return sam.USB_DEVICE.EPSTATUS2.Get() case 3: return sam.USB_DEVICE.EPSTATUS3.Get() case 4: return sam.USB_DEVICE.EPSTATUS4.Get() case 5: return sam.USB_DEVICE.EPSTATUS5.Get() case 6: return sam.USB_DEVICE.EPSTATUS6.Get() case 7: return sam.USB_DEVICE.EPSTATUS7.Get() default: return 0 } } func getEPINTFLAG(ep uint32) uint8 { switch ep { case 0: return sam.USB_DEVICE.EPINTFLAG0.Get() case 1: return sam.USB_DEVICE.EPINTFLAG1.Get() case 2: return sam.USB_DEVICE.EPINTFLAG2.Get() case 3: return sam.USB_DEVICE.EPINTFLAG3.Get() case 4: return sam.USB_DEVICE.EPINTFLAG4.Get() case 5: return sam.USB_DEVICE.EPINTFLAG5.Get() case 6: 
return sam.USB_DEVICE.EPINTFLAG6.Get() case 7: return sam.USB_DEVICE.EPINTFLAG7.Get() default: return 0 } } func setEPINTFLAG(ep uint32, val uint8) { switch ep { case 0: sam.USB_DEVICE.EPINTFLAG0.Set(val) case 1: sam.USB_DEVICE.EPINTFLAG1.Set(val) case 2: sam.USB_DEVICE.EPINTFLAG2.Set(val) case 3: sam.USB_DEVICE.EPINTFLAG3.Set(val) case 4: sam.USB_DEVICE.EPINTFLAG4.Set(val) case 5: sam.USB_DEVICE.EPINTFLAG5.Set(val) case 6: sam.USB_DEVICE.EPINTFLAG6.Set(val) case 7: sam.USB_DEVICE.EPINTFLAG7.Set(val) default: return } } func setEPINTENCLR(ep uint32, val uint8) { switch ep { case 0: sam.USB_DEVICE.EPINTENCLR0.Set(val) case 1: sam.USB_DEVICE.EPINTENCLR1.Set(val) case 2: sam.USB_DEVICE.EPINTENCLR2.Set(val) case 3: sam.USB_DEVICE.EPINTENCLR3.Set(val) case 4: sam.USB_DEVICE.EPINTENCLR4.Set(val) case 5: sam.USB_DEVICE.EPINTENCLR5.Set(val) case 6: sam.USB_DEVICE.EPINTENCLR6.Set(val) case 7: sam.USB_DEVICE.EPINTENCLR7.Set(val) default: return } } func setEPINTENSET(ep uint32, val uint8) { switch ep { case 0: sam.USB_DEVICE.EPINTENSET0.Set(val) case 1: sam.USB_DEVICE.EPINTENSET1.Set(val) case 2: sam.USB_DEVICE.EPINTENSET2.Set(val) case 3: sam.USB_DEVICE.EPINTENSET3.Set(val) case 4: sam.USB_DEVICE.EPINTENSET4.Set(val) case 5: sam.USB_DEVICE.EPINTENSET5.Set(val) case 6: sam.USB_DEVICE.EPINTENSET6.Set(val) case 7: sam.USB_DEVICE.EPINTENSET7.Set(val) default: return } } // ResetProcessor should perform a system reset in preperation // to switch to the bootloader to flash new firmware. func ResetProcessor() { arm.DisableInterrupts() // Perform magic reset into bootloader, as mentioned in // https://github.com/arduino/ArduinoCore-samd/issues/197 *(*uint32)(unsafe.Pointer(uintptr(0x20007FFC))) = RESET_MAGIC_VALUE arm.SystemReset() }
1
9,245
This line appears to have caused the regression. What is it supposed to be doing?
tinygo-org-tinygo
go
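A guess at what the reviewer is pointing at (not confirmed by this row): in the `handlePadCalibration` code above, the `calibTrim == 0x7` branch assigns `calibTransN = 3`, which looks like it was meant to clamp `calibTrim` itself. A minimal Go sketch of that reading, with the suspected fix marked as an assumption:

```go
// Minimal sketch, assuming the suspect line is the TRIM clamp in
// handlePadCalibration; the assignment to trim below is the assumed intent,
// not the code as it stands in the row.
package main

import "fmt"

func clampPadCalibration(fuse uint32) (transN, transP, trim uint16) {
	transN = uint16(fuse>>13) & 0x1f
	transP = uint16(fuse>>18) & 0x1f
	trim = uint16(fuse>>23) & 0x7

	if transN == 0x1f {
		transN = 5 // fallback when the fuse reads back unprogrammed
	}
	if transP == 0x1f {
		transP = 29
	}
	if trim == 0x7 {
		trim = 3 // the file shown above writes calibTransN here instead
	}
	return transN, transP, trim
}

func main() {
	fmt.Println(clampPadCalibration(0xFFFFFFFF)) // 5 29 3
}
```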
@@ -54,10 +54,8 @@ class ProxyType: value = str(value).upper() for attr in dir(cls): attr_value = getattr(cls, attr) - if isinstance(attr_value, dict) and \ - 'string' in attr_value and \ - attr_value['string'] is not None and \ - attr_value['string'] == value: + # `attr_value['string'] is not None` probably not required as `attr_value['string'] == value` + if isinstance(attr_value, dict) and 'string' in attr_value and attr_value['string'] == value: return attr_value raise Exception(f"No proxy type is found for {value}")
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ The Proxy implementation. """ class ProxyTypeFactory: """ Factory for proxy types. """ @staticmethod def make(ff_value, string): return {'ff_value': ff_value, 'string': string} class ProxyType: """ Set of possible types of proxy. Each proxy type has 2 properties: 'ff_value' is value of Firefox profile preference, 'string' is id of proxy type. """ DIRECT = ProxyTypeFactory.make(0, 'DIRECT') # Direct connection, no proxy (default on Windows). MANUAL = ProxyTypeFactory.make(1, 'MANUAL') # Manual proxy settings (e.g., for httpProxy). PAC = ProxyTypeFactory.make(2, 'PAC') # Proxy autoconfiguration from URL. RESERVED_1 = ProxyTypeFactory.make(3, 'RESERVED1') # Never used. AUTODETECT = ProxyTypeFactory.make(4, 'AUTODETECT') # Proxy autodetection (presumably with WPAD). SYSTEM = ProxyTypeFactory.make(5, 'SYSTEM') # Use system settings (default on Linux). UNSPECIFIED = ProxyTypeFactory.make(6, 'UNSPECIFIED') # Not initialized (for internal use). @classmethod def load(cls, value): if isinstance(value, dict) and 'string' in value: value = value['string'] value = str(value).upper() for attr in dir(cls): attr_value = getattr(cls, attr) if isinstance(attr_value, dict) and \ 'string' in attr_value and \ attr_value['string'] is not None and \ attr_value['string'] == value: return attr_value raise Exception(f"No proxy type is found for {value}") class Proxy(object): """ Proxy contains information about proxy type and necessary proxy settings. """ proxyType = ProxyType.UNSPECIFIED autodetect = False ftpProxy = '' httpProxy = '' noProxy = '' proxyAutoconfigUrl = '' sslProxy = '' socksProxy = '' socksUsername = '' socksPassword = '' socksVersion = None def __init__(self, raw=None): """ Creates a new Proxy. :Args: - raw: raw proxy data. If None, default class values are used. 
""" if raw is not None: if 'proxyType' in raw and raw['proxyType'] is not None: self.proxy_type = ProxyType.load(raw['proxyType']) if 'ftpProxy' in raw and raw['ftpProxy'] is not None: self.ftp_proxy = raw['ftpProxy'] if 'httpProxy' in raw and raw['httpProxy'] is not None: self.http_proxy = raw['httpProxy'] if 'noProxy' in raw and raw['noProxy'] is not None: self.no_proxy = raw['noProxy'] if 'proxyAutoconfigUrl' in raw and raw['proxyAutoconfigUrl'] is not None: self.proxy_autoconfig_url = raw['proxyAutoconfigUrl'] if 'sslProxy' in raw and raw['sslProxy'] is not None: self.sslProxy = raw['sslProxy'] if 'autodetect' in raw and raw['autodetect'] is not None: self.auto_detect = raw['autodetect'] if 'socksProxy' in raw and raw['socksProxy'] is not None: self.socks_proxy = raw['socksProxy'] if 'socksUsername' in raw and raw['socksUsername'] is not None: self.socks_username = raw['socksUsername'] if 'socksPassword' in raw and raw['socksPassword'] is not None: self.socks_password = raw['socksPassword'] if 'socksVersion' in raw and raw['socksVersion'] is not None: self.socks_version = raw['socksVersion'] @property def proxy_type(self): """ Returns proxy type as `ProxyType`. """ return self.proxyType @proxy_type.setter def proxy_type(self, value): """ Sets proxy type. :Args: - value: The proxy type. """ self._verify_proxy_type_compatibility(value) self.proxyType = value @property def auto_detect(self): """ Returns autodetect setting. """ return self.autodetect @auto_detect.setter def auto_detect(self, value): """ Sets autodetect setting. :Args: - value: The autodetect value. """ if isinstance(value, bool): if self.autodetect is not value: self._verify_proxy_type_compatibility(ProxyType.AUTODETECT) self.proxyType = ProxyType.AUTODETECT self.autodetect = value else: raise ValueError("Autodetect proxy value needs to be a boolean") @property def ftp_proxy(self): """ Returns ftp proxy setting. """ return self.ftpProxy @ftp_proxy.setter def ftp_proxy(self, value): """ Sets ftp proxy setting. :Args: - value: The ftp proxy value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.ftpProxy = value @property def http_proxy(self): """ Returns http proxy setting. """ return self.httpProxy @http_proxy.setter def http_proxy(self, value): """ Sets http proxy setting. :Args: - value: The http proxy value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.httpProxy = value @property def no_proxy(self): """ Returns noproxy setting. """ return self.noProxy @no_proxy.setter def no_proxy(self, value): """ Sets noproxy setting. :Args: - value: The noproxy value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.noProxy = value @property def proxy_autoconfig_url(self): """ Returns proxy autoconfig url setting. """ return self.proxyAutoconfigUrl @proxy_autoconfig_url.setter def proxy_autoconfig_url(self, value): """ Sets proxy autoconfig url setting. :Args: - value: The proxy autoconfig url value. """ self._verify_proxy_type_compatibility(ProxyType.PAC) self.proxyType = ProxyType.PAC self.proxyAutoconfigUrl = value @property def ssl_proxy(self): """ Returns https proxy setting. """ return self.sslProxy @ssl_proxy.setter def ssl_proxy(self, value): """ Sets https proxy setting. :Args: - value: The https proxy value. 
""" self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.sslProxy = value @property def socks_proxy(self): """ Returns socks proxy setting. """ return self.socksProxy @socks_proxy.setter def socks_proxy(self, value): """ Sets socks proxy setting. :Args: - value: The socks proxy value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.socksProxy = value @property def socks_username(self): """ Returns socks proxy username setting. """ return self.socksUsername @socks_username.setter def socks_username(self, value): """ Sets socks proxy username setting. :Args: - value: The socks proxy username value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.socksUsername = value @property def socks_password(self): """ Returns socks proxy password setting. """ return self.socksPassword @socks_password.setter def socks_password(self, value): """ Sets socks proxy password setting. :Args: - value: The socks proxy password value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.socksPassword = value @property def socks_version(self): """ Returns socks proxy version setting. """ return self.socksVersion @socks_version.setter def socks_version(self, value): """ Sets socks proxy version setting. :Args: - value: The socks proxy version value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.socksVersion = value def _verify_proxy_type_compatibility(self, compatibleProxy): if self.proxyType != ProxyType.UNSPECIFIED and self.proxyType != compatibleProxy: raise Exception(f"Specified proxy type ({compatibleProxy}) not compatible with current setting ({self.proxyType})") def add_to_capabilities(self, capabilities): """ Adds proxy information as capability in specified capabilities. :Args: - capabilities: The capabilities to which proxy will be added. """ proxy_caps = {} proxy_caps['proxyType'] = self.proxyType['string'] if self.autodetect: proxy_caps['autodetect'] = self.autodetect if self.ftpProxy: proxy_caps['ftpProxy'] = self.ftpProxy if self.httpProxy: proxy_caps['httpProxy'] = self.httpProxy if self.proxyAutoconfigUrl: proxy_caps['proxyAutoconfigUrl'] = self.proxyAutoconfigUrl if self.sslProxy: proxy_caps['sslProxy'] = self.sslProxy if self.noProxy: proxy_caps['noProxy'] = self.noProxy if self.socksProxy: proxy_caps['socksProxy'] = self.socksProxy if self.socksUsername: proxy_caps['socksUsername'] = self.socksUsername if self.socksPassword: proxy_caps['socksPassword'] = self.socksPassword if self.socksVersion: proxy_caps['socksVersion'] = self.socksVersion capabilities['proxy'] = proxy_caps
1
18,411
# `attr_value['string'] is not None` probably not required as `attr_value['string'] == value` check is already being done
SeleniumHQ-selenium
rb
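A small illustration of the point in the comment above (a sketch, not from the row): once `value` has been normalised with `str(value).upper()` it is always a string, so an entry whose `'string'` is `None` simply fails the equality test and the explicit `is not None` guard adds nothing.

```python
# Sketch of the simplified membership check discussed above; `matches` is a
# hypothetical helper, the condition mirrors the patched ProxyType.load.
def matches(attr_value, value):
    # If attr_value['string'] is None, the equality below is False anyway,
    # which is exactly what the removed `is not None` guard ensured.
    return isinstance(attr_value, dict) and 'string' in attr_value \
        and attr_value['string'] == value


print(matches({'ff_value': 6, 'string': None}, 'DIRECT'))      # False either way
print(matches({'ff_value': 0, 'string': 'DIRECT'}, 'DIRECT'))  # True
```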
@@ -39,13 +39,11 @@ namespace OpenTelemetry.Metrics configure?.Invoke(options); var exporter = new PrometheusExporter(options); - - var metricReader = new BaseExportingMetricReader(exporter); - exporter.CollectMetric = metricReader.Collect; + var reader = new BaseExportingMetricReader(exporter); var metricsHttpServer = new PrometheusExporterMetricsHttpServer(exporter); metricsHttpServer.Start(); - return builder.AddMetricReader(metricReader); + return builder.AddMetricReader(reader); } } }
1
// <copyright file="MeterProviderBuilderExtensions.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using OpenTelemetry.Exporter; namespace OpenTelemetry.Metrics { public static class MeterProviderBuilderExtensions { /// <summary> /// Adds Console exporter to the TracerProvider. /// </summary> /// <param name="builder"><see cref="MeterProviderBuilder"/> builder to use.</param> /// <param name="configure">Exporter configuration options.</param> /// <returns>The instance of <see cref="MeterProviderBuilder"/> to chain the calls.</returns> [System.Diagnostics.CodeAnalysis.SuppressMessage("Reliability", "CA2000:Dispose objects before losing scope", Justification = "The objects should not be disposed.")] public static MeterProviderBuilder AddPrometheusExporter(this MeterProviderBuilder builder, Action<PrometheusExporterOptions> configure = null) { if (builder == null) { throw new ArgumentNullException(nameof(builder)); } var options = new PrometheusExporterOptions(); configure?.Invoke(options); var exporter = new PrometheusExporter(options); var metricReader = new BaseExportingMetricReader(exporter); exporter.CollectMetric = metricReader.Collect; var metricsHttpServer = new PrometheusExporterMetricsHttpServer(exporter); metricsHttpServer.Start(); return builder.AddMetricReader(metricReader); } } }
1
21,550
@alanwest I noticed this while changing the code. I think we _might_ run into some race condition - if a scraper happens to hit the HTTP server before we could add the reader, what would happen (I guess we will hit an exception, which turns into HTTP 500)? I haven't looked into the HTTP server logic. I think it _might_ be OKAY. A better version could be - we only start the HTTP server once the exporter/reader are fully ready and both are hooked up to the provider.
open-telemetry-opentelemetry-dotnet
.cs
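A rough sketch of the ordering the comment above suggests (type and method names follow the file in this row, but this is not the actual change, and it only reorders work inside the extension method; fully deferring the server until the provider itself is built would need a different hook):

```csharp
using System;
using OpenTelemetry.Exporter;

namespace OpenTelemetry.Metrics
{
    public static class PrometheusDeferredStartExtensions
    {
        // Hypothetical variant: register the reader before exposing the scrape
        // endpoint, so an early scrape is less likely to see a half-wired exporter.
        public static MeterProviderBuilder AddPrometheusExporterDeferredStart(
            this MeterProviderBuilder builder,
            Action<PrometheusExporterOptions> configure = null)
        {
            var options = new PrometheusExporterOptions();
            configure?.Invoke(options);

            var exporter = new PrometheusExporter(options);
            var reader = new BaseExportingMetricReader(exporter);
            builder = builder.AddMetricReader(reader);

            // Start the HTTP server last, once the reader is hooked up.
            var metricsHttpServer = new PrometheusExporterMetricsHttpServer(exporter);
            metricsHttpServer.Start();

            return builder;
        }
    }
}
```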
@@ -224,7 +224,13 @@ namespace Datadog.Trace.ClrProfiler.IntegrationTests.AspNetCore if (!process.HasExited) { - process.Kill(); + // Try shutting down gracefully + await SubmitRequest(aspNetCorePort, "/shutdown"); + + if (!process.WaitForExit(5000)) + { + process.Kill(); + } } SpanTestHelpers.AssertExpectationsMet(Expectations, spans);
1
// <copyright file="AspNetCoreMvcTestBase.cs" company="Datadog"> // Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. // </copyright> using System; using System.Collections.Generic; using System.Linq; using System.Net; using System.Net.Http; using System.Threading; using System.Threading.Tasks; using Datadog.Core.Tools; using Datadog.Trace.Configuration; using Datadog.Trace.ExtensionMethods; using Datadog.Trace.TestHelpers; using Xunit.Abstractions; namespace Datadog.Trace.ClrProfiler.IntegrationTests.AspNetCore { public abstract class AspNetCoreMvcTestBase : TestHelper { protected const string TopLevelOperationName = "aspnet_core.request"; protected const string HeaderName1WithMapping = "datadog-header-name"; protected const string HeaderName1UpperWithMapping = "DATADOG-HEADER-NAME"; protected const string HeaderTagName1WithMapping = "datadog-header-tag"; protected const string HeaderValue1 = "asp-net-core"; protected const string HeaderName2 = "sample.correlation.identifier"; protected const string HeaderValue2 = "0000-0000-0000"; protected const string HeaderName3 = "Server"; protected const string HeaderValue3 = "Kestrel"; protected AspNetCoreMvcTestBase(string sampleAppName, ITestOutputHelper output, string serviceVersion) : base(sampleAppName, output) { ServiceVersion = serviceVersion; HttpClient = new HttpClient(); HttpClient.DefaultRequestHeaders.Add(HeaderName1WithMapping, HeaderValue1); HttpClient.DefaultRequestHeaders.Add(HeaderName2, HeaderValue2); SetEnvironmentVariable(ConfigurationKeys.HeaderTags, $"{HeaderName1UpperWithMapping}:{HeaderTagName1WithMapping},{HeaderName2},{HeaderName3}"); SetEnvironmentVariable(ConfigurationKeys.HttpServerErrorStatusCodes, "400-403, 500-503"); SetServiceVersion(ServiceVersion); CreateTopLevelExpectation(url: "/", httpMethod: "GET", httpStatus: "200", resourceUrl: "Home/Index", serviceVersion: ServiceVersion); CreateTopLevelExpectation(url: "/delay/0", httpMethod: "GET", httpStatus: "200", resourceUrl: "delay/{seconds}", serviceVersion: ServiceVersion); CreateTopLevelExpectation(url: "/api/delay/0", httpMethod: "GET", httpStatus: "200", resourceUrl: "api/delay/{seconds}", serviceVersion: ServiceVersion); CreateTopLevelExpectation(url: "/not-found", httpMethod: "GET", httpStatus: "404", resourceUrl: "/not-found", serviceVersion: ServiceVersion); CreateTopLevelExpectation(url: "/status-code/203", httpMethod: "GET", httpStatus: "203", resourceUrl: "status-code/{statusCode}", serviceVersion: ServiceVersion); CreateTopLevelExpectation( url: "/status-code/500", httpMethod: "GET", httpStatus: "500", resourceUrl: "status-code/{statusCode}", serviceVersion: ServiceVersion, additionalCheck: span => { var failures = new List<string>(); if (span.Error == 0) { failures.Add($"Expected Error flag set within {span.Resource}"); } if (SpanExpectation.GetTag(span, Tags.ErrorType) != null) { failures.Add($"Did not expect exception type within {span.Resource}"); } var errorMessage = SpanExpectation.GetTag(span, Tags.ErrorMsg); if (errorMessage != "The HTTP response has status code 500.") { failures.Add($"Expected specific error message within {span.Resource}. 
Found \"{errorMessage}\""); } return failures; }); CreateTopLevelExpectation( url: "/bad-request", httpMethod: "GET", httpStatus: "500", resourceUrl: "bad-request", serviceVersion: ServiceVersion, additionalCheck: span => { var failures = new List<string>(); if (span.Error == 0) { failures.Add($"Expected Error flag set within {span.Resource}"); } if (SpanExpectation.GetTag(span, Tags.ErrorType) != "System.Exception") { failures.Add($"Expected specific exception within {span.Resource}"); } var errorMessage = SpanExpectation.GetTag(span, Tags.ErrorMsg); if (errorMessage != "This was a bad request.") { failures.Add($"Expected specific error message within {span.Resource}. Found \"{errorMessage}\""); } return failures; }); CreateTopLevelExpectation( url: "/status-code/402", httpMethod: "GET", httpStatus: "402", resourceUrl: "status-code/{statusCode}", serviceVersion: ServiceVersion, additionalCheck: span => { var failures = new List<string>(); if (span.Error == 0) { failures.Add($"Expected Error flag set within {span.Resource}"); } var errorMessage = SpanExpectation.GetTag(span, Tags.ErrorMsg); if (errorMessage != "The HTTP response has status code 402.") { failures.Add($"Expected specific error message within {span.Resource}. Found \"{errorMessage}\""); } return failures; }); } public string ServiceVersion { get; } protected HttpClient HttpClient { get; } protected List<AspNetCoreMvcSpanExpectation> Expectations { get; set; } = new List<AspNetCoreMvcSpanExpectation>(); public async Task RunTraceTestOnSelfHosted(string packageVersion) { var agentPort = TcpPortProvider.GetOpenPort(); var aspNetCorePort = TcpPortProvider.GetOpenPort(); using (var agent = new MockTracerAgent(agentPort)) using (var process = StartSample(agent.Port, arguments: null, packageVersion: packageVersion, aspNetCorePort: aspNetCorePort)) { agent.SpanFilters.Add(IsNotServerLifeCheck); var wh = new EventWaitHandle(false, EventResetMode.AutoReset); process.OutputDataReceived += (sender, args) => { if (args.Data != null) { if (args.Data.Contains("Now listening on:") || args.Data.Contains("Unable to start Kestrel")) { wh.Set(); } Output.WriteLine($"[webserver][stdout] {args.Data}"); } }; process.BeginOutputReadLine(); process.ErrorDataReceived += (sender, args) => { if (args.Data != null) { Output.WriteLine($"[webserver][stderr] {args.Data}"); } }; process.BeginErrorReadLine(); wh.WaitOne(5000); var maxMillisecondsToWait = 15_000; var intervalMilliseconds = 500; var intervals = maxMillisecondsToWait / intervalMilliseconds; var serverReady = false; // wait for server to be ready to receive requests while (intervals-- > 0) { try { serverReady = await SubmitRequest(aspNetCorePort, "/alive-check") == HttpStatusCode.OK; } catch { // ignore } if (serverReady) { break; } Thread.Sleep(intervalMilliseconds); } if (!serverReady) { throw new Exception("Couldn't verify the application is ready to receive requests."); } var testStart = DateTime.Now; var paths = Expectations.Select(e => e.OriginalUri).ToArray(); await SubmitRequests(aspNetCorePort, paths); var spans = agent.WaitForSpans( Expectations.Count, operationName: TopLevelOperationName, minDateTime: testStart) .OrderBy(s => s.Start) .ToList(); if (!process.HasExited) { process.Kill(); } SpanTestHelpers.AssertExpectationsMet(Expectations, spans); } } protected void CreateTopLevelExpectation( string url, string httpMethod, string httpStatus, string resourceUrl, string serviceVersion, Func<MockTracerAgent.Span, List<string>> additionalCheck = null) { var resourceName = 
$"{httpMethod.ToUpper()} {resourceUrl}"; var expectation = new AspNetCoreMvcSpanExpectation( EnvironmentHelper.FullSampleName, serviceVersion, TopLevelOperationName, resourceName, httpStatus, httpMethod) { OriginalUri = url, }; expectation.RegisterDelegateExpectation(additionalCheck); _ = HeaderTagName1WithMapping.TryConvertToNormalizedHeaderTagName(out string normalizedHeaderTagName1WithMapping); expectation.RegisterTagExpectation(normalizedHeaderTagName1WithMapping, HeaderValue1); // For successful requests, assert that a header tag is present in both the request and response, with the prefixes "http.request.headers" and "http.response.headers", respectively _ = HeaderName2.TryConvertToNormalizedHeaderTagName(out string normalizedHeaderTagName2); expectation.RegisterTagExpectation($"{SpanContextPropagator.HttpRequestHeadersTagPrefix}.{normalizedHeaderTagName2}", HeaderValue2); expectation.RegisterTagExpectation($"{SpanContextPropagator.HttpResponseHeadersTagPrefix}.{normalizedHeaderTagName2}", HeaderValue2, when: (span) => span.Resource != "GET /not-found" && span.Resource != "GET bad-request"); // Assert that a response header tag is set on successful requests and failing requests _ = HeaderName3.TryConvertToNormalizedHeaderTagName(out string normalizedHeaderTagName3); expectation.RegisterTagExpectation($"{SpanContextPropagator.HttpResponseHeadersTagPrefix}.{normalizedHeaderTagName3}", HeaderValue3, when: (span) => span.Resource != "GET bad-request"); Expectations.Add(expectation); } protected async Task SubmitRequests(int aspNetCorePort, string[] paths) { foreach (var path in paths) { await SubmitRequest(aspNetCorePort, path); } } protected async Task<HttpStatusCode> SubmitRequest(int aspNetCorePort, string path) { HttpResponseMessage response = await HttpClient.GetAsync($"http://localhost:{aspNetCorePort}{path}"); string responseText = await response.Content.ReadAsStringAsync(); Output.WriteLine($"[http] {response.StatusCode} {responseText}"); return response.StatusCode; } private bool IsNotServerLifeCheck(MockTracerAgent.Span span) { var url = SpanExpectation.GetTag(span, Tags.HttpUrl); if (url == null) { return true; } return !url.Contains("alive-check"); } } }
1
20,695
As for Owin, I think we should exclude this URL in `IsNotServerLifeCheck` too?
DataDog-dd-trace-dotnet
.cs
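One possible shape for the change the comment above hints at (a sketch, not the merged code): extend the `IsNotServerLifeCheck` filter shown in this row so that the `/shutdown` request added by the patch is ignored the same way the alive-check is. The method would drop into the same test base class:

```csharp
// Sketch: also filter out spans from the /shutdown request introduced by the
// patch, so the graceful-shutdown call cannot leak into the span assertions.
private bool IsNotServerLifeCheck(MockTracerAgent.Span span)
{
    var url = SpanExpectation.GetTag(span, Tags.HttpUrl);
    if (url == null)
    {
        return true;
    }

    return !url.Contains("alive-check") && !url.Contains("shutdown");
}
```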
@@ -0,0 +1,16 @@ +using System; +using System.Net.Http; + +namespace OpenTelemetry.Exporter +{ + public interface IHttpClientFactoryExporterOptions + { + /// <summary> + /// Gets or sets the factory function called to create the <see + /// cref="HttpClient"/> instance that will be used at runtime to + /// transmit telemetry over HTTP. The returned instance will be reused + /// for all export invocations. + /// </summary> + public Func<HttpClient> HttpClientFactory { get; set; } + } +}
1
1
23,030
Does it make sense to just fold these options back into the exporter options? My thinking is that this option is unlike the processor/metric reader options in that it actually is about the exporter itself.
open-telemetry-opentelemetry-dotnet
.cs
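A sketch of the alternative the comment above floats, assuming a placeholder options class name: the factory lives directly on the exporter's options type rather than behind a separate `IHttpClientFactoryExporterOptions` interface.

```csharp
using System;
using System.Net.Http;

namespace OpenTelemetry.Exporter
{
    // Hypothetical options type, used only for illustration.
    public class SomeHttpExporterOptions
    {
        /// <summary>
        /// Gets or sets the factory called to create the <see cref="HttpClient"/>
        /// used to transmit telemetry over HTTP; the returned instance is reused
        /// for all export invocations.
        /// </summary>
        public Func<HttpClient> HttpClientFactory { get; set; } = () => new HttpClient();
    }
}
```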
@@ -12,6 +12,6 @@ return [ | */ - 'failed' => 'یہ تفصیلات ہمارے ریکارڈ سے مطابقت نہیں رکھتیں۔', - 'throttle' => 'لاگ اِن کرنے کی بہت زیادہ کوششیں۔ براہِ مہربانی :seconds سیکنڈ میں دوبارہ کوشش کریں۔', + 'failed' => 'یہ تفصیلات درست نہیں ہیں۔', + 'throttle' => 'لاگ اِن کرنے کی بہت زیادہ کوششیں۔ براہِ مہربانی کچھ سیکنڈ میں دوبارہ کوشش کریں۔', ];
1
<?php return [ /* |-------------------------------------------------------------------------- | Authentication Language Lines |-------------------------------------------------------------------------- | | The following language lines are used during authentication for various | messages that we need to display to the user. You are free to modify | these language lines according to your application's requirements. | */ 'failed' => 'یہ تفصیلات ہمارے ریکارڈ سے مطابقت نہیں رکھتیں۔', 'throttle' => 'لاگ اِن کرنے کی بہت زیادہ کوششیں۔ براہِ مہربانی :seconds سیکنڈ میں دوبارہ کوشش کریں۔', ];
1
6,989
the `:seconds` placeholder is missing here
Laravel-Lang-lang
php
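One way to address the comment above (a sketch, not the merged change): keep the reworded `failed` message from the patch, but leave the `:seconds` placeholder in `throttle` so Laravel can substitute the actual retry delay.

```php
<?php

return [
    // reworded message from the patch
    'failed' => 'یہ تفصیلات درست نہیں ہیں۔',
    // original string with the :seconds placeholder kept
    'throttle' => 'لاگ اِن کرنے کی بہت زیادہ کوششیں۔ براہِ مہربانی :seconds سیکنڈ میں دوبارہ کوشش کریں۔',
];
```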
@@ -149,7 +149,7 @@ class KeywordArgument(object): func = lambda arg=arg: arg * arg # [undefined-variable] arg2 = 0 - func2 = lambda arg2=arg2: arg2 * arg2 + func3 = lambda arg2=arg2: arg2 * arg2 # Don't emit if the code is protected by NameError try:
1
# pylint: disable=missing-docstring, multiple-statements # pylint: disable=too-few-public-methods, no-init, no-self-use, old-style-class,bare-except,broad-except from __future__ import print_function DEFINED = 1 if DEFINED != 1: if DEFINED in (unknown, DEFINED): # [undefined-variable] DEFINED += 1 def in_method(var): """method doc""" var = nomoreknown # [undefined-variable] assert var DEFINED = {DEFINED:__revision__} # [undefined-variable] # +1:[undefined-variable] DEFINED[__revision__] = OTHER = 'move this is astroid test' OTHER += '$' def bad_default(var, default=unknown2): # [undefined-variable] """function with defaut arg's value set to an unexistant name""" print(var, default) print(xxxx) # [undefined-variable] augvar += 1 # [undefined-variable] del vardel # [undefined-variable] LMBD = lambda x, y=doesnotexist: x+y # [undefined-variable] LMBD2 = lambda x, y: x+z # [undefined-variable] try: POUET # don't catch me except NameError: POUET = 'something' try: POUETT # [used-before-assignment] except Exception: # pylint:disable = broad-except POUETT = 'something' try: POUETTT # don't catch me except: # pylint:disable = bare-except POUETTT = 'something' print(POUET, POUETT, POUETTT) try: PLOUF # [used-before-assignment] except ValueError: PLOUF = 'something' print(PLOUF) def if_branch_test(something): """hop""" if something == 0: if xxx == 1: # [used-before-assignment] pass else: print(xxx) xxx = 3 def decorator(arg): """Decorator with one argument.""" return lambda: list(arg) @decorator(arg=[i * 2 for i in range(15)]) def func1(): """A function with a decorator that contains a listcomp.""" @decorator(arg=(i * 2 for i in range(15))) def func2(): """A function with a decorator that contains a genexpr.""" @decorator(lambda x: x > 0) def main(): """A function with a decorator that contains a lambda.""" # Test shared scope. def test_arguments(arg=TestClass): # [used-before-assignment] """ TestClass isn't defined yet. """ return arg class TestClass(Ancestor): # [used-before-assignment] """ contains another class, which uses an undefined ancestor. """ class MissingAncestor(Ancestor1): # [used-before-assignment] """ no op """ def test1(self): """ It should trigger here, because the two classes have the same scope. """ class UsingBeforeDefinition(Empty): # [used-before-assignment] """ uses Empty before definition """ class Empty(object): """ no op """ return UsingBeforeDefinition def test(self): """ Ancestor isn't defined yet, but we don't care. """ class MissingAncestor1(Ancestor): """ no op """ return MissingAncestor1 class Self(object): """ Detect when using the same name inside the class scope. """ obj = Self # [undefined-variable] class Self1(object): """ No error should be raised here. 
""" def test(self): """ empty """ return Self1 class Ancestor(object): """ No op """ class Ancestor1(object): """ No op """ NANA = BAT # [undefined-variable] del BAT class KeywordArgument(object): """Test keyword arguments.""" enable = True def test(self, is_enabled=enable): """do nothing.""" def test1(self, is_enabled=enabled): # [used-before-assignment] """enabled is undefined at this point, but it is used before assignment.""" def test2(self, is_disabled=disabled): # [undefined-variable] """disabled is undefined""" enabled = True func = lambda arg=arg: arg * arg # [undefined-variable] arg2 = 0 func2 = lambda arg2=arg2: arg2 * arg2 # Don't emit if the code is protected by NameError try: unicode_1 except NameError: pass try: unicode_2 # [undefined-variable] except Exception: pass try: unicode_3 # [undefined-variable] except: pass try: unicode_4 # [undefined-variable] except ValueError: pass # See https://bitbucket.org/logilab/pylint/issue/111/ try: raise IOError(1, "a") except IOError as err: print(err)
1
9,065
changing the name is not needed
PyCQA-pylint
py
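What the comment above asks for, roughly (a sketch): keep the original `func2` name, since the test only needs the default argument bound from the enclosing scope, not a new identifier.

```python
# Sketch: the lambda's default is captured from the enclosing scope at
# definition time, which works the same regardless of what the variable
# holding the lambda is called.
arg2 = 0
func2 = lambda arg2=arg2: arg2 * arg2  # default bound to the outer arg2 (0)

print(func2())   # 0, uses the captured default
print(func2(5))  # 25
```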
@@ -120,10 +120,10 @@ class SeriesTest(ReusedSQLTestCase, SQLTestUtils): self.assertEqual(kser.name, "renamed") self.assert_eq(kser, pser) - pser.name = None - kser.name = None - self.assertEqual(kser.name, None) - self.assert_eq(kser, pser) + # pser.name = None + # kser.name = None + # self.assertEqual(kser.name, None) + # self.assert_eq(kser, pser) pidx = pser.index kidx = kser.index
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import base64 from collections import defaultdict from distutils.version import LooseVersion import inspect from io import BytesIO from itertools import product from datetime import datetime, timedelta import matplotlib matplotlib.use("agg") from matplotlib import pyplot as plt import numpy as np import pandas as pd import pyspark from pyspark.ml.linalg import SparseVector from databricks import koalas as ks from databricks.koalas import Series from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils from databricks.koalas.exceptions import PandasNotImplementedError from databricks.koalas.missing.series import MissingPandasLikeSeries class SeriesTest(ReusedSQLTestCase, SQLTestUtils): @property def pser(self): return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") @property def kser(self): return ks.from_pandas(self.pser) def test_series(self): kser = self.kser self.assertTrue(isinstance(kser, Series)) self.assert_eq(kser + 1, self.pser + 1) def test_series_tuple_name(self): pser = self.pser pser.name = ("x", "a") kser = ks.from_pandas(pser) self.assert_eq(kser, pser) self.assert_eq(kser.name, pser.name) pser.name = ("y", "z") kser.name = ("y", "z") self.assert_eq(kser, pser) self.assert_eq(kser.name, pser.name) def test_repr_cache_invalidation(self): # If there is any cache, inplace operations should invalidate it. 
s = ks.range(10)["id"] s.__repr__() s.rename("a", inplace=True) self.assertEqual(s.__repr__(), s.rename("a").__repr__()) def test_empty_series(self): a = pd.Series([], dtype="i1") b = pd.Series([], dtype="str") self.assert_eq(ks.from_pandas(a), a) self.assertRaises(ValueError, lambda: ks.from_pandas(b)) with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): self.assert_eq(ks.from_pandas(a), a) self.assertRaises(ValueError, lambda: ks.from_pandas(b)) def test_all_null_series(self): a = pd.Series([None, None, None], dtype="float64") b = pd.Series([None, None, None], dtype="str") self.assert_eq(ks.from_pandas(a).dtype, a.dtype) self.assertTrue(ks.from_pandas(a).toPandas().isnull().all()) self.assertRaises(ValueError, lambda: ks.from_pandas(b)) with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): self.assert_eq(ks.from_pandas(a).dtype, a.dtype) self.assertTrue(ks.from_pandas(a).toPandas().isnull().all()) self.assertRaises(ValueError, lambda: ks.from_pandas(b)) def test_head_tail(self): kser = self.kser pser = self.pser self.assert_eq(kser.head(3), pser.head(3)) self.assert_eq(kser.head(0), pser.head(0)) self.assert_eq(kser.head(-3), pser.head(-3)) self.assert_eq(kser.head(-10), pser.head(-10)) # TODO: self.assert_eq(kser.tail(3), pser.tail(3)) def test_rename(self): pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") kser = ks.from_pandas(pser) pser.name = "renamed" kser.name = "renamed" self.assertEqual(kser.name, "renamed") self.assert_eq(kser, pser) pser.name = None kser.name = None self.assertEqual(kser.name, None) self.assert_eq(kser, pser) pidx = pser.index kidx = kser.index pidx.name = "renamed" kidx.name = "renamed" self.assertEqual(kidx.name, "renamed") self.assert_eq(kidx, pidx) def test_rename_method(self): # Series name pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") kser = ks.from_pandas(pser) self.assert_eq(kser.rename("y"), pser.rename("y")) self.assertEqual(kser.name, "x") # no mutation self.assert_eq(kser.rename(), pser.rename()) kser.rename("z", inplace=True) pser.rename("z", inplace=True) self.assertEqual(kser.name, "z") self.assert_eq(kser, pser) # Series index # pser = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x') # kser = ks.from_pandas(s) # TODO: index # res = kser.rename(lambda x: x ** 2) # self.assert_eq(res, pser.rename(lambda x: x ** 2)) # res = kser.rename(pser) # self.assert_eq(res, pser.rename(pser)) # res = kser.rename(kser) # self.assert_eq(res, pser.rename(pser)) # res = kser.rename(lambda x: x**2, inplace=True) # self.assertis(res, kser) # s.rename(lambda x: x**2, inplace=True) # self.assert_eq(kser, pser) def test_or(self): pdf = pd.DataFrame( { "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan], "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan], } ) kdf = ks.from_pandas(pdf) self.assert_eq(pdf["left"] | pdf["right"], kdf["left"] | kdf["right"]) def test_and(self): pdf = pd.DataFrame( { "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan], "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan], } ) kdf = ks.from_pandas(pdf) self.assert_eq(pdf["left"] & pdf["right"], kdf["left"] & kdf["right"]) def test_to_numpy(self): pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") kser = ks.from_pandas(pser) np.testing.assert_equal(kser.to_numpy(), pser.values) def test_isin(self): pser = pd.Series(["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal") kser = ks.from_pandas(pser) self.assert_eq(kser.isin(["cow", "lama"]), pser.isin(["cow", 
"lama"])) self.assert_eq(kser.isin({"cow"}), pser.isin({"cow"})) msg = "only list-like objects are allowed to be passed to isin()" with self.assertRaisesRegex(TypeError, msg): kser.isin(1) def test_fillna(self): pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x") kser = ks.from_pandas(pser) self.assert_eq(kser.fillna(0), pser.fillna(0)) self.assert_eq(kser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0)) kser.fillna(0, inplace=True) pser.fillna(0, inplace=True) self.assert_eq(kser, pser) # test considering series does not have NA/NaN values kser.fillna(0, inplace=True) pser.fillna(0, inplace=True) self.assert_eq(kser, pser) pser = pd.Series([1, 2, 3, 4, 5, 6], name="x") kser = ks.from_pandas(pser) pser.loc[3] = np.nan kser.loc[3] = np.nan self.assert_eq(kser.fillna(0), pser.fillna(0)) self.assert_eq(kser.fillna(method="ffill"), pser.fillna(method="ffill")) self.assert_eq(kser.fillna(method="bfill"), pser.fillna(method="bfill")) def test_dropna(self): pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x") kser = ks.from_pandas(pser) self.assert_eq(kser.dropna(), pser.dropna()) kser.dropna(inplace=True) self.assert_eq(kser, pser.dropna()) def test_nunique(self): pser = pd.Series([1, 2, 1, np.nan]) kser = ks.from_pandas(pser) # Assert NaNs are dropped by default nunique_result = kser.nunique() self.assertEqual(nunique_result, 2) self.assert_eq(nunique_result, pser.nunique()) # Assert including NaN values nunique_result = kser.nunique(dropna=False) self.assertEqual(nunique_result, 3) self.assert_eq(nunique_result, pser.nunique(dropna=False)) # Assert approximate counts self.assertEqual(ks.Series(range(100)).nunique(approx=True), 103) self.assertEqual(ks.Series(range(100)).nunique(approx=True, rsd=0.01), 100) def _test_value_counts(self): # this is also containing test for Index & MultiIndex pser = pd.Series([1, 2, 1, 3, 3, np.nan, 1, 4], name="x") kser = ks.from_pandas(pser) exp = pser.value_counts() res = kser.value_counts() self.assertEqual(res.name, exp.name) self.assert_eq(res, exp, almost=True) self.assert_eq( kser.value_counts(normalize=True), pser.value_counts(normalize=True), almost=True ) self.assert_eq( kser.value_counts(ascending=True), pser.value_counts(ascending=True), almost=True ) self.assert_eq( kser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) with self.assertRaisesRegex( NotImplementedError, "value_counts currently does not support bins" ): kser.value_counts(bins=3) pser.name = "index" kser.name = "index" self.assert_eq(kser.value_counts(), pser.value_counts(), almost=True) # Series from DataFrame pdf = pd.DataFrame({"a": [1, 2, 3], "b": [None, 1, None]}) kdf = ks.from_pandas(pdf) self.assert_eq( kdf.a.value_counts(normalize=True), pdf.a.value_counts(normalize=True), almost=True ) self.assert_eq( kdf.a.value_counts(ascending=True), pdf.a.value_counts(ascending=True), 
almost=True ) self.assert_eq( kdf.a.value_counts(normalize=True, dropna=False), pdf.a.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kdf.a.value_counts(ascending=True, dropna=False), pdf.a.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) # Series with NaN index pser = pd.Series([1, 2, 3], index=[2, None, 5]) kser = ks.from_pandas(pser) self.assert_eq( kser.value_counts(normalize=True), pser.value_counts(normalize=True), almost=True ) self.assert_eq( kser.value_counts(ascending=True), pser.value_counts(ascending=True), almost=True ) self.assert_eq( kser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) # Series with MultiIndex pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) kser = ks.from_pandas(pser) self.assert_eq( kser.value_counts(normalize=True), pser.value_counts(normalize=True), almost=True ) self.assert_eq( kser.value_counts(ascending=True), pser.value_counts(ascending=True), almost=True ) self.assert_eq( kser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) # Series with MultiIndex some of index has NaN pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", None), ("y", "c")]) kser = ks.from_pandas(pser) self.assert_eq( kser.value_counts(normalize=True), pser.value_counts(normalize=True), almost=True ) self.assert_eq( kser.value_counts(ascending=True), pser.value_counts(ascending=True), almost=True ) self.assert_eq( kser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), almost=True, 
) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) # Series with MultiIndex some of index is NaN. # This test only available for pandas >= 0.24. if LooseVersion(pd.__version__) >= LooseVersion("0.24"): pser.index = pd.MultiIndex.from_tuples([("x", "a"), None, ("y", "c")]) kser = ks.from_pandas(pser) self.assert_eq( kser.value_counts(normalize=True), pser.value_counts(normalize=True), almost=True ) self.assert_eq( kser.value_counts(ascending=True), pser.value_counts(ascending=True), almost=True ) self.assert_eq( kser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) def test_value_counts(self): if LooseVersion(pyspark.__version__) < LooseVersion("2.4"): with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): self._test_value_counts() self.assertRaises( RuntimeError, lambda: ks.MultiIndex.from_tuples([("x", "a"), ("x", "b")]).value_counts(), ) else: self._test_value_counts() def test_nsmallest(self): sample_lst = [1, 2, 3, 4, np.nan, 6] pser = pd.Series(sample_lst, name="x") kser = ks.Series(sample_lst, name="x") self.assert_eq(kser.nsmallest(n=3), pser.nsmallest(n=3)) self.assert_eq(kser.nsmallest(), pser.nsmallest()) self.assert_eq((kser + 1).nsmallest(), (pser + 1).nsmallest()) def test_nlargest(self): sample_lst = [1, 2, 3, 4, np.nan, 6] pser = pd.Series(sample_lst, name="x") kser = ks.Series(sample_lst, name="x") self.assert_eq(kser.nlargest(n=3), pser.nlargest(n=3)) self.assert_eq(kser.nlargest(), pser.nlargest()) self.assert_eq((kser + 1).nlargest(), (pser + 1).nlargest()) def test_isnull(self): pser = pd.Series([1, 2, 3, 4, np.nan, 6], name="x") kser = ks.from_pandas(pser) self.assert_eq(kser.notnull(), pser.notnull()) self.assert_eq(kser.isnull(), pser.isnull()) pser = self.pser kser = self.kser self.assert_eq(kser.notnull(), pser.notnull()) self.assert_eq(kser.isnull(), pser.isnull()) def test_all(self): for pser in [ pd.Series([True, True], name="x"), pd.Series([True, False], name="x"), pd.Series([0, 1], name="x"), pd.Series([1, 2, 3], name="x"), pd.Series([True, True, None], name="x"), pd.Series([True, False, None], name="x"), pd.Series([], name="x"), pd.Series([np.nan], name="x"), ]: kser = ks.from_pandas(pser) self.assert_eq(kser.all(), pser.all()) pser = pd.Series([1, 2, 3, 4], name="x") kser = ks.from_pandas(pser) self.assert_eq((kser % 2 == 0).all(), (pser % 2 == 0).all()) with self.assertRaisesRegex( NotImplementedError, 'axis 
should be either 0 or "index" currently.' ): kser.all(axis=1) def test_any(self): for pser in [ pd.Series([False, False], name="x"), pd.Series([True, False], name="x"), pd.Series([0, 1], name="x"), pd.Series([1, 2, 3], name="x"), pd.Series([True, True, None], name="x"), pd.Series([True, False, None], name="x"), pd.Series([], name="x"), pd.Series([np.nan], name="x"), ]: kser = ks.from_pandas(pser) self.assert_eq(kser.any(), pser.any()) pser = pd.Series([1, 2, 3, 4], name="x") kser = ks.from_pandas(pser) self.assert_eq((kser % 2 == 0).any(), (pser % 2 == 0).any()) with self.assertRaisesRegex( NotImplementedError, 'axis should be either 0 or "index" currently.' ): kser.any(axis=1) def test_reset_index_with_default_index_types(self): pser = pd.Series([1, 2, 3], name="0", index=np.random.rand(3)) kser = ks.from_pandas(pser) with ks.option_context("compute.default_index_type", "sequence"): self.assert_eq(kser.reset_index(), pser.reset_index()) with ks.option_context("compute.default_index_type", "distributed-sequence"): # the order might be changed. self.assert_eq(kser.reset_index().sort_index(), pser.reset_index()) with ks.option_context("compute.default_index_type", "distributed"): # the index is different. self.assert_eq( kser.reset_index().to_pandas().reset_index(drop=True), pser.reset_index() ) def test_sort_values(self): pser = pd.Series([1, 2, 3, 4, 5, None, 7], name="0") kser = ks.from_pandas(pser) self.assert_eq(repr(kser.sort_values()), repr(pser.sort_values())) self.assert_eq( repr(kser.sort_values(ascending=False)), repr(pser.sort_values(ascending=False)) ) self.assert_eq( repr(kser.sort_values(na_position="first")), repr(pser.sort_values(na_position="first")) ) self.assertRaises(ValueError, lambda: kser.sort_values(na_position="invalid")) self.assert_eq(kser.sort_values(inplace=True), pser.sort_values(inplace=True)) self.assert_eq(repr(kser), repr(pser)) def test_sort_index(self): pser = pd.Series([2, 1, np.nan], index=["b", "a", np.nan], name="0") kser = ks.from_pandas(pser) # Assert invalid parameters self.assertRaises(NotImplementedError, lambda: kser.sort_index(axis=1)) self.assertRaises(NotImplementedError, lambda: kser.sort_index(kind="mergesort")) self.assertRaises(ValueError, lambda: kser.sort_index(na_position="invalid")) # Assert default behavior without parameters self.assert_eq(kser.sort_index(), pser.sort_index(), almost=True) # Assert sorting descending self.assert_eq( kser.sort_index(ascending=False), pser.sort_index(ascending=False), almost=True ) # Assert sorting NA indices first self.assert_eq( kser.sort_index(na_position="first"), pser.sort_index(na_position="first"), almost=True ) # Assert sorting inplace self.assertEqual(kser.sort_index(inplace=True), pser.sort_index(inplace=True)) self.assert_eq(kser, pser, almost=True) # Assert multi-indices pser = pd.Series(range(4), index=[["b", "b", "a", "a"], [1, 0, 1, 0]], name="0") kser = ks.from_pandas(pser) self.assert_eq(kser.sort_index(), pser.sort_index(), almost=True) self.assert_eq(kser.sort_index(level=[1, 0]), pser.sort_index(level=[1, 0]), almost=True) self.assert_eq(kser.reset_index().sort_index(), pser.reset_index().sort_index()) def test_to_datetime(self): pser = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 100) kser = ks.from_pandas(pser) self.assert_eq( pd.to_datetime(pser, infer_datetime_format=True), ks.to_datetime(kser, infer_datetime_format=True), ) def test_missing(self): kser = self.kser missing_functions = inspect.getmembers(MissingPandasLikeSeries, inspect.isfunction) unsupported_functions = 
[ name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function" ] for name in unsupported_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(kser, name)() deprecated_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function" ] for name in deprecated_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Series.*{}.*is deprecated".format(name) ): getattr(kser, name)() missing_properties = inspect.getmembers( MissingPandasLikeSeries, lambda o: isinstance(o, property) ) unsupported_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "unsupported_property" ] for name in unsupported_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(kser, name) deprecated_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "deprecated_property" ] for name in deprecated_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Series.*{}.*is deprecated".format(name) ): getattr(kser, name) def test_clip(self): pser = pd.Series([0, 2, 4], index=np.random.rand(3)) kser = ks.from_pandas(pser) # Assert list-like values are not accepted for 'lower' and 'upper' msg = "List-like value are not supported for 'lower' and 'upper' at the moment" with self.assertRaises(ValueError, msg=msg): kser.clip(lower=[1]) with self.assertRaises(ValueError, msg=msg): kser.clip(upper=[1]) # Assert no lower or upper self.assert_eq(kser.clip(), pser.clip()) # Assert lower only self.assert_eq(kser.clip(1), pser.clip(1)) # Assert upper only self.assert_eq(kser.clip(upper=3), pser.clip(upper=3)) # Assert lower and upper self.assert_eq(kser.clip(1, 3), pser.clip(1, 3)) # Assert behavior on string values str_kser = ks.Series(["a", "b", "c"]) self.assert_eq(str_kser.clip(1, 3), str_kser) def test_is_unique(self): # We can't use pandas' is_unique for comparison. 
pandas 0.23 ignores None pser = pd.Series([1, 2, 2, None, None]) kser = ks.from_pandas(pser) self.assertEqual(False, kser.is_unique) self.assertEqual(False, (kser + 1).is_unique) pser = pd.Series([1, None, None]) kser = ks.from_pandas(pser) self.assertEqual(False, kser.is_unique) self.assertEqual(False, (kser + 1).is_unique) pser = pd.Series([1]) kser = ks.from_pandas(pser) self.assertEqual(pser.is_unique, kser.is_unique) self.assertEqual((pser + 1).is_unique, (kser + 1).is_unique) pser = pd.Series([1, 1, 1]) kser = ks.from_pandas(pser) self.assertEqual(pser.is_unique, kser.is_unique) self.assertEqual((pser + 1).is_unique, (kser + 1).is_unique) def test_to_list(self): if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"): self.assertEqual(self.kser.to_list(), self.pser.to_list()) def test_append(self): pser1 = pd.Series([1, 2, 3], name="0") pser2 = pd.Series([4, 5, 6], name="0") pser3 = pd.Series([4, 5, 6], index=[3, 4, 5], name="0") kser1 = ks.from_pandas(pser1) kser2 = ks.from_pandas(pser2) kser3 = ks.from_pandas(pser3) self.assert_eq(kser1.append(kser2), pser1.append(pser2)) self.assert_eq(kser1.append(kser3), pser1.append(pser3)) self.assert_eq( kser1.append(kser2, ignore_index=True), pser1.append(pser2, ignore_index=True) ) kser1.append(kser3, verify_integrity=True) msg = "Indices have overlapping values" with self.assertRaises(ValueError, msg=msg): kser1.append(kser2, verify_integrity=True) def test_map(self): pser = pd.Series(["cat", "dog", None, "rabbit"]) kser = ks.from_pandas(pser) # Currently Koalas doesn't return NaN as pandas does. self.assertEqual( repr(kser.map({})), repr(pser.map({}).replace({pd.np.nan: None}).rename(0)) ) d = defaultdict(lambda: "abc") self.assertTrue("abc" in repr(kser.map(d))) self.assertEqual(repr(kser.map(d)), repr(pser.map(d).rename(0))) def tomorrow(date) -> datetime: return date + timedelta(days=1) pser = pd.Series([datetime(2019, 10, 24)]) kser = ks.from_pandas(pser) self.assertEqual(repr(kser.map(tomorrow)), repr(pser.map(tomorrow).rename(0))) def test_add_prefix(self): pser = pd.Series([1, 2, 3, 4], name="0") kser = ks.from_pandas(pser) self.assert_eq(pser.add_prefix("item_"), kser.add_prefix("item_")) pser = pd.Series( [1, 2, 3], name="0", index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]), ) kser = ks.from_pandas(pser) self.assert_eq(pser.add_prefix("item_"), kser.add_prefix("item_")) def test_add_suffix(self): pser = pd.Series([1, 2, 3, 4], name="0") kser = ks.from_pandas(pser) self.assert_eq(pser.add_suffix("_item"), kser.add_suffix("_item")) pser = pd.Series( [1, 2, 3], name="0", index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]), ) kser = ks.from_pandas(pser) self.assert_eq(pser.add_suffix("_item"), kser.add_suffix("_item")) def test_hist(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10] ) kdf = ks.from_pandas(pdf) def plot_to_base64(ax): bytes_data = BytesIO() ax.figure.savefig(bytes_data, format="png") bytes_data.seek(0) b64_data = base64.b64encode(bytes_data.read()) plt.close(ax.figure) return b64_data _, ax1 = plt.subplots(1, 1) # Using plot.hist() because pandas changes ticks props when called hist() ax1 = pdf["a"].plot.hist() _, ax2 = plt.subplots(1, 1) ax2 = kdf["a"].hist() self.assert_eq(plot_to_base64(ax1), plot_to_base64(ax2)) def test_cummin(self): pser = pd.Series([1.0, None, 0.0, 4.0, 9.0]).rename("a") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cummin()), repr(kser.cummin())) 
self.assertEqual(repr(pser.cummin(skipna=False)), repr(kser.cummin(skipna=False))) # with reversed index pser.index = [4, 3, 2, 1, 0] kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cummin()), repr(kser.cummin())) self.assertEqual(repr(pser.cummin(skipna=False)), repr(kser.cummin(skipna=False))) def test_cummax(self): pser = pd.Series([1.0, None, 0.0, 4.0, 9.0]).rename("a") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cummax()), repr(kser.cummax())) self.assertEqual(repr(pser.cummax(skipna=False)), repr(kser.cummax(skipna=False))) # with reversed index pser.index = [4, 3, 2, 1, 0] kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cummax()), repr(kser.cummax())) self.assertEqual(repr(pser.cummax(skipna=False)), repr(kser.cummax(skipna=False))) def test_cumsum(self): pser = pd.Series([1.0, None, 0.0, 4.0, 9.0]).rename("a") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cumsum()), repr(kser.cumsum())) self.assertEqual(repr(pser.cumsum(skipna=False)), repr(kser.cumsum(skipna=False))) # with reversed index pser.index = [4, 3, 2, 1, 0] kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cumsum()), repr(kser.cumsum())) self.assertEqual(repr(pser.cumsum(skipna=False)), repr(kser.cumsum(skipna=False))) def test_cumprod(self): pser = pd.Series([1.0, None, 1.0, 4.0, 9.0]).rename("a") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cumprod()), repr(kser.cumprod())) self.assertEqual(repr(pser.cumprod(skipna=False)), repr(kser.cumprod(skipna=False))) # with reversed index pser.index = [4, 3, 2, 1, 0] kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cumprod()), repr(kser.cumprod())) self.assertEqual(repr(pser.cumprod(skipna=False)), repr(kser.cumprod(skipna=False))) with self.assertRaisesRegex(Exception, "values should be bigger than 0"): repr(ks.Series([0, 1]).cumprod()) def test_median(self): with self.assertRaisesRegex(ValueError, "accuracy must be an integer; however"): ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).median(accuracy="a") def test_rank(self): pser = pd.Series([1, 2, 3, 1], name="x") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.rank()), repr(kser.rank().sort_index())) self.assertEqual(repr(pser.rank()), repr(kser.rank().sort_index())) self.assertEqual( repr(pser.rank(ascending=False)), repr(kser.rank(ascending=False).sort_index()) ) self.assertEqual(repr(pser.rank(method="min")), repr(kser.rank(method="min").sort_index())) self.assertEqual(repr(pser.rank(method="max")), repr(kser.rank(method="max").sort_index())) self.assertEqual( repr(pser.rank(method="first")), repr(kser.rank(method="first").sort_index()) ) self.assertEqual( repr(pser.rank(method="dense")), repr(kser.rank(method="dense").sort_index()) ) msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'" with self.assertRaisesRegex(ValueError, msg): kser.rank(method="nothing") def test_round(self): pser = pd.Series([0.028208, 0.038683, 0.877076], name="x") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.round(2)), repr(kser.round(2))) msg = "decimals must be an integer" with self.assertRaisesRegex(ValueError, msg): kser.round(1.5) def test_quantile(self): with self.assertRaisesRegex(ValueError, "accuracy must be an integer; however"): ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(accuracy="a") with self.assertRaisesRegex(ValueError, "q must be a float of an array of floats;"): ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q="a") with self.assertRaisesRegex(ValueError, "q must be a float of an array of floats;"): ks.Series([24.0, 21.0, 25.0, 33.0, 
26.0]).quantile(q=["a"]) def test_idxmax(self): pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"]) kser = ks.Series(pser) self.assertEqual(kser.idxmax(), pser.idxmax()) self.assertEqual(kser.idxmax(skipna=False), pser.idxmax(skipna=False)) index = pd.MultiIndex.from_arrays( [["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second") ) pser = pd.Series(data=[1, 2, 4, 5], index=index) kser = ks.Series(pser) self.assertEqual(kser.idxmax(), pser.idxmax()) self.assertEqual(kser.idxmax(skipna=False), pser.idxmax(skipna=False)) kser = ks.Series([]) with self.assertRaisesRegex(ValueError, "an empty sequence"): kser.idxmax() pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8]) kser = ks.Series(pser) self.assertEqual(kser.idxmax(), pser.idxmax()) self.assertEqual(repr(kser.idxmax(skipna=False)), repr(pser.idxmax(skipna=False))) def test_idxmin(self): pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"]) kser = ks.Series(pser) self.assertEqual(kser.idxmin(), pser.idxmin()) self.assertEqual(kser.idxmin(skipna=False), pser.idxmin(skipna=False)) index = pd.MultiIndex.from_arrays( [["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second") ) pser = pd.Series(data=[1, 2, 4, 5], index=index) kser = ks.Series(pser) self.assertEqual(kser.idxmin(), pser.idxmin()) self.assertEqual(kser.idxmin(skipna=False), pser.idxmin(skipna=False)) kser = ks.Series([]) with self.assertRaisesRegex(ValueError, "an empty sequence"): kser.idxmin() pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8]) kser = ks.Series(pser) self.assertEqual(kser.idxmin(), pser.idxmin()) self.assertEqual(repr(kser.idxmin(skipna=False)), repr(pser.idxmin(skipna=False))) def test_shift(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) if LooseVersion(pd.__version__) < LooseVersion("0.24.2"): self.assertEqual(repr(kser.shift(periods=2)), repr(pser.shift(periods=2))) else: self.assertEqual( repr(kser.shift(periods=2, fill_value=0)), repr(pser.shift(periods=2, fill_value=0)) ) with self.assertRaisesRegex(ValueError, "periods should be an int; however"): kser.shift(periods=1.5) def test_astype(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) self.assert_eq(kser.astype(int), pser.astype(int)) self.assert_eq(kser.astype(bool), pser.astype(bool)) pser = pd.Series([10, 20, 15, 30, 45, None, np.nan], name="x") kser = ks.Series(pser) self.assert_eq(kser.astype(bool), pser.astype(bool)) pser = pd.Series(["hi", "hi ", " ", " \t", "", None], name="x") kser = ks.Series(pser) self.assert_eq(kser.astype(bool), pser.astype(bool)) self.assert_eq(kser.str.strip().astype(bool), pser.str.strip().astype(bool)) pser = pd.Series([True, False, None], name="x") kser = ks.Series(pser) self.assert_eq(kser.astype(bool), pser.astype(bool)) with self.assertRaisesRegex(ValueError, "Type int63 not understood"): kser.astype("int63") def test_aggregate(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) msg = "func must be a string or list of strings" with self.assertRaisesRegex(ValueError, msg): kser.aggregate({"x": ["min", "max"]}) msg = ( "If the given function is a list, it " "should only contains function names as strings." 
) with self.assertRaisesRegex(ValueError, msg): kser.aggregate(["min", max]) def test_drop(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) msg = "Need to specify at least one of 'labels' or 'index'" with self.assertRaisesRegex(ValueError, msg): kser.drop() # For MultiIndex midx = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) msg = "'level' should be less than the number of indexes" with self.assertRaisesRegex(ValueError, msg): kser.drop(labels="weight", level=2) msg = ( "If the given index is a list, it " "should only contains names as strings, " "or a list of tuples that contain " "index names as strings" ) with self.assertRaisesRegex(ValueError, msg): kser.drop(["lama", ["cow", "falcon"]]) msg = "'index' type should be one of str, list, tuple" with self.assertRaisesRegex(ValueError, msg): kser.drop({"lama": "speed"}) msg = "Cannot specify both 'labels' and 'index'" with self.assertRaisesRegex(ValueError, msg): kser.drop("lama", index="cow") msg = r"'Key length \(2\) exceeds index depth \(3\)'" with self.assertRaisesRegex(KeyError, msg): kser.drop(("lama", "speed", "x")) self.assert_eq(kser.drop(("lama", "speed", "x"), level=1), kser) def test_pop(self): midx = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) pser = kser.to_pandas() self.assert_eq(kser.pop(("lama", "speed")), pser.pop(("lama", "speed"))) msg = "'key' should be string or tuple that contains strings" with self.assertRaisesRegex(ValueError, msg): kser.pop(0) msg = ( "'key' should have index names as only strings " "or a tuple that contain index names as only strings" ) with self.assertRaisesRegex(ValueError, msg): kser.pop(("lama", 0)) msg = r"'Key length \(3\) exceeds index depth \(2\)'" with self.assertRaisesRegex(KeyError, msg): kser.pop(("lama", "speed", "x")) def test_replace(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) self.assert_eq(kser.replace(), pser.replace()) self.assert_eq(kser.replace({}), pser.replace({})) msg = "'to_replace' should be one of str, list, dict, int, float" with self.assertRaisesRegex(ValueError, msg): kser.replace(ks.range(5)) msg = "Replacement lists must match in length. 
Expecting 3 got 2" with self.assertRaisesRegex(ValueError, msg): kser.replace([10, 20, 30], [1, 2]) msg = "replace currently not support for regex" with self.assertRaisesRegex(NotImplementedError, msg): kser.replace(r"^1.$", regex=True) def test_xs(self): midx = pd.MultiIndex( [["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) pser = kser.to_pandas() self.assert_eq(kser.xs(("a", "lama", "speed")), pser.xs(("a", "lama", "speed"))) def test_duplicates(self): psers = { "test on texts": pd.Series( ["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal" ), "test on numbers": pd.Series([1, 1, 2, 4, 3]), } keeps = ["first", "last", False] for (msg, pser), keep in product(psers.items(), keeps): with self.subTest(msg, keep=keep): kser = ks.Series(pser) self.assert_eq( pser.drop_duplicates(keep=keep).sort_values(), kser.drop_duplicates(keep=keep).sort_values(), ) def test_update(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) msg = "'other' must be a Series" with self.assertRaisesRegex(ValueError, msg): kser.update(10) def test_where(self): pser1 = pd.Series([0, 1, 2, 3, 4], name=0) kser1 = ks.from_pandas(pser1) self.assert_eq(repr(pser1.where(pser1 > 3)), repr(kser1.where(kser1 > 3).sort_index())) def test_mask(self): pser1 = pd.Series([0, 1, 2, 3, 4], name=0) kser1 = ks.from_pandas(pser1) self.assert_eq(repr(pser1.mask(pser1 > 3)), repr(kser1.mask(kser1 > 3).sort_index())) def test_truncate(self): pser1 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7]) kser1 = ks.Series(pser1) pser2 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[7, 6, 5, 4, 3, 2, 1]) kser2 = ks.Series(pser2) self.assert_eq(kser1.truncate(), pser1.truncate()) self.assert_eq(kser1.truncate(before=2), pser1.truncate(before=2)) self.assert_eq(kser1.truncate(after=5), pser1.truncate(after=5)) self.assert_eq(kser1.truncate(copy=False), pser1.truncate(copy=False)) self.assert_eq(kser1.truncate(2, 5, copy=False), pser1.truncate(2, 5, copy=False)) self.assert_eq(kser2.truncate(4, 6), pser2.truncate(4, 6)) self.assert_eq(kser2.truncate(4, 6, copy=False), pser2.truncate(4, 6, copy=False)) kser = ks.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 3, 2, 1]) msg = "truncate requires a sorted index" with self.assertRaisesRegex(ValueError, msg): kser.truncate() kser = ks.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7]) msg = "Truncate: 2 must be after 5" with self.assertRaisesRegex(ValueError, msg): kser.truncate(5, 2) def test_getitem(self): pser = pd.Series([10, 20, 15, 30, 45], ["A", "A", "B", "C", "D"]) kser = ks.Series(pser) self.assert_eq(kser["A"], pser["A"]) self.assert_eq(kser["B"], pser["B"]) self.assert_eq(kser[kser > 15], pser[pser > 15]) # for MultiIndex midx = pd.MultiIndex( [["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]], ) pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], name="0", index=midx) kser = ks.Series(pser) self.assert_eq(kser["a"], pser["a"]) self.assert_eq(kser["a", "lama"], pser["a", "lama"]) self.assert_eq(kser[kser > 1.5], pser[pser > 1.5]) msg = r"'Key length \(4\) exceeds index depth \(3\)'" with self.assertRaisesRegex(KeyError, msg): kser[("a", "lama", "speed", "x")] def test_keys(self): midx = pd.MultiIndex( 
[["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) pser = kser.to_pandas() self.assert_eq(kser.keys(), pser.keys()) def test_index(self): # to check setting name of Index properly. idx = pd.Index([1, 2, 3, 4, 5, 6, 7, 8, 9]) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=idx) pser = kser.to_pandas() kser.name = "koalas" pser.name = "koalas" self.assert_eq(kser.index.name, pser.index.name) # for check setting names of MultiIndex properly. kser.names = ["hello", "koalas"] pser.names = ["hello", "koalas"] self.assert_eq(kser.index.names, pser.index.names) def test_pct_change(self): kser = ks.Series([90, 91, 85], index=[2, 4, 1]) pser = kser.to_pandas() self.assert_eq(kser.pct_change(periods=-1), pser.pct_change(periods=-1), almost=True) self.assert_eq( kser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000), almost=True ) self.assert_eq( kser.pct_change(periods=100000000), pser.pct_change(periods=100000000), almost=True ) # for MultiIndex midx = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) pser = kser.to_pandas() self.assert_eq(kser.pct_change(), pser.pct_change(), almost=True) self.assert_eq(kser.pct_change(periods=2), pser.pct_change(periods=2), almost=True) self.assert_eq(kser.pct_change(periods=-1), pser.pct_change(periods=-1), almost=True) self.assert_eq( kser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000), almost=True ) self.assert_eq( kser.pct_change(periods=100000000), pser.pct_change(periods=100000000), almost=True ) def test_axes(self): kser = ks.Series([90, 91, 85], index=[2, 4, 1]) pser = kser.to_pandas() self.assert_list_eq(kser.axes, pser.axes) # for MultiIndex midx = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) pser = kser.to_pandas() self.assert_list_eq(kser.axes, pser.axes) def test_combine_first(self): kser1 = ks.Series({"falcon": 330.0, "eagle": 160.0}) kser2 = ks.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0}) pser1 = kser1.to_pandas() pser2 = kser2.to_pandas() self.assert_eq( kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index() ) with self.assertRaisesRegex( ValueError, "`combine_first` only allows `Series` for parameter `other`" ): kser1.combine_first(50) kser1.name = ("X", "A") kser2.name = ("Y", "B") pser1.name = ("X", "A") pser2.name = ("Y", "B") self.assert_eq( kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index() ) # MultiIndex midx1 = pd.MultiIndex( [["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]], [[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]], ) midx2 = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser1 = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1) kser2 = ks.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2) pser1 = kser1.to_pandas() pser2 = kser2.to_pandas() self.assert_eq( kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index() ) # Series come from same DataFrame kdf = ks.DataFrame( { "A": {"falcon": 
330.0, "eagle": 160.0}, "B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0}, } ) kser1 = kdf.A kser2 = kdf.B pser1 = kser1.to_pandas() pser2 = kser2.to_pandas() self.assert_eq( kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index() ) kser1.name = ("X", "A") kser2.name = ("Y", "B") pser1.name = ("X", "A") pser2.name = ("Y", "B") self.assert_eq( kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index() ) def test_udt(self): sparse_values = {0: 0.1, 1: 1.1} sparse_vector = SparseVector(len(sparse_values), sparse_values) pser = pd.Series([sparse_vector]) if LooseVersion(pyspark.__version__) < LooseVersion("2.4"): with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): kser = ks.from_pandas(pser) self.assert_eq(kser, pser) else: kser = ks.from_pandas(pser) self.assert_eq(kser, pser) def test_repeat(self): pser = pd.Series(["a", "b", "c"], name="0", index=np.random.rand(3)) kser = ks.from_pandas(pser) self.assert_eq(kser.repeat(3).sort_index(), pser.repeat(3).sort_index()) self.assert_eq(kser.repeat(0).sort_index(), pser.repeat(0).sort_index()) self.assertRaises(ValueError, lambda: kser.repeat(-1)) self.assertRaises(ValueError, lambda: kser.repeat("abc")) pdf = pd.DataFrame({"a": ["a", "b", "c"], "rep": [10, 20, 30]}, index=np.random.rand(3)) kdf = ks.from_pandas(pdf) if LooseVersion(pyspark.__version__) < LooseVersion("2.4"): self.assertRaises(ValueError, lambda: kdf.a.repeat(kdf.rep)) else: self.assert_eq(kdf.a.repeat(kdf.rep).sort_index(), pdf.a.repeat(pdf.rep).sort_index()) def test_take(self): pser = pd.Series([100, 200, 300, 400, 500], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(kser.take([0, 2, 4]).sort_values(), pser.take([0, 2, 4]).sort_values()) self.assert_eq( kser.take(range(0, 5, 2)).sort_values(), pser.take(range(0, 5, 2)).sort_values() ) self.assert_eq(kser.take([-4, -2, 0]).sort_values(), pser.take([-4, -2, 0]).sort_values()) self.assert_eq( kser.take(range(-2, 1, 2)).sort_values(), pser.take(range(-2, 1, 2)).sort_values() ) # Checking the type of indices. 
self.assertRaises(ValueError, lambda: kser.take(1)) self.assertRaises(ValueError, lambda: kser.take("1")) self.assertRaises(ValueError, lambda: kser.take({1, 2})) self.assertRaises(ValueError, lambda: kser.take({1: None, 2: None})) def test_divmod(self): pser = pd.Series([100, None, 300, None, 500], name="Koalas") kser = ks.from_pandas(pser) if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"): self.assert_eq(repr(kser.divmod(-100)), repr(pser.divmod(-100))) self.assert_eq(repr(kser.divmod(100)), repr(pser.divmod(100))) elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"): expected_result = repr((pser.floordiv(-100), pser.mod(-100))) self.assert_eq(repr(kser.divmod(-100)), expected_result) expected_result = repr((pser.floordiv(100), pser.mod(100))) self.assert_eq(repr(kser.divmod(100)), expected_result) def test_rdivmod(self): pser = pd.Series([100, None, 300, None, 500], name="Koalas") kser = ks.from_pandas(pser) if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"): self.assert_eq(repr(kser.rdivmod(-100)), repr(pser.rdivmod(-100))) self.assert_eq(repr(kser.rdivmod(100)), repr(pser.rdivmod(100))) elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"): expected_result = repr((pser.rfloordiv(-100), pser.rmod(-100))) self.assert_eq(repr(kser.rdivmod(-100)), expected_result) expected_result = repr((pser.rfloordiv(100), pser.rmod(100))) self.assert_eq(repr(kser.rdivmod(100)), expected_result) def test_mod(self): pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(repr(kser.mod(-150)), repr(pser.mod(-150))) self.assert_eq(repr(kser.mod(0)), repr(pser.mod(0))) self.assert_eq(repr(kser.mod(150)), repr(pser.mod(150))) pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6}) kdf = ks.from_pandas(pdf) self.assert_eq(repr(kdf.a.mod(kdf.b)), repr(pdf.a.mod(pdf.b).rename("a"))) def test_rmod(self): pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(repr(kser.rmod(-150)), repr(pser.rmod(-150))) self.assert_eq(repr(kser.rmod(0)), repr(pser.rmod(0))) self.assert_eq(repr(kser.rmod(150)), repr(pser.rmod(150))) pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6}) kdf = ks.from_pandas(pdf) self.assert_eq(repr(kdf.a.rmod(kdf.b)), repr(pdf.a.rmod(pdf.b).rename("a"))) def test_asof(self): pser = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(repr(kser.asof(20)), repr(pser.asof(20))) self.assert_eq(repr(kser.asof([5, 20]).sort_index()), repr(pser.asof([5, 20]).sort_index())) self.assert_eq(repr(kser.asof(100)), repr(pser.asof(100))) self.assert_eq(repr(kser.asof(-100)), repr(pser.asof(-100))) self.assert_eq(repr(kser.asof(-100)), repr(pser.asof(-100))) self.assert_eq( repr(kser.asof([-100, 100]).sort_index()), repr(pser.asof([-100, 100]).sort_index()) ) # where cannot be an Index, Series or a DataFrame self.assertRaises(ValueError, lambda: kser.asof(ks.Index([-100, 100]))) self.assertRaises(ValueError, lambda: kser.asof(ks.Series([-100, 100]))) self.assertRaises(ValueError, lambda: kser.asof(ks.DataFrame({"A": [1, 2, 3]}))) # asof is not supported for a MultiIndex pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("y", "d")]) kser = ks.from_pandas(pser) self.assertRaises(ValueError, lambda: kser.asof(20)) # asof requires a sorted index (More precisely, should be a monotonic increasing) kser = ks.Series([1, 2, np.nan, 4], index=[10, 30, 20, 40], 
name="Koalas") self.assertRaises(ValueError, lambda: kser.asof(20)) kser = ks.Series([1, 2, np.nan, 4], index=[40, 30, 20, 10], name="Koalas") self.assertRaises(ValueError, lambda: kser.asof(20)) def test_squeeze(self): # Single value kser = ks.Series([90]) pser = kser.to_pandas() self.assert_eq(kser.squeeze(), pser.squeeze()) # Single value with MultiIndex midx = pd.MultiIndex.from_tuples([("a", "b", "c")]) kser = ks.Series([90], index=midx) pser = kser.to_pandas() self.assert_eq(kser.squeeze(), pser.squeeze()) # Multiple values kser = ks.Series([90, 91, 85]) pser = kser.to_pandas() self.assert_eq(kser.squeeze(), pser.squeeze()) # Multiple values with MultiIndex midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) kser = ks.Series([90, 91, 85], index=midx) pser = kser.to_pandas() self.assert_eq(kser.squeeze(), pser.squeeze()) def test_div_zero_and_nan(self): pser = pd.Series([100, None, -300, None, 500, -700, np.inf, -np.inf], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(repr(pser.div(0)), repr(kser.div(0))) self.assert_eq(repr(pser.truediv(0)), repr(kser.truediv(0))) self.assert_eq(repr(pser / 0), repr(kser / 0)) self.assert_eq(repr(pser.div(np.nan)), repr(kser.div(np.nan))) self.assert_eq(repr(pser.truediv(np.nan)), repr(kser.truediv(np.nan))) self.assert_eq(repr(pser / np.nan), repr(kser / np.nan)) # floordiv has different behavior in pandas > 1.0.0 when divide by 0 if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"): self.assert_eq(repr(pser.floordiv(0)), repr(kser.floordiv(0))) self.assert_eq(repr(pser // 0), repr(kser // 0)) else: result = pd.Series( [np.inf, np.nan, -np.inf, np.nan, np.inf, -np.inf, np.inf, -np.inf], name="Koalas" ) self.assert_eq(repr(kser.floordiv(0)), repr(result)) self.assert_eq(repr(kser // 0), repr(result)) self.assert_eq(repr(pser.floordiv(np.nan)), repr(kser.floordiv(np.nan))) def test_mad(self): pser = pd.Series([1, 2, 3, 4], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(pser.mad(), kser.mad()) pser = pd.Series([None, -2, 5, 10, 50, np.nan, -20], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(pser.mad(), kser.mad()) pmidx = pd.MultiIndex.from_tuples( [("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")] ) pser = pd.Series([1, 2, 3, 4, 5], name="Koalas") pser.index = pmidx kser = ks.from_pandas(pser) self.assert_eq(pser.mad(), kser.mad()) pmidx = pd.MultiIndex.from_tuples( [("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")] ) pser = pd.Series([None, -2, 5, 50, np.nan], name="Koalas") pser.index = pmidx kser = ks.from_pandas(pser) self.assert_eq(pser.mad(), kser.mad()) def test_to_frame(self): kser = ks.Series(["a", "b", "c"]) pser = kser.to_pandas() self.assert_eq(pser.to_frame(name="a"), kser.to_frame(name="a")) # for MultiIndex midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) kser = ks.Series(["a", "b", "c"], index=midx) pser = kser.to_pandas() self.assert_eq(pser.to_frame(name="a"), kser.to_frame(name="a")) def test_shape(self): kser = ks.Series(["a", "b", "c"]) pser = kser.to_pandas() self.assert_eq(pser.shape, kser.shape) # for MultiIndex midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) kser = ks.Series(["a", "b", "c"], index=midx) pser = kser.to_pandas() self.assert_eq(pser.shape, kser.shape) def test_to_markdown(self): pser = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") kser = ks.from_pandas(pser) # `to_markdown()` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0. 
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"): self.assertRaises(NotImplementedError, lambda: kser.to_markdown()) else: self.assert_eq(pser.to_markdown(), kser.to_markdown()) def test_unstack(self): pser = pd.Series( [10, -2, 4, 7], index=pd.MultiIndex.from_tuples( [("one", "a", "z"), ("one", "b", "x"), ("two", "a", "c"), ("two", "b", "v")], names=["A", "B", "C"], ), ) kser = ks.from_pandas(pser) levels = [-3, -2, -1, 0, 1, 2] for level in levels: pandas_result = pser.unstack(level=level) koalas_result = kser.unstack(level=level).sort_index() self.assert_eq(pandas_result, koalas_result) self.assert_eq(pandas_result.index.names, koalas_result.index.names) self.assert_eq(pandas_result.columns.names, koalas_result.columns.names) # non-numeric datatypes pser = pd.Series( list("abcd"), index=pd.MultiIndex.from_product([["one", "two"], ["a", "b"]]) ) kser = ks.from_pandas(pser) levels = [-2, -1, 0, 1] for level in levels: pandas_result = pser.unstack(level=level) koalas_result = kser.unstack(level=level).sort_index() self.assert_eq(pandas_result, koalas_result) self.assert_eq(pandas_result.index.names, koalas_result.index.names) self.assert_eq(pandas_result.columns.names, koalas_result.columns.names) # Exceeding the range of level self.assertRaises(IndexError, lambda: kser.unstack(level=3)) self.assertRaises(IndexError, lambda: kser.unstack(level=-4)) # Only support for MultiIndex kser = ks.Series([10, -2, 4, 7]) self.assertRaises(ValueError, lambda: kser.unstack()) def test_item(self): kser = ks.Series([10, 20]) self.assertRaises(ValueError, lambda: kser.item()) def test_filter(self): pser = pd.Series([0, 1, 2], index=["one", "two", "three"]) kser = ks.from_pandas(pser) self.assert_eq(pser.filter(items=["one", "three"]), kser.filter(items=["one", "three"])) self.assert_eq(pser.filter(regex="e$"), kser.filter(regex="e$")) self.assert_eq(pser.filter(like="hre"), kser.filter(like="hre")) with self.assertRaisesRegex(ValueError, "Series does not support columns axis."): kser.filter(like="hre", axis=1) # for MultiIndex midx = pd.MultiIndex.from_tuples([("one", "x"), ("two", "y"), ("three", "z")]) pser = pd.Series([0, 1, 2], index=midx) kser = ks.from_pandas(pser) self.assert_eq( pser.filter(items=[("one", "x"), ("three", "z")]), kser.filter(items=[("one", "x"), ("three", "z")]), ) with self.assertRaisesRegex(TypeError, "Unsupported type <class 'list'>"): kser.filter(items=[["one", "x"], ("three", "z")]) with self.assertRaisesRegex(ValueError, "The item should not be empty."): kser.filter(items=[(), ("three", "z")]) def test_abs(self): pser = pd.Series([-2, -1, 0, 1]) kser = ks.from_pandas(pser) self.assert_eq(abs(kser), abs(pser)) self.assert_eq(np.abs(kser), np.abs(pser)) def test_bfill(self): pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x") kser = ks.from_pandas(pser) self.assert_eq(kser.bfill(), pser.bfill()) kser.bfill(inplace=True) pser.bfill(inplace=True) self.assert_eq(kser, pser) def test_ffill(self): pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x") kser = ks.from_pandas(pser) self.assert_eq(repr(kser.ffill()), repr(pser.ffill())) kser.ffill(inplace=True) pser.ffill(inplace=True) self.assert_eq(repr(kser), repr(pser))
1
15,540
Hm .. so this case doesn't work anymore?
databricks-koalas
py
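The review comment for this record ("Hm .. so this case doesn't work anymore?") appears to point at the Spark < 2.4 branch of `test_value_counts`, where calling `value_counts()` on a Koalas `MultiIndex` is asserted to raise a `RuntimeError`. A minimal sketch of that case, assuming the `databricks-koalas` package and a running Spark session (not part of the record itself):

```python
# Minimal sketch of the MultiIndex.value_counts() case referenced in the review.
# Assumes databricks-koalas is installed and a Spark session is available; on
# Spark < 2.4 with Arrow disabled, the test above expects this call to raise.
import databricks.koalas as ks

kmidx = ks.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
try:
    print(kmidx.value_counts())  # supported on newer Spark versions
except RuntimeError as exc:
    print("value_counts not supported here:", exc)
```

The try/except mirrors the `assertRaises(RuntimeError, ...)` assertion in the test, so the sketch prints either the counts or the error depending on the Spark/Arrow combination in use.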
@@ -22,7 +22,8 @@
 import os
 import tempfile
 
-from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QProcess
+from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QObject, QProcess,
+    QFileSystemWatcher)
 
 from qutebrowser.config import config
 from qutebrowser.utils import message, log
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Launcher for an external editor.""" import os import tempfile from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QProcess from qutebrowser.config import config from qutebrowser.utils import message, log from qutebrowser.misc import guiprocess class ExternalEditor(QObject): """Class to simplify editing a text in an external editor. Attributes: _text: The current text before the editor is opened. _filename: The name of the file to be edited. _remove_file: Whether the file should be removed when the editor is closed. _proc: The GUIProcess of the editor. """ editing_finished = pyqtSignal(str) def __init__(self, parent=None): super().__init__(parent) self._filename = None self._proc = None self._remove_file = None def _cleanup(self): """Clean up temporary files after the editor closed.""" assert self._remove_file is not None if self._filename is None or not self._remove_file: # Could not create initial file. return try: if self._proc.exit_status() != QProcess.CrashExit: os.remove(self._filename) except OSError as e: # NOTE: Do not replace this with "raise CommandError" as it's # executed async. message.error("Failed to delete tempfile... ({})".format(e)) @pyqtSlot(int, QProcess.ExitStatus) def on_proc_closed(self, exitcode, exitstatus): """Write the editor text into the form field and clean up tempfile. Callback for QProcess when the editor was closed. """ log.procs.debug("Editor closed") if exitstatus != QProcess.NormalExit: # No error/cleanup here, since we already handle this in # on_proc_error. return try: if exitcode != 0: return encoding = config.val.editor.encoding try: with open(self._filename, 'r', encoding=encoding) as f: text = f.read() except OSError as e: # NOTE: Do not replace this with "raise CommandError" as it's # executed async. message.error("Failed to read back edited file: {}".format(e)) return log.procs.debug("Read back: {}".format(text)) self.editing_finished.emit(text) finally: self._cleanup() @pyqtSlot(QProcess.ProcessError) def on_proc_error(self, _err): self._cleanup() def edit(self, text, caret_position=None): """Edit a given text. Args: text: The initial text to edit. caret_position: The position of the caret in the text. """ if self._filename is not None: raise ValueError("Already editing a file!") try: # Close while the external process is running, as otherwise systems # with exclusive write access (e.g. 
Windows) may fail to update # the file from the external editor, see # https://github.com/qutebrowser/qutebrowser/issues/1767 with tempfile.NamedTemporaryFile( # pylint: disable=bad-continuation mode='w', prefix='qutebrowser-editor-', encoding=config.val.editor.encoding, delete=False) as fobj: # pylint: enable=bad-continuation if text: fobj.write(text) self._filename = fobj.name except OSError as e: message.error("Failed to create initial file: {}".format(e)) return self._remove_file = True line, column = self._calc_line_and_column(text, caret_position) self._start_editor(line=line, column=column) def edit_file(self, filename): """Edit the file with the given filename.""" self._filename = filename self._remove_file = False self._start_editor() def _start_editor(self, line=1, column=1): """Start the editor with the file opened as self._filename. Args: line: the line number to pass to the editor column: the column number to pass to the editor """ self._proc = guiprocess.GUIProcess(what='editor', parent=self) self._proc.finished.connect(self.on_proc_closed) self._proc.error.connect(self.on_proc_error) editor = config.val.editor.command executable = editor[0] args = [self._sub_placeholder(arg, line, column) for arg in editor[1:]] log.procs.debug("Calling \"{}\" with args {}".format(executable, args)) self._proc.start(executable, args) def _calc_line_and_column(self, text, caret_position): r"""Calculate line and column numbers given a text and caret position. Both line and column are 1-based indexes, because that's what most editors use as line and column starting index. By "most" we mean at least vim, nvim, gvim, emacs, atom, sublimetext, notepad++, brackets, visual studio, QtCreator and so on. To find the line we just count how many newlines there are before the caret and add 1. To find the column we calculate the difference between the caret and the last newline before the caret. For example in the text `aaa\nbb|bbb` (| represents the caret): caret_position = 6 text[:caret_position] = `aaa\nbb` text[:caret_position].count('\n') = 1 caret_position - text[:caret_position].rfind('\n') = 3 Thus line, column = 2, 3, and the caret is indeed in the second line, third column Args: text: the text for which the numbers must be calculated caret_position: the position of the caret in the text, or None Return: A (line, column) tuple of (int, int) """ if caret_position is None: return 1, 1 line = text[:caret_position].count('\n') + 1 column = caret_position - text[:caret_position].rfind('\n') return line, column def _sub_placeholder(self, arg, line, column): """Substitute a single placeholder. If the `arg` input to this function is a valid placeholder it will be substituted with the appropriate value, otherwise it will be left unchanged. Args: arg: an argument of editor.command. line: the previously-calculated line number for the text caret. column: the previously-calculated column number for the text caret. Return: The substituted placeholder or the original argument. """ replacements = { '{}': self._filename, '{file}': self._filename, '{line}': str(line), '{line0}': str(line-1), '{column}': str(column), '{column0}': str(column-1) } for old, new in replacements.items(): arg = arg.replace(old, new) return arg
1
20,180
Please indent this so it lines up with the `(`
qutebrowser-qutebrowser
py
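The review comment asks for the wrapped import in the patch to be aligned with the opening parenthesis. PEP 8 permits either a hanging indent or alignment with the opening delimiter; the reviewer is asking for the latter. A small sketch of the two styles, assuming PyQt5 is installed (the first variant is left commented out):

```python
# Continuation indented arbitrarily -- the form the review objects to:
# from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QObject, QProcess,
#     QFileSystemWatcher)

# Continuation lined up with the opening "(" -- the form the review asks for:
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QObject, QProcess,
                          QFileSystemWatcher)
```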
@@ -23,10 +23,11 @@ function roots_scripts() {
   if (is_single() && comments_open() && get_option('thread_comments')) {
     wp_enqueue_script('comment-reply');
   }
-
-  wp_register_script('roots_plugins', get_template_directory_uri() . '/js/plugins.js', false, null, false);
+
+  // Not included by default since this code is used for debugging javas in the console. Uncomment next line and wp_enqueue_script below to use it.
+  //wp_register_script('roots_plugins', get_template_directory_uri() . '/js/plugins.js', false, null, false);
   wp_register_script('roots_main', get_template_directory_uri() . '/js/main.js', false, null, false);
-  wp_enqueue_script('roots_plugins');
+  //wp_enqueue_script('roots_plugins');
   wp_enqueue_script('roots_main');
 }
 
1
<?php function roots_scripts() { // Not included by default since Bootstrap's reset supersedes h5bp's. Include if you aren't using Bootstrap. //wp_enqueue_style('roots_style', get_template_directory_uri() . '/css/style.css', false, null); wp_enqueue_style('roots_bootstrap_style', get_template_directory_uri() . '/css/bootstrap.css', false, null); if (BOOTSTRAP_RESPONSIVE) { wp_enqueue_style('roots_bootstrap_responsive_style', get_template_directory_uri() . '/css/bootstrap-responsive.css', array('roots_bootstrap_style'), null); } wp_enqueue_style('roots_app_style', get_template_directory_uri() . '/css/app.css', false, null); if (is_child_theme()) { wp_enqueue_style('roots_child_style', get_stylesheet_uri()); } if (!is_admin()) { wp_deregister_script('jquery'); wp_register_script('jquery', '', '', '', false); } if (is_single() && comments_open() && get_option('thread_comments')) { wp_enqueue_script('comment-reply'); } wp_register_script('roots_plugins', get_template_directory_uri() . '/js/plugins.js', false, null, false); wp_register_script('roots_main', get_template_directory_uri() . '/js/main.js', false, null, false); wp_enqueue_script('roots_plugins'); wp_enqueue_script('roots_main'); } add_action('wp_enqueue_scripts', 'roots_scripts', 100);
1
7,652
We should still register the script. Just not enqueue it.
roots-sage
php
@@ -20,7 +20,7 @@ module Mongoid #:nodoc:
     def changes
       {}.tap do |hash|
         changed.each do |name|
-          change = attribute_change(name)
+          change = [changed_attributes[name], attributes[name]] if attribute_changed?(name)
           hash[name] = change if change[0] != change[1]
         end
       end
1
# encoding: utf-8 module Mongoid #:nodoc: module Dirty #:nodoc: extend ActiveSupport::Concern include ActiveModel::Dirty # Get the changed values for the document. This is a hash with the name of # the field as the keys, and the values being an array of previous and # current pairs. # # @example Get the changes. # document.changes # # @note This is overriding the AM::Dirty implementation to handle # enumerable fields being in the hash when not actually changed. # # @return [ Hash ] The changed values. # # @since 2.1.0 def changes {}.tap do |hash| changed.each do |name| change = attribute_change(name) hash[name] = change if change[0] != change[1] end end end # Call this method after save, so the changes can be properly switched. # # This will unset the memoized children array, set new record to # false, set the document as validated, and move the dirty changes. # # @example Move the changes to previous. # person.move_changes # # @since 2.1.0 def move_changes @_children = nil @previously_changed = changes @validated = false changed_attributes.clear end # Remove a change from the dirty attributes hash. Used by the single field # atomic updators. # # @example Remove a flagged change. # model.remove_change(:field) # # @param [ Symbol, String ] name The name of the field. # # @since 2.1.0 def remove_change(name) changed_attributes.delete(name.to_s) end # Gets all the new values for each of the changed fields, to be passed to # a MongoDB $set modifier. # # @example Get the setters for the atomic updates. # person = Person.new(:title => "Sir") # person.title = "Madam" # person.setters # returns { "title" => "Madam" } # # @return [ Hash ] A +Hash+ of atomic setters. def setters {}.tap do |modifications| changes.each_pair do |field, changes| key = embedded? ? "#{atomic_position}.#{field}" : field modifications[key] = changes[1] end end end end end
1
9,053
I think this can stay as a method. And we can write our own attribute_change(name) as required.
mongodb-mongoid
rb
@@ -142,7 +142,8 @@ namespace MvvmCross.DroidX.RecyclerView
 
         protected virtual View InflateViewForHolder(ViewGroup parent, int viewType, IMvxAndroidBindingContext bindingContext)
         {
-            return bindingContext.BindingInflate(viewType, parent, false);
+            var layoutId = ItemTemplateSelector.GetItemLayoutId (viewType);
+            return bindingContext.BindingInflate(layoutId, parent, false);
         }
 
         public override void OnBindViewHolder(AndroidX.RecyclerView.Widget.RecyclerView.ViewHolder holder, int position)
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MS-PL license. // See the LICENSE file in the project root for more information. using System; using System.Collections; using System.Collections.Specialized; using System.Windows.Input; using Android.OS; using Android.Runtime; using Android.Views; using Android.Widget; using MvvmCross.Binding; using MvvmCross.Binding.Attributes; using MvvmCross.Binding.Extensions; using MvvmCross.DroidX.RecyclerView.ItemTemplates; using MvvmCross.DroidX.RecyclerView.Model; using MvvmCross.Logging; using MvvmCross.Platforms.Android.Binding.BindingContext; using MvvmCross.WeakSubscription; using Object = Java.Lang.Object; namespace MvvmCross.DroidX.RecyclerView { [Register("mvvmcross.droidx.recyclerview.MvxRecyclerAdapter")] public class MvxRecyclerAdapter : AndroidX.RecyclerView.Widget.RecyclerView.Adapter, IMvxRecyclerAdapter, IMvxRecyclerAdapterBindableHolder { private ICommand _itemClick, _itemLongClick; private IEnumerable _itemsSource; private IDisposable _subscription; private IMvxTemplateSelector _itemTemplateSelector; protected IMvxAndroidBindingContext BindingContext { get; } public bool ReloadOnAllItemsSourceSets { get; set; } public MvxRecyclerAdapter() : this(null) { } public MvxRecyclerAdapter(IMvxAndroidBindingContext bindingContext) { BindingContext = bindingContext ?? MvxAndroidBindingContextHelpers.Current(); } protected MvxRecyclerAdapter(IntPtr javaReference, JniHandleOwnership transfer) : base(javaReference, transfer) { } [MvxSetToNullAfterBinding] public ICommand ItemClick { get => _itemClick; set { if (ReferenceEquals(_itemClick, value)) return; if (_itemClick != null && value != null) MvxAndroidLog.Instance.Warn("Changing ItemClick may cause inconsistencies where some items still call the old command."); _itemClick = value; } } [MvxSetToNullAfterBinding] public ICommand ItemLongClick { get => _itemLongClick; set { if (ReferenceEquals(_itemLongClick, value)) return; if (_itemLongClick != null && value != null) MvxAndroidLog.Instance.Warn("Changing ItemLongClick may cause inconsistencies where some items still call the old command."); _itemLongClick = value; } } [MvxSetToNullAfterBinding] public virtual IEnumerable ItemsSource { get => _itemsSource; set => SetItemsSource(value); } [MvxSetToNullAfterBinding] public virtual IMvxTemplateSelector ItemTemplateSelector { get => _itemTemplateSelector; set { if (ReferenceEquals(_itemTemplateSelector, value)) return; _itemTemplateSelector = value; // since the template selector has changed then let's force the list to redisplay by firing NotifyDataSetChanged() if (_itemsSource != null) NotifyDataSetChanged(); } } public override void OnViewAttachedToWindow(Object holder) { base.OnViewAttachedToWindow(holder); var viewHolder = (IMvxRecyclerViewHolder)holder; viewHolder.OnAttachedToWindow(); } public override void OnViewDetachedFromWindow(Object holder) { var viewHolder = (IMvxRecyclerViewHolder)holder; viewHolder.OnDetachedFromWindow(); base.OnViewDetachedFromWindow(holder); } public override int GetItemViewType(int position) { var itemAtPosition = GetItem(position); var viewTypeIndex = ItemTemplateSelector.GetItemViewType(itemAtPosition); var viewType = ItemTemplateSelector.GetItemLayoutId(viewTypeIndex); return viewType; } public override AndroidX.RecyclerView.Widget.RecyclerView.ViewHolder OnCreateViewHolder(ViewGroup parent, int viewType) { var itemBindingContext = new MvxAndroidBindingContext(parent.Context, 
BindingContext.LayoutInflaterHolder); var viewHolder = new MvxRecyclerViewHolder(InflateViewForHolder(parent, viewType, itemBindingContext), itemBindingContext) { Id = viewType }; return viewHolder; } protected virtual View InflateViewForHolder(ViewGroup parent, int viewType, IMvxAndroidBindingContext bindingContext) { return bindingContext.BindingInflate(viewType, parent, false); } public override void OnBindViewHolder(AndroidX.RecyclerView.Widget.RecyclerView.ViewHolder holder, int position) { var dataContext = GetItem(position); var viewHolder = (IMvxRecyclerViewHolder)holder; viewHolder.DataContext = dataContext; if (viewHolder.Id == global::Android.Resource.Layout.SimpleListItem1) ((TextView)holder.ItemView).Text = dataContext?.ToString(); viewHolder.Click -= OnItemViewClick; viewHolder.LongClick -= OnItemViewLongClick; viewHolder.Click += OnItemViewClick; viewHolder.LongClick += OnItemViewLongClick; OnMvxViewHolderBound(new MvxViewHolderBoundEventArgs(position, dataContext, holder)); } public override void OnViewRecycled(Object holder) { var viewHolder = (IMvxRecyclerViewHolder)holder; viewHolder.Click -= OnItemViewClick; viewHolder.LongClick -= OnItemViewLongClick; viewHolder.OnViewRecycled(); } public override void OnDetachedFromRecyclerView(AndroidX.RecyclerView.Widget.RecyclerView recyclerView) { base.OnDetachedFromRecyclerView(recyclerView); Clean(false); } /// <summary> /// By default, force recycling a view if it has animations /// </summary> public override bool OnFailedToRecycleView(Object holder) => true; protected virtual void OnItemViewClick(object sender, EventArgs e) { var holder = (IMvxRecyclerViewHolder)sender; ExecuteCommandOnItem(ItemClick, holder.DataContext); } protected virtual void OnItemViewLongClick(object sender, EventArgs e) { var holder = (IMvxRecyclerViewHolder)sender; ExecuteCommandOnItem(ItemLongClick, holder.DataContext); } protected virtual void ExecuteCommandOnItem(ICommand command, object itemDataContext) { if (command != null && itemDataContext != null && command.CanExecute(itemDataContext)) command.Execute(itemDataContext); } public override int ItemCount => _itemsSource.Count(); public virtual object GetItem(int viewPosition) { var itemsSourcePosition = GetItemsSourcePosition(viewPosition); //Do not check if viewPosition is in the range for non IList enumerables. Ie: do not call Count() on the list, as it triggers a full enumeration and kills performance for a streaming source. if (ItemsSource is IList items) { if (itemsSourcePosition >= 0 && itemsSourcePosition < items.Count) return items[itemsSourcePosition]; MvxAndroidLog.Instance.Error($"MvxRecyclerView GetItem index out of range. viewPosition:{viewPosition} itemsSourcePosition:{itemsSourcePosition} itemCount:{_itemsSource.Count()}"); //We should trigger an exception instead of hiding it here, as it means you have bugs in your code. return null; } //May crash if itemsSourcePosition is out or range. Which should never happen anyway, except when you have bugs in your code. 
return _itemsSource.ElementAt(itemsSourcePosition); } protected virtual int GetViewPosition(object item) { var itemsSourcePosition = _itemsSource.GetPosition(item); return GetViewPosition(itemsSourcePosition); } protected virtual int GetViewPosition(int itemsSourcePosition) { return itemsSourcePosition; } protected virtual int GetItemsSourcePosition(int viewPosition) { return viewPosition; } public int ItemTemplateId { get; set; } protected virtual void SetItemsSource(IEnumerable value) { if (Looper.MainLooper != Looper.MyLooper()) MvxAndroidLog.Instance.Error("ItemsSource property set on a worker thread. This leads to crash in the RecyclerView. It must be set only from the main thread."); if (ReferenceEquals(_itemsSource, value) && !ReloadOnAllItemsSourceSets) return; _subscription?.Dispose(); _subscription = null; if (value != null && !(value is IList)) { MvxBindingLog.Warning("Binding to IEnumerable rather than IList - this can be inefficient, especially for large lists"); } if (value is INotifyCollectionChanged newObservable) _subscription = newObservable.WeakSubscribe(OnItemsSourceCollectionChanged); _itemsSource = value; NotifyDataSetChanged(); } protected virtual void OnItemsSourceCollectionChanged(object sender, NotifyCollectionChangedEventArgs e) { if (_subscription == null || _itemsSource == null) //Object disposed return; if (Looper.MainLooper == Looper.MyLooper()) NotifyDataSetChanged(e); else MvxAndroidLog.Instance.Error("ItemsSource collection content changed on a worker thread. This leads to crash in the RecyclerView as it will not be aware of changes immediatly and may get a deleted item or update an item with a bad item template. All changes must be synchronized on the main thread."); } public virtual void NotifyDataSetChanged(NotifyCollectionChangedEventArgs e) { switch (e.Action) { case NotifyCollectionChangedAction.Add: NotifyItemRangeInserted(GetViewPosition(e.NewStartingIndex), e.NewItems.Count); break; case NotifyCollectionChangedAction.Move: for (var i = 0; i < e.NewItems.Count; i++) NotifyItemMoved(GetViewPosition(e.OldStartingIndex + i), GetViewPosition(e.NewStartingIndex + i)); break; case NotifyCollectionChangedAction.Replace: NotifyItemRangeChanged(GetViewPosition(e.NewStartingIndex), e.NewItems.Count); break; case NotifyCollectionChangedAction.Remove: NotifyItemRangeRemoved(GetViewPosition(e.OldStartingIndex), e.OldItems.Count); break; case NotifyCollectionChangedAction.Reset: NotifyDataSetChanged(); break; } } public event Action<MvxViewHolderBoundEventArgs> MvxViewHolderBound; protected virtual void OnMvxViewHolderBound(MvxViewHolderBoundEventArgs obj) { MvxViewHolderBound?.Invoke(obj); } private void Clean(bool disposing) { if (disposing) { _subscription?.Dispose(); _subscription = null; _itemClick = null; _itemLongClick = null; _itemsSource = null; _itemTemplateSelector = null; } } /// <summary> /// Always called with disposing = false, as it is only disposed from java /// </summary> protected override void Dispose(bool disposing) { Clean(true); base.Dispose(disposing); } } }
1
15,231
@alexshikov @Cheesebaron I might be missing something, but isn't the viewType parameter here the actual layout resource id? Why would the GetItemLayoutId method need to be called again? On line 127, the exact same method is called, except that this time the index of the object in the backing data source is passed in, which makes sense: the template selector can return the layout id based on the index of the item in the list.
MvvmCross-MvvmCross
.cs
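For context on the question in the MvvmCross record above: in the adapter source shown there, GetItemViewType already resolves the item to a layout resource id via the template selector, so the viewType that RecyclerView hands back is that layout id. The following is a condensed sketch of the two overrides, taken from the file above (not from the patch under review, which is not shown in this record):

```csharp
public override int GetItemViewType(int position)
{
    var item = GetItem(position);
    var viewTypeIndex = ItemTemplateSelector.GetItemViewType(item);
    // The value returned to RecyclerView is already a layout resource id.
    return ItemTemplateSelector.GetItemLayoutId(viewTypeIndex);
}

public override AndroidX.RecyclerView.Widget.RecyclerView.ViewHolder OnCreateViewHolder(ViewGroup parent, int viewType)
{
    var itemBindingContext = new MvxAndroidBindingContext(parent.Context, BindingContext.LayoutInflaterHolder);
    // viewType can be inflated directly; no second GetItemLayoutId lookup is needed here.
    return new MvxRecyclerViewHolder(InflateViewForHolder(parent, viewType, itemBindingContext), itemBindingContext)
    {
        Id = viewType
    };
}
```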
@@ -96,6 +96,9 @@ class BQTable(collections.namedtuple('BQTable', 'project_id dataset_id table_id' return "bq://" + self.project_id + "/" + \ self.dataset.dataset_id + "/" + self.table_id + def __str__(self): + return "%s:%s.%s" % (self.project_id, self.dataset_id, self.table_id) + class BigqueryClient(object): """A client for Google BigQuery.
1
# -*- coding: utf-8 -*- # # Copyright 2015 Twitter Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import collections import logging import luigi.target import time logger = logging.getLogger('luigi-interface') try: import httplib2 import oauth2client from googleapiclient import discovery from googleapiclient import http except ImportError: logger.warning('Bigquery module imported, but google-api-python-client is ' 'not installed. Any bigquery task will fail') class CreateDisposition(object): CREATE_IF_NEEDED = 'CREATE_IF_NEEDED' CREATE_NEVER = 'CREATE_NEVER' class WriteDisposition(object): WRITE_TRUNCATE = 'WRITE_TRUNCATE' WRITE_APPEND = 'WRITE_APPEND' WRITE_EMPTY = 'WRITE_EMPTY' class QueryMode(object): INTERACTIVE = 'INTERACTIVE' BATCH = 'BATCH' class SourceFormat(object): CSV = 'CSV' DATASTORE_BACKUP = 'DATASTORE_BACKUP' NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON' class FieldDelimiter(object): """ The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (','). https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load """ COMMA = ',' # Default TAB = "\t" PIPE = "|" class Encoding(object): """ [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties. """ UTF_8 = 'UTF-8' ISO_8859_1 = 'ISO-8859-1' BQDataset = collections.namedtuple('BQDataset', 'project_id dataset_id') class BQTable(collections.namedtuple('BQTable', 'project_id dataset_id table_id')): @property def dataset(self): return BQDataset(project_id=self.project_id, dataset_id=self.dataset_id) @property def uri(self): return "bq://" + self.project_id + "/" + \ self.dataset.dataset_id + "/" + self.table_id class BigqueryClient(object): """A client for Google BigQuery. For details of how authentication and the descriptor work, see the documentation for the GCS client. The descriptor URL for BigQuery is https://www.googleapis.com/discovery/v1/apis/bigquery/v2/rest """ def __init__(self, oauth_credentials=None, descriptor='', http_=None): http_ = http_ or httplib2.Http() if not oauth_credentials: oauth_credentials = oauth2client.client.GoogleCredentials.get_application_default() if descriptor: self.client = discovery.build_from_document(descriptor, credentials=oauth_credentials, http=http_) else: self.client = discovery.build('bigquery', 'v2', credentials=oauth_credentials, http=http_) def dataset_exists(self, dataset): """Returns whether the given dataset exists. 
:param dataset: :type dataset: BQDataset """ try: self.client.datasets().get(projectId=dataset.project_id, datasetId=dataset.dataset_id).execute() except http.HttpError as ex: if ex.resp.status == 404: return False raise return True def table_exists(self, table): """Returns whether the given table exists. :param table: :type table: BQTable """ if not self.dataset_exists(table.dataset): return False try: self.client.tables().get(projectId=table.project_id, datasetId=table.dataset_id, tableId=table.table_id).execute() except http.HttpError as ex: if ex.resp.status == 404: return False raise return True def make_dataset(self, dataset, raise_if_exists=False, body={}): """Creates a new dataset with the default permissions. :param dataset: :type dataset: BQDataset :param raise_if_exists: whether to raise an exception if the dataset already exists. :raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists """ try: self.client.datasets().insert(projectId=dataset.project_id, body=dict( {'id': '{}:{}'.format(dataset.project_id, dataset.dataset_id)}, **body)).execute() except http.HttpError as ex: if ex.resp.status == 409: if raise_if_exists: raise luigi.target.FileAlreadyExists() else: raise def delete_dataset(self, dataset, delete_nonempty=True): """Deletes a dataset (and optionally any tables in it), if it exists. :param dataset: :type dataset: BQDataset :param delete_nonempty: if true, will delete any tables before deleting the dataset """ if not self.dataset_exists(dataset): return self.client.datasets().delete(projectId=dataset.project_id, datasetId=dataset.dataset_id, deleteContents=delete_nonempty).execute() def delete_table(self, table): """Deletes a table, if it exists. :param table: :type table: BQTable """ if not self.table_exists(table): return self.client.tables().delete(projectId=table.project_id, datasetId=table.dataset_id, tableId=table.table_id).execute() def list_datasets(self, project_id): """Returns the list of datasets in a given project. :param project_id: :type project_id: str """ request = self.client.datasets().list(projectId=project_id, maxResults=1000) response = request.execute() while response is not None: for ds in response.get('datasets', []): yield ds['datasetReference']['datasetId'] request = self.client.datasets().list_next(request, response) if request is None: break response = request.execute() def list_tables(self, dataset): """Returns the list of tables in a given dataset. :param dataset: :type dataset: BQDataset """ request = self.client.tables().list(projectId=dataset.project_id, datasetId=dataset.dataset_id, maxResults=1000) response = request.execute() while response is not None: for t in response.get('tables', []): yield t['tableReference']['tableId'] request = self.client.tables().list_next(request, response) if request is None: break response = request.execute() def get_view(self, table): """Returns the SQL query for a view, or None if it doesn't exist or is not a view. :param table: The table containing the view. :type table: BQTable """ request = self.client.tables().get(projectId=table.project_id, datasetId=table.dataset_id, tableId=table.table_id) try: response = request.execute() except http.HttpError as ex: if ex.resp.status == 404: return None raise return response['view']['query'] if 'view' in response else None def update_view(self, table, view): """Updates the SQL query for a view. If the output table exists, it is replaced with the supplied view query. Otherwise a new table is created with this view. 
:param table: The table to contain the view. :type table: BQTable :param view: The SQL query for the view. :type view: str """ body = { 'tableReference': { 'projectId': table.project_id, 'datasetId': table.dataset_id, 'tableId': table.table_id }, 'view': { 'query': view } } if self.table_exists(table): self.client.tables().update(projectId=table.project_id, datasetId=table.dataset_id, tableId=table.table_id, body=body).execute() else: self.client.tables().insert(projectId=table.project_id, datasetId=table.dataset_id, body=body).execute() def run_job(self, project_id, body, dataset=None): """Runs a bigquery "job". See the documentation for the format of body. .. note:: You probably don't need to use this directly. Use the tasks defined below. :param dataset: :type dataset: BQDataset """ if dataset and not self.dataset_exists(dataset): self.make_dataset(dataset) new_job = self.client.jobs().insert(projectId=project_id, body=body).execute() job_id = new_job['jobReference']['jobId'] logger.info('Started import job %s:%s', project_id, job_id) while True: status = self.client.jobs().get(projectId=project_id, jobId=job_id).execute() if status['status']['state'] == 'DONE': if status['status'].get('errors'): raise Exception('Bigquery job failed: {}'.format(status['status']['errors'])) return logger.info('Waiting for job %s:%s to complete...', project_id, job_id) time.sleep(5.0) def copy(self, source_table, dest_table, create_disposition=CreateDisposition.CREATE_IF_NEEDED, write_disposition=WriteDisposition.WRITE_TRUNCATE): """Copies (or appends) a table to another table. :param source_table: :type source_table: BQTable :param dest_table: :type dest_table: BQTable :param create_disposition: whether to create the table if needed :type create_disposition: CreateDisposition :param write_disposition: whether to append/truncate/fail if the table exists :type write_disposition: WriteDisposition """ job = { "projectId": dest_table.project_id, "configuration": { "copy": { "sourceTable": { "projectId": source_table.project_id, "datasetId": source_table.dataset_id, "tableId": source_table.table_id, }, "destinationTable": { "projectId": dest_table.project_id, "datasetId": dest_table.dataset_id, "tableId": dest_table.table_id, }, "createDisposition": create_disposition, "writeDisposition": write_disposition, } } } self.run_job(dest_table.project_id, job, dataset=dest_table.dataset) class BigqueryTarget(luigi.target.Target): def __init__(self, project_id, dataset_id, table_id, client=None): self.table = BQTable(project_id=project_id, dataset_id=dataset_id, table_id=table_id) self.client = client or BigqueryClient() @classmethod def from_bqtable(cls, table, client=None): """A constructor that takes a :py:class:`BQTable`. :param table: :type table: BQTable """ return cls(table.project_id, table.dataset_id, table.table_id, client=client) def exists(self): return self.client.table_exists(self.table) def __str__(self): return str(self.table) class MixinBigqueryBulkComplete(object): """ Allows to efficiently check if a range of BigqueryTargets are complete. This enables scheduling tasks with luigi range tools. If you implement a custom Luigi task with a BigqueryTarget output, make sure to also inherit from this mixin to enable range support. 
""" @classmethod def bulk_complete(cls, parameter_tuples): if len(parameter_tuples) < 1: return # Instantiate the tasks to inspect them tasks_with_params = [(cls(p), p) for p in parameter_tuples] # Grab the set of BigQuery datasets we are interested in datasets = set([t.output().table.dataset for t, p in tasks_with_params]) logger.info('Checking datasets %s for available tables', datasets) # Query the available tables for all datasets client = tasks_with_params[0][0].output().client available_datasets = filter(client.dataset_exists, datasets) available_tables = {d: set(client.list_tables(d)) for d in available_datasets} # Return parameter_tuples belonging to available tables for t, p in tasks_with_params: table = t.output().table if table.table_id in available_tables.get(table.dataset, []): yield p class BigqueryLoadTask(MixinBigqueryBulkComplete, luigi.Task): """Load data into bigquery from GCS.""" @property def source_format(self): """The source format to use (see :py:class:`SourceFormat`).""" return SourceFormat.NEWLINE_DELIMITED_JSON @property def encoding(self): """The encoding of the data that is going to be loaded (see :py:class:`Encoding`).""" return Encoding.UTF_8 @property def write_disposition(self): """What to do if the table already exists. By default this will fail the job. See :py:class:`WriteDisposition`""" return WriteDisposition.WRITE_EMPTY @property def schema(self): """Schema in the format defined at https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.schema. If the value is falsy, it is omitted and inferred by bigquery, which only works for CSV inputs.""" return [] @property def max_bad_records(self): """ The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result.""" return 0 @property def field_delimter(self): """The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character.""" return FieldDelimiter.COMMA @property def source_uris(self): """The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.""" return [x.path for x in luigi.task.flatten(self.input())] @property def skip_leading_rows(self): """The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.""" return 0 @property def allow_jagged_rows(self): """Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.""" return False @property def ignore_unknown_values(self): """Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. 
The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names""" return False @property def allow_quoted_new_lines(self): """ Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.""" return False def run(self): output = self.output() assert isinstance(output, BigqueryTarget), 'Output should be a bigquery target, not %s' % (output) bq_client = output.client source_uris = self.source_uris() assert all(x.startswith('gs://') for x in source_uris) job = { 'projectId': output.table.project_id, 'configuration': { 'load': { 'destinationTable': { 'projectId': output.table.project_id, 'datasetId': output.table.dataset_id, 'tableId': output.table.table_id, }, 'encoding': self.encoding, 'sourceFormat': self.source_format, 'writeDisposition': self.write_disposition, 'sourceUris': source_uris, 'maxBadRecords': self.max_bad_records, 'ignoreUnknownValues': self.ignore_unknown_values } } } if self.source_format == SourceFormat.CSV: job['configuration']['load']['fieldDelimiter'] = self.field_delimter job['configuration']['load']['skipLeadingRows'] = self.skip_leading_rows job['configuration']['load']['allowJaggedRows'] = self.allow_jagged_rows job['configuration']['load']['allowQuotedNewlines'] = self.allow_quoted_new_lines if self.schema: job['configuration']['load']['schema'] = {'fields': self.schema} bq_client.run_job(output.table.project_id, job, dataset=output.table.dataset) class BigqueryRunQueryTask(MixinBigqueryBulkComplete, luigi.Task): @property def write_disposition(self): """What to do if the table already exists. By default this will fail the job. See :py:class:`WriteDisposition`""" return WriteDisposition.WRITE_TRUNCATE @property def create_disposition(self): """Whether to create the table or not. See :py:class:`CreateDisposition`""" return CreateDisposition.CREATE_IF_NEEDED @property def flatten_results(self): """Flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to False.""" return True @property def query(self): """The query, in text form.""" raise NotImplementedError() @property def query_mode(self): """The query mode. See :py:class:`QueryMode`.""" return QueryMode.INTERACTIVE def run(self): output = self.output() assert isinstance(output, BigqueryTarget), 'Output should be a bigquery target, not %s' % (output) query = self.query assert query, 'No query was provided' bq_client = output.client logger.info('Launching Query') logger.info('Query destination: %s (%s)', output, self.write_disposition) logger.info('Query SQL: %s', query) job = { 'projectId': output.table.project_id, 'configuration': { 'query': { 'query': query, 'priority': self.query_mode, 'destinationTable': { 'projectId': output.table.project_id, 'datasetId': output.table.dataset_id, 'tableId': output.table.table_id, }, 'allowLargeResults': True, 'createDisposition': self.create_disposition, 'writeDisposition': self.write_disposition, 'flattenResults': self.flatten_results } } } bq_client.run_job(output.table.project_id, job, dataset=output.table.dataset) class BigqueryCreateViewTask(luigi.Task): """ Creates (or updates) a view in BigQuery. The output of this task needs to be a BigQueryTarget. Instances of this class should specify the view SQL in the view property. If a view already exist in BigQuery at output(), it will be updated. 
""" @property def view(self): """The SQL query for the view, in text form.""" raise NotImplementedError() def complete(self): output = self.output() assert isinstance(output, BigqueryTarget), 'Output must be a bigquery target, not %s' % (output) if not output.exists(): return False existing_view = output.client.get_view(output.table) return existing_view == self.view def run(self): output = self.output() assert isinstance(output, BigqueryTarget), 'Output must be a bigquery target, not %s' % (output) view = self.view assert view, 'No view was provided' logger.info('Create view') logger.info('Destination: %s', output) logger.info('View SQL: %s', view) output.client.update_view(output.table, view) class ExternalBigqueryTask(MixinBigqueryBulkComplete, luigi.ExternalTask): """ An external task for a BigQuery target. """ pass
1
15,887
Can you change this to use `.format()`?
spotify-luigi
py
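The change requested in the spotify-luigi record above is mechanical; here is a minimal sketch of the patched `__str__` using `str.format()` instead of `%`-interpolation. The namedtuple definition is copied from the file above so the snippet runs on its own; the rest of the class is omitted.

```python
import collections


class BQTable(collections.namedtuple('BQTable', 'project_id dataset_id table_id')):

    def __str__(self):
        # Renders as "project:dataset.table".
        return "{}:{}.{}".format(self.project_id, self.dataset_id, self.table_id)


# Example: prints "my-project:my_dataset.my_table"
print(str(BQTable(project_id="my-project", dataset_id="my_dataset", table_id="my_table")))
```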
@@ -308,13 +308,15 @@ func (r *DefaultRuleRenderer) endpointIptablesChain( }, }) - if dropEncap { + if dropEncap && r.Config.DropVXLANPacketsFromWorkloads { rules = append(rules, Rule{ Match: Match().ProtocolNum(ProtoUDP). DestPorts(uint16(r.Config.VXLANPort)), Action: DropAction{}, Comment: []string{"Drop VXLAN encapped packets originating in pods"}, }) + } + if dropEncap && r.Config.DropIPIPPacketsFromWorkloads { rules = append(rules, Rule{ Match: Match().ProtocolNum(ProtoIPIP), Action: DropAction{},
1
// Copyright (c) 2016-2020 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rules import ( log "github.com/sirupsen/logrus" "github.com/projectcalico/felix/hashutils" . "github.com/projectcalico/felix/iptables" "github.com/projectcalico/felix/proto" ) const ( dropEncap = true dontDropEncap = false ) func (r *DefaultRuleRenderer) WorkloadEndpointToIptablesChains( ifaceName string, epMarkMapper EndpointMarkMapper, adminUp bool, ingressPolicies []string, egressPolicies []string, profileIDs []string, ) []*Chain { result := []*Chain{} result = append(result, // Chain for traffic _to_ the endpoint. r.endpointIptablesChain( ingressPolicies, profileIDs, ifaceName, PolicyInboundPfx, ProfileInboundPfx, WorkloadToEndpointPfx, "", // No fail-safe chains for workloads. chainTypeNormal, adminUp, r.filterAllowAction, // Workload endpoint chains are only used in the filter table dontDropEncap, ), // Chain for traffic _from_ the endpoint. r.endpointIptablesChain( egressPolicies, profileIDs, ifaceName, PolicyOutboundPfx, ProfileOutboundPfx, WorkloadFromEndpointPfx, "", // No fail-safe chains for workloads. chainTypeNormal, adminUp, r.filterAllowAction, // Workload endpoint chains are only used in the filter table dropEncap, ), ) if r.KubeIPVSSupportEnabled { // Chain for setting endpoint mark of an endpoint. result = append(result, r.endpointSetMarkChain( ifaceName, epMarkMapper, SetEndPointMarkPfx, ), ) } return result } func (r *DefaultRuleRenderer) HostEndpointToFilterChains( ifaceName string, epMarkMapper EndpointMarkMapper, ingressPolicyNames []string, egressPolicyNames []string, ingressForwardPolicyNames []string, egressForwardPolicyNames []string, profileIDs []string, ) []*Chain { log.WithField("ifaceName", ifaceName).Debug("Rendering filter host endpoint chain.") result := []*Chain{} result = append(result, // Chain for output traffic _to_ the endpoint. r.endpointIptablesChain( egressPolicyNames, profileIDs, ifaceName, PolicyOutboundPfx, ProfileOutboundPfx, HostToEndpointPfx, ChainFailsafeOut, chainTypeNormal, true, // Host endpoints are always admin up. r.filterAllowAction, dontDropEncap, ), // Chain for input traffic _from_ the endpoint. r.endpointIptablesChain( ingressPolicyNames, profileIDs, ifaceName, PolicyInboundPfx, ProfileInboundPfx, HostFromEndpointPfx, ChainFailsafeIn, chainTypeNormal, true, // Host endpoints are always admin up. r.filterAllowAction, dontDropEncap, ), // Chain for forward traffic _to_ the endpoint. r.endpointIptablesChain( egressForwardPolicyNames, profileIDs, ifaceName, PolicyOutboundPfx, ProfileOutboundPfx, HostToEndpointForwardPfx, "", // No fail-safe chains for forward traffic. chainTypeForward, true, // Host endpoints are always admin up. r.filterAllowAction, dontDropEncap, ), // Chain for forward traffic _from_ the endpoint. r.endpointIptablesChain( ingressForwardPolicyNames, profileIDs, ifaceName, PolicyInboundPfx, ProfileInboundPfx, HostFromEndpointForwardPfx, "", // No fail-safe chains for forward traffic. 
chainTypeForward, true, // Host endpoints are always admin up. r.filterAllowAction, dontDropEncap, ), ) if r.KubeIPVSSupportEnabled { // Chain for setting endpoint mark of an endpoint. result = append(result, r.endpointSetMarkChain( ifaceName, epMarkMapper, SetEndPointMarkPfx, ), ) } return result } func (r *DefaultRuleRenderer) HostEndpointToRawChains( ifaceName string, ingressPolicyNames []string, egressPolicyNames []string, ) []*Chain { log.WithField("ifaceName", ifaceName).Debug("Rendering raw (untracked) host endpoint chain.") return []*Chain{ // Chain for traffic _to_ the endpoint. r.endpointIptablesChain( egressPolicyNames, nil, // We don't render profiles into the raw table. ifaceName, PolicyOutboundPfx, ProfileOutboundPfx, HostToEndpointPfx, ChainFailsafeOut, chainTypeUntracked, true, // Host endpoints are always admin up. AcceptAction{}, dontDropEncap, ), // Chain for traffic _from_ the endpoint. r.endpointIptablesChain( ingressPolicyNames, nil, // We don't render profiles into the raw table. ifaceName, PolicyInboundPfx, ProfileInboundPfx, HostFromEndpointPfx, ChainFailsafeIn, chainTypeUntracked, true, // Host endpoints are always admin up. AcceptAction{}, dontDropEncap, ), } } func (r *DefaultRuleRenderer) HostEndpointToMangleChains( ifaceName string, preDNATPolicyNames []string, ) []*Chain { log.WithField("ifaceName", ifaceName).Debug("Rendering pre-DNAT host endpoint chain.") return []*Chain{ // Chain for traffic _from_ the endpoint. Pre-DNAT policy does not apply to // outgoing traffic through a host endpoint. r.endpointIptablesChain( preDNATPolicyNames, nil, // We don't render profiles into the raw table. ifaceName, PolicyInboundPfx, ProfileInboundPfx, HostFromEndpointPfx, ChainFailsafeIn, chainTypePreDNAT, true, // Host endpoints are always admin up. r.mangleAllowAction, dontDropEncap, ), } } type endpointChainType int const ( chainTypeNormal endpointChainType = iota chainTypeUntracked chainTypePreDNAT chainTypeForward ) func (r *DefaultRuleRenderer) endpointSetMarkChain( name string, epMarkMapper EndpointMarkMapper, endpointPrefix string, ) *Chain { rules := []Rule{} chainName := EndpointChainName(endpointPrefix, name) if endPointMark, err := epMarkMapper.GetEndpointMark(name); err == nil { // Set endpoint mark. rules = append(rules, Rule{ Action: SetMaskedMarkAction{ Mark: endPointMark, Mask: epMarkMapper.GetMask()}, }) } return &Chain{ Name: chainName, Rules: rules, } } func (r *DefaultRuleRenderer) endpointIptablesChain( policyNames []string, profileIds []string, name string, policyPrefix PolicyChainNamePrefix, profilePrefix ProfileChainNamePrefix, endpointPrefix string, failsafeChain string, chainType endpointChainType, adminUp bool, allowAction Action, dropEncap bool, ) *Chain { rules := []Rule{} chainName := EndpointChainName(endpointPrefix, name) if !adminUp { // Endpoint is admin-down, drop all traffic to/from it. rules = append(rules, Rule{ Match: Match(), Action: DropAction{}, Comment: []string{"Endpoint admin disabled"}, }) return &Chain{ Name: chainName, Rules: rules, } } if chainType != chainTypeUntracked { // Tracked chain: install conntrack rules, which implement our stateful connections. // This allows return traffic associated with a previously-permitted request. rules = r.appendConntrackRules(rules, allowAction) } // First set up failsafes. 
if failsafeChain != "" { rules = append(rules, Rule{ Action: JumpAction{Target: failsafeChain}, }) } // Start by ensuring that the accept mark bit is clear, policies set that bit to indicate // that they accepted the packet. rules = append(rules, Rule{ Action: ClearMarkAction{ Mark: r.IptablesMarkAccept, }, }) if dropEncap { rules = append(rules, Rule{ Match: Match().ProtocolNum(ProtoUDP). DestPorts(uint16(r.Config.VXLANPort)), Action: DropAction{}, Comment: []string{"Drop VXLAN encapped packets originating in pods"}, }) rules = append(rules, Rule{ Match: Match().ProtocolNum(ProtoIPIP), Action: DropAction{}, Comment: []string{"Drop IPinIP encapped packets originating in pods"}, }) } if len(policyNames) > 0 { // Clear the "pass" mark. If a policy sets that mark, we'll skip the rest of the policies and // continue processing the profiles, if there are any. rules = append(rules, Rule{ Comment: []string{"Start of policies"}, Action: ClearMarkAction{ Mark: r.IptablesMarkPass, }, }) // Then, jump to each policy in turn. for _, polID := range policyNames { polChainName := PolicyChainName( policyPrefix, &proto.PolicyID{Name: polID}, ) // If a previous policy didn't set the "pass" mark, jump to the policy. rules = append(rules, Rule{ Match: Match().MarkClear(r.IptablesMarkPass), Action: JumpAction{Target: polChainName}, }) // If policy marked packet as accepted, it returns, setting the accept // mark bit. if chainType == chainTypeUntracked { // For an untracked policy, map allow to "NOTRACK and ALLOW". rules = append(rules, Rule{ Match: Match().MarkSingleBitSet(r.IptablesMarkAccept), Action: NoTrackAction{}, }) } // If accept bit is set, return from this chain. We don't immediately // accept because there may be other policy still to apply. rules = append(rules, Rule{ Match: Match().MarkSingleBitSet(r.IptablesMarkAccept), Action: ReturnAction{}, Comment: []string{"Return if policy accepted"}, }) } if chainType == chainTypeNormal || chainType == chainTypeForward { // When rendering normal and forward rules, if no policy marked the packet as "pass", drop the // packet. // // For untracked and pre-DNAT rules, we don't do that because there may be // normal rules still to be applied to the packet in the filter table. rules = append(rules, Rule{ Match: Match().MarkClear(r.IptablesMarkPass), Action: DropAction{}, Comment: []string{"Drop if no policies passed packet"}, }) } } else if chainType == chainTypeForward { // Forwarded traffic is allowed when there are no policies with // applyOnForward that apply to this endpoint (and in this direction). rules = append(rules, Rule{ Action: SetMarkAction{Mark: r.IptablesMarkAccept}, Comment: []string{"Allow forwarded traffic by default"}, }) rules = append(rules, Rule{ Action: ReturnAction{}, Comment: []string{"Return for accepted forward traffic"}, }) } if chainType == chainTypeNormal { // Then, jump to each profile in turn. for _, profileID := range profileIds { profChainName := ProfileChainName(profilePrefix, &proto.ProfileID{Name: profileID}) rules = append(rules, Rule{Action: JumpAction{Target: profChainName}}, // If policy marked packet as accepted, it returns, setting the // accept mark bit. If that is set, return from this chain. Rule{ Match: Match().MarkSingleBitSet(r.IptablesMarkAccept), Action: ReturnAction{}, Comment: []string{"Return if profile accepted"}, }) } // When rendering normal rules, if no profile marked the packet as accepted, drop // the packet. 
// // For untracked rules, we don't do that because there may be tracked rules // still to be applied to the packet in the filter table. //if dropIfNoProfilesMatched { rules = append(rules, Rule{ Match: Match(), Action: DropAction{}, Comment: []string{"Drop if no profiles matched"}, }) //} } return &Chain{ Name: chainName, Rules: rules, } } func (r *DefaultRuleRenderer) appendConntrackRules(rules []Rule, allowAction Action) []Rule { // Allow return packets for established connections. if allowAction != (AcceptAction{}) { // If we've been asked to return instead of accept the packet immediately, // make sure we flag the packet as allowed. rules = append(rules, Rule{ Match: Match().ConntrackState("RELATED,ESTABLISHED"), Action: SetMarkAction{Mark: r.IptablesMarkAccept}, }, ) } rules = append(rules, Rule{ Match: Match().ConntrackState("RELATED,ESTABLISHED"), Action: allowAction, }, ) if !r.Config.DisableConntrackInvalid { // Drop packets that aren't either a valid handshake or part of an established // connection. rules = append(rules, Rule{ Match: Match().ConntrackState("INVALID"), Action: DropAction{}, }) } return rules } func EndpointChainName(prefix string, ifaceName string) string { return hashutils.GetLengthLimitedID( prefix, ifaceName, MaxChainNameLength, ) }
1
18,306
Maybe the parameter should be consulted higher up the stack so that we only set dropEncap if we're rendering a workload egress chain and the flag is set?
projectcalico-felix
go
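A sketch of the alternative the reviewer describes in the projectcalico-felix record above: split the single dropEncap argument into per-protocol flags and make the decision at the workload egress call site, so endpointIptablesChain never reads the config itself. The parameter names dropVXLANEncap / dropIPIPEncap are illustrative only, not Felix's actual identifiers; the config fields are the ones introduced by the patch, and the call arguments mirror the file shown above.

```go
// In WorkloadEndpointToIptablesChains, for the "from endpoint" (egress) chain only:
r.endpointIptablesChain(
	egressPolicies,
	profileIDs,
	ifaceName,
	PolicyOutboundPfx,
	ProfileOutboundPfx,
	WorkloadFromEndpointPfx,
	"", // No fail-safe chains for workloads.
	chainTypeNormal,
	adminUp,
	r.filterAllowAction,
	r.Config.DropVXLANPacketsFromWorkloads, // replaces the dropEncap constant
	r.Config.DropIPIPPacketsFromWorkloads,
)

// ...and inside endpointIptablesChain the two parameters are used as-is,
// with no further config lookup:
if dropVXLANEncap {
	rules = append(rules, Rule{
		Match:   Match().ProtocolNum(ProtoUDP).DestPorts(uint16(r.Config.VXLANPort)),
		Action:  DropAction{},
		Comment: []string{"Drop VXLAN encapped packets originating in pods"},
	})
}
if dropIPIPEncap {
	rules = append(rules, Rule{
		Match:   Match().ProtocolNum(ProtoIPIP),
		Action:  DropAction{},
		Comment: []string{"Drop IPinIP encapped packets originating in pods"},
	})
}
```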
@@ -47,11 +47,11 @@ type SectorBuilder interface { // which will fit into a newly-provisioned staged sector. GetMaxUserBytesPerStagedSector() (uint64, error) - // GeneratePoST creates a proof-of-spacetime for the replicas managed by + // GeneratePoSt creates a proof-of-spacetime for the replicas managed by // the SectorBuilder. Its output includes the proof-of-spacetime proof which // is posted to the blockchain along with any faults. The proof can be // verified by the VerifyPoSt method on the Verifier interface. - GeneratePoST(GeneratePoSTRequest) (GeneratePoSTResponse, error) + GeneratePoSt(GeneratePoStRequest) (GeneratePoStResponse, error) // Close signals that this SectorBuilder is no longer in use. SectorBuilder // metadata will not be deleted when Close is called; an equivalent
1
package sectorbuilder import ( "context" "io" "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid" cbor "gx/ipfs/QmcZLyosDwMKdB6NLRsiss9HXzDPhVhhRtPy67JFKTDQDX/go-ipld-cbor" "github.com/filecoin-project/go-filecoin/proofs" ) func init() { cbor.RegisterCborType(PieceInfo{}) } // SectorBuilder provides an interface through which user piece-bytes can be // written, sealed into sectors, and later unsealed and read. type SectorBuilder interface { // AddPiece writes the given piece into an unsealed sector and returns the // id of that sector. This method has a race; it is possible that the // sector into which the piece-bytes were written is sealed before this // method returns. In the real world this should not happen, as sealing // takes a long time to complete. In tests, where sealing happens // near-instantaneously, it is possible to exercise this race. // // TODO: Replace this method with something that accepts a piece cid and a // value which represents the number of bytes in the piece and returns a // sector id (to which piece bytes will be written) and a Writer. AddPiece(ctx context.Context, pi *PieceInfo) (sectorID uint64, err error) // ReadPieceFromSealedSector produces a Reader used to get original // piece-bytes from a sealed sector. ReadPieceFromSealedSector(pieceCid cid.Cid) (io.Reader, error) // SealAllStagedSectors seals any non-empty staged sectors. SealAllStagedSectors(ctx context.Context) error // SectorSealResults returns an unbuffered channel that is sent a value // whenever sealing completes. All calls to SectorSealResults will get the // same channel. Values will be either a *SealedSectorMetadata or an error. A // *SealedSectorMetadata will be sent to the returned channel only once, regardless // of the number of times SectorSealResults is called. SectorSealResults() <-chan SectorSealResult // GetMaxUserBytesPerStagedSector produces the number of user piece-bytes // which will fit into a newly-provisioned staged sector. GetMaxUserBytesPerStagedSector() (uint64, error) // GeneratePoST creates a proof-of-spacetime for the replicas managed by // the SectorBuilder. Its output includes the proof-of-spacetime proof which // is posted to the blockchain along with any faults. The proof can be // verified by the VerifyPoSt method on the Verifier interface. GeneratePoST(GeneratePoSTRequest) (GeneratePoSTResponse, error) // Close signals that this SectorBuilder is no longer in use. SectorBuilder // metadata will not be deleted when Close is called; an equivalent // SectorBuilder can be created later by applying the Init function to the // arguments used to create the instance being closed. Close() error } // SectorSealResult represents the outcome of a sector's sealing. type SectorSealResult struct { SectorID uint64 // SealingErr contains any error encountered while sealing. // Note: Either SealingResult or SealingErr may be non-nil, not both. SealingErr error // SealingResult contains the successful output of the sealing operation. // Note: Either SealingResult or SealingErr may be non-nil, not both. 
SealingResult *SealedSectorMetadata } // PieceInfo is information about a filecoin piece type PieceInfo struct { Ref cid.Cid `json:"ref"` Size uint64 `json:"size"` // TODO: use BytesAmount } // SealedSectorMetadata is a sector that has been sealed by the PoRep setup process type SealedSectorMetadata struct { CommD proofs.CommD CommR proofs.CommR // deprecated (will be removed soon) CommRStar proofs.CommRStar Pieces []*PieceInfo // deprecated (will be removed soon) Proof proofs.SealProof SectorID uint64 } // GeneratePoSTRequest represents a request to generate a proof-of-spacetime. type GeneratePoSTRequest struct { CommRs []proofs.CommR ChallengeSeed proofs.PoStChallengeSeed } // GeneratePoSTResponse contains PoST proof and any faults that may have occurred. type GeneratePoSTResponse struct { Faults []uint64 Proof proofs.PoStProof }
1
17,493
new casing is less ELiTE
filecoin-project-venus
go
@@ -179,6 +179,14 @@ bool wlr_renderer_blit_dmabuf(struct wlr_renderer *r, return r->impl->blit_dmabuf(r, dst, src); } +GLuint wlr_renderer_renderbuffer_from_image(struct wlr_renderer *r, + EGLImageKHR image) { + if (!r->impl->renderbuffer_from_image) { + return false; + } + return r->impl->renderbuffer_from_image(image); +} + bool wlr_renderer_format_supported(struct wlr_renderer *r, enum wl_shm_format fmt) { return r->impl->format_supported(r, fmt);
1
#include <assert.h> #include <stdbool.h> #include <stdlib.h> #include <wlr/render/gles2.h> #include <wlr/render/interface.h> #include <wlr/render/wlr_renderer.h> #include <wlr/types/wlr_matrix.h> #include <wlr/util/log.h> #include "util/signal.h" void wlr_renderer_init(struct wlr_renderer *renderer, const struct wlr_renderer_impl *impl) { assert(impl->begin); assert(impl->clear); assert(impl->scissor); assert(impl->render_subtexture_with_matrix); assert(impl->render_quad_with_matrix); assert(impl->render_ellipse_with_matrix); assert(impl->formats); assert(impl->format_supported); assert(impl->texture_from_pixels); renderer->impl = impl; wl_signal_init(&renderer->events.destroy); } void wlr_renderer_destroy(struct wlr_renderer *r) { if (!r) { return; } wlr_signal_emit_safe(&r->events.destroy, r); if (r->impl && r->impl->destroy) { r->impl->destroy(r); } else { free(r); } } void wlr_renderer_begin(struct wlr_renderer *r, int width, int height) { assert(!r->rendering); r->impl->begin(r, width, height); r->rendering = true; } void wlr_renderer_end(struct wlr_renderer *r) { assert(r->rendering); if (r->impl->end) { r->impl->end(r); } r->rendering = false; } void wlr_renderer_clear(struct wlr_renderer *r, const float color[static 4]) { assert(r->rendering); r->impl->clear(r, color); } void wlr_renderer_scissor(struct wlr_renderer *r, struct wlr_box *box) { assert(r->rendering); r->impl->scissor(r, box); } bool wlr_render_texture(struct wlr_renderer *r, struct wlr_texture *texture, const float projection[static 9], int x, int y, float alpha) { struct wlr_box box = { .x = x, .y = y }; wlr_texture_get_size(texture, &box.width, &box.height); float matrix[9]; wlr_matrix_project_box(matrix, &box, WL_OUTPUT_TRANSFORM_NORMAL, 0, projection); return wlr_render_texture_with_matrix(r, texture, matrix, alpha); } bool wlr_render_texture_with_matrix(struct wlr_renderer *r, struct wlr_texture *texture, const float matrix[static 9], float alpha) { struct wlr_fbox box = { .x = 0, .y = 0, .width = texture->width, .height = texture->height, }; return wlr_render_subtexture_with_matrix(r, texture, &box, matrix, alpha); } bool wlr_render_subtexture_with_matrix(struct wlr_renderer *r, struct wlr_texture *texture, const struct wlr_fbox *box, const float matrix[static 9], float alpha) { assert(r->rendering); return r->impl->render_subtexture_with_matrix(r, texture, box, matrix, alpha); } void wlr_render_rect(struct wlr_renderer *r, const struct wlr_box *box, const float color[static 4], const float projection[static 9]) { assert(box->width > 0 && box->height > 0); float matrix[9]; wlr_matrix_project_box(matrix, box, WL_OUTPUT_TRANSFORM_NORMAL, 0, projection); wlr_render_quad_with_matrix(r, color, matrix); } void wlr_render_quad_with_matrix(struct wlr_renderer *r, const float color[static 4], const float matrix[static 9]) { assert(r->rendering); r->impl->render_quad_with_matrix(r, color, matrix); } void wlr_render_ellipse(struct wlr_renderer *r, const struct wlr_box *box, const float color[static 4], const float projection[static 9]) { assert(box->width > 0 && box->height > 0); float matrix[9]; wlr_matrix_project_box(matrix, box, WL_OUTPUT_TRANSFORM_NORMAL, 0, projection); wlr_render_ellipse_with_matrix(r, color, matrix); } void wlr_render_ellipse_with_matrix(struct wlr_renderer *r, const float color[static 4], const float matrix[static 9]) { assert(r->rendering); r->impl->render_ellipse_with_matrix(r, color, matrix); } const enum wl_shm_format *wlr_renderer_get_formats( struct wlr_renderer *r, size_t *len) { return 
r->impl->formats(r, len); } bool wlr_renderer_resource_is_wl_drm_buffer(struct wlr_renderer *r, struct wl_resource *resource) { if (!r->impl->resource_is_wl_drm_buffer) { return false; } return r->impl->resource_is_wl_drm_buffer(r, resource); } void wlr_renderer_wl_drm_buffer_get_size(struct wlr_renderer *r, struct wl_resource *buffer, int *width, int *height) { if (!r->impl->wl_drm_buffer_get_size) { return; } return r->impl->wl_drm_buffer_get_size(r, buffer, width, height); } const struct wlr_drm_format_set *wlr_renderer_get_dmabuf_formats( struct wlr_renderer *r) { if (!r->impl->get_dmabuf_formats) { return NULL; } return r->impl->get_dmabuf_formats(r); } bool wlr_renderer_read_pixels(struct wlr_renderer *r, enum wl_shm_format fmt, uint32_t *flags, uint32_t stride, uint32_t width, uint32_t height, uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y, void *data) { if (!r->impl->read_pixels) { return false; } return r->impl->read_pixels(r, fmt, flags, stride, width, height, src_x, src_y, dst_x, dst_y, data); } bool wlr_renderer_blit_dmabuf(struct wlr_renderer *r, struct wlr_dmabuf_attributes *dst, struct wlr_dmabuf_attributes *src) { assert(!r->rendering); if (!r->impl->blit_dmabuf) { return false; } return r->impl->blit_dmabuf(r, dst, src); } bool wlr_renderer_format_supported(struct wlr_renderer *r, enum wl_shm_format fmt) { return r->impl->format_supported(r, fmt); } bool wlr_renderer_init_wl_display(struct wlr_renderer *r, struct wl_display *wl_display) { if (wl_display_init_shm(wl_display)) { wlr_log(WLR_ERROR, "Failed to initialize shm"); return false; } size_t len; const enum wl_shm_format *formats = wlr_renderer_get_formats(r, &len); if (formats == NULL) { wlr_log(WLR_ERROR, "Failed to initialize shm: cannot get formats"); return false; } for (size_t i = 0; i < len; ++i) { // These formats are already added by default if (formats[i] != WL_SHM_FORMAT_ARGB8888 && formats[i] != WL_SHM_FORMAT_XRGB8888) { wl_display_add_shm_format(wl_display, formats[i]); } } if (r->impl->init_wl_display) { if (!r->impl->init_wl_display(r, wl_display)) { return false; } } return true; } struct wlr_renderer *wlr_renderer_autocreate(struct wlr_egl *egl, EGLenum platform, void *remote_display, EGLint *config_attribs, EGLint visual_id) { // Append GLES2-specific bits to the provided EGL config attributes EGLint gles2_config_attribs[] = { EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, EGL_NONE, }; size_t config_attribs_len = 0; // not including terminating EGL_NONE while (config_attribs != NULL && config_attribs[config_attribs_len] != EGL_NONE) { ++config_attribs_len; } size_t all_config_attribs_len = config_attribs_len + sizeof(gles2_config_attribs) / sizeof(gles2_config_attribs[0]); EGLint all_config_attribs[all_config_attribs_len]; if (config_attribs_len > 0) { memcpy(all_config_attribs, config_attribs, config_attribs_len * sizeof(EGLint)); } memcpy(&all_config_attribs[config_attribs_len], gles2_config_attribs, sizeof(gles2_config_attribs)); if (!wlr_egl_init(egl, platform, remote_display, all_config_attribs, visual_id)) { wlr_log(WLR_ERROR, "Could not initialize EGL"); return NULL; } struct wlr_renderer *renderer = wlr_gles2_renderer_create(egl); if (!renderer) { wlr_egl_finish(egl); } return renderer; }
1
15,572
This leaks EGL/GL implementation details into the generic renderer interface.
swaywm-wlroots
c
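One hedged way to read the objection in the swaywm-wlroots record above: the EGLImage-to-renderbuffer conversion belongs next to the GLES2 code that already speaks EGL/GL, not in the generic wlr_renderer interface. A sketch of what that could look like follows; the function name and header placement are hypothetical, not wlroots' actual API.

```c
/* Hypothetical declaration in a GLES2-specific header (alongside the other
 * wlr_gles2_* helpers), so the generic wlr_renderer interface never has to
 * mention EGLImageKHR or GLuint. */
#include <GLES2/gl2.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>

struct wlr_renderer;

/* Only valid when the renderer is a GLES2 renderer; callers outside the GLES2
 * backend never see the EGL/GL types. */
GLuint wlr_gles2_renderer_renderbuffer_from_image(struct wlr_renderer *renderer,
	EGLImageKHR image);
```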
@@ -32,6 +32,16 @@ module Travis end end + class DeployConditionError < DeployConfigError + def initialize(msg = "The \\`deploy.on\\` should be a hash (dictionary).") + super + end + + def doc_path + '/user/deployment#Conditional-Releases-with-on%3A' + end + end + class AptSourcesConfigError < CompilationError def initialize(msg = "\\`apt\\` should be a hash with key \\`sources\\` and an array as a value.") super
1
module Travis
  module Build
    class CompilationError < StandardError
      attr_accessor :doc_path

      def initialize(msg = '')
        @msg = msg
      end

      def to_s
        @msg
      end
    end

    class EnvVarDefinitionError < CompilationError
      def initialize(msg = "Environment variables definition is incorrect.")
        super
      end

      def doc_path
        '/user/environment-variables'
      end
    end

    class DeployConfigError < CompilationError
      def initialize(msg = "The \\`deploy\\` configuration should be a hash (dictionary), or an array of hashes.")
        super
      end

      def doc_path
        '/user/deployment'
      end
    end

    class AptSourcesConfigError < CompilationError
      def initialize(msg = "\\`apt\\` should be a hash with key \\`sources\\` and an array as a value.")
        super
      end

      def doc_path
        '/user/installing-dependencies'
      end
    end

    class AptPackagesConfigError < CompilationError
      def initialize(msg = "\\`apt\\` should be a hash with key \\`packages\\` and an array as a value.")
        super
      end

      def doc_path
        '/user/installing-dependencies'
      end
    end
  end
end
1
15,701
Maybe add *key*, i.e. `"The \\`deploy.on\\` key should be a hash (dictionary)."`
travis-ci-travis-build
rb
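Applying the wording suggestion above to the class added by the patch in the travis-ci-travis-build record gives something like the following sketch; it is identical to the patch except for the added word "key", with the module nesting kept as in the file above.

```ruby
module Travis
  module Build
    class DeployConditionError < DeployConfigError
      # Only change from the patch: the message now names the `deploy.on` *key*.
      def initialize(msg = "The \\`deploy.on\\` key should be a hash (dictionary).")
        super
      end

      def doc_path
        '/user/deployment#Conditional-Releases-with-on%3A'
      end
    end
  end
end
```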
@@ -75,7 +75,7 @@ class StatusGridBuilder implements GridBuilderInterface { $result = []; foreach ($this->statusQuery->getAllStatuses($language) as $code => $status) { - $result[] = new StatusOption($code, $code, new Color($status['color']), $status['name']); + $result[] = new StatusOption($code, $status['code'], new Color($status['color']), $status['name']); } return $result;
1
<?php /** * Copyright © Ergonode Sp. z o.o. All rights reserved. * See LICENSE.txt for license details. */ declare(strict_types=1); namespace Ergonode\Workflow\Infrastructure\Grid; use Ergonode\Core\Domain\ValueObject\Color; use Ergonode\Core\Domain\ValueObject\Language; use Ergonode\Grid\Column\BoolColumn; use Ergonode\Grid\Column\LabelColumn; use Ergonode\Grid\Column\LinkColumn; use Ergonode\Grid\Column\TextColumn; use Ergonode\Grid\Filter\MultiSelectFilter; use Ergonode\Grid\Filter\TextFilter; use Ergonode\Grid\GridConfigurationInterface; use Ergonode\Workflow\Domain\Query\StatusQueryInterface; use Ergonode\Workflow\Infrastructure\Grid\Filter\Option\StatusOption; use Symfony\Component\HttpFoundation\Request; use Ergonode\Grid\GridInterface; use Ergonode\Grid\GridBuilderInterface; use Ergonode\Grid\Grid; use Ergonode\Grid\Column\IdColumn; class StatusGridBuilder implements GridBuilderInterface { private StatusQueryInterface $statusQuery; public function __construct(StatusQueryInterface $statusQuery) { $this->statusQuery = $statusQuery; } public function build(GridConfigurationInterface $configuration, Language $language): GridInterface { $codes = $this->getCodes($language); $grid = new Grid(); $grid ->addColumn('id', new IdColumn('id')) ->addColumn('code', new TextColumn('code', 'System name', new TextFilter())) ->addColumn('status', new LabelColumn('status', 'Status', new MultiSelectFilter($codes))) ->addColumn('name', new TextColumn('name', 'Name', new TextFilter())) ->addColumn('description', new TextColumn('description', 'Description', new TextFilter())) ->addColumn('is_default', new BoolColumn('is_default', 'Initial status')) ->addColumn('_links', new LinkColumn('hal', [ 'get' => [ 'route' => 'ergonode_workflow_status_read', 'parameters' => ['language' => $language->getCode(), 'status' => '{id}'], 'privilege' => 'WORKFLOW_GET_STATUS', ], 'edit' => [ 'route' => 'ergonode_workflow_status_change', 'parameters' => ['language' => $language->getCode(), 'status' => '{id}'], 'privilege' => 'WORKFLOW_PUT_STATUS', 'method' => Request::METHOD_PUT, ], 'delete' => [ 'route' => 'ergonode_workflow_status_delete', 'parameters' => ['language' => $language->getCode(), 'status' => '{id}'], 'privilege' => 'WORKFLOW_DELETE_STATUS', 'method' => Request::METHOD_DELETE, ], ])) ->orderBy('code', 'DESC'); return $grid; } private function getCodes(Language $language): array { $result = []; foreach ($this->statusQuery->getAllStatuses($language) as $code => $status) { $result[] = new StatusOption($code, $code, new Color($status['color']), $status['name']); } return $result; } }
1
9,490
It would also be good to change `$code` to `$id`; the current name is misleading and was probably what caused this bug in the first place.
ergonode-backend
php
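A sketch of the follow-up rename suggested in the ergonode-backend record above, applied to the getCodes() method of the StatusGridBuilder shown there. Only the method is reproduced; the surrounding class, use statements, and constructor are assumed unchanged, and the $status['code'] argument comes from the patch.

```php
private function getCodes(Language $language): array
{
    $result = [];
    // The array key returned by getAllStatuses() is an identifier, so call it $id;
    // the status code itself comes from $status['code'], as in the patch.
    foreach ($this->statusQuery->getAllStatuses($language) as $id => $status) {
        $result[] = new StatusOption($id, $status['code'], new Color($status['color']), $status['name']);
    }

    return $result;
}
```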
@@ -10,9 +10,8 @@ from listenbrainz.listenstore import ListenStore class RedisListenStore(ListenStore): - RECENT_LISTENS_KEY = "lb_recent_listens" - RECENT_LISTENS_MAX = 100 - RECENT_LISTENS_MAX_TIME_DIFFERENCE = 300 + RECENT_LISTENS_KEY = "lb_recent_sorted" + RECENT_LISTENS_MAX = 100 def __init__(self, log, conf): super(RedisListenStore, self).__init__(log)
1
# coding=utf-8 import ujson import redis from time import time from redis import Redis from listenbrainz.listen import Listen from listenbrainz.listenstore import ListenStore class RedisListenStore(ListenStore): RECENT_LISTENS_KEY = "lb_recent_listens" RECENT_LISTENS_MAX = 100 RECENT_LISTENS_MAX_TIME_DIFFERENCE = 300 def __init__(self, log, conf): super(RedisListenStore, self).__init__(log) self.log.info('Connecting to redis: %s:%s', conf['REDIS_HOST'], conf['REDIS_PORT']) self.redis = Redis(host=conf['REDIS_HOST'], port=conf['REDIS_PORT'], decode_responses=True) def get_playing_now(self, user_id): """ Return the current playing song of the user Arguments: user_id (int): the id of the user in the db Returns: Listen object which is the currently playing song of the user """ data = self.redis.get('playing_now:{}'.format(user_id)) if not data: return None data = ujson.loads(data) data.update({'playing_now': True}) return Listen.from_json(data) def put_playing_now(self, user_id, listen, expire_time): """ Save a listen as `playing_now` for a particular time in Redis. Args: user_id (int): the row ID of the user listen (dict): the listen data expire_time (int): the time in seconds in which the `playing_now` listen should expire """ self.redis.setex( 'playing_now:{}'.format(user_id), ujson.dumps(listen).encode('utf-8'), expire_time, ) def check_connection(self): """ Pings the redis server to check if the connection works or not """ try: self.redis.ping() except redis.exceptions.ConnectionError as e: self.log.error("Redis ping didn't work: {}".format(str(e))) raise def update_recent_listens(self, unique): """ Store the most recent listens in redis so we can fetch them easily for a recent listens page. This is not a critical action, so if it fails, it fails. Let's live with it. """ recent = [] for listen in unique: if abs(time() - listen['listened_at'].timestamp()) < self.RECENT_LISTENS_MAX_TIME_DIFFERENCE: listen['listened_at'] = listen['listened_at'].timestamp() recent.append(ujson.dumps(listen).encode('utf-8')) # Don't take this very seriously -- if it fails, really no big deal. Let is go. if recent: self.redis.lpush(self.RECENT_LISTENS_KEY, *recent) self.redis.ltrim(self.RECENT_LISTENS_KEY, -self.RECENT_LISTENS_MAX, -1) def get_recent_listens(self, max = RECENT_LISTENS_MAX): """ Get the max number of most recent listens """ recent = [] for listen in self.redis.lrange(self.RECENT_LISTENS_KEY, 0, max - 1): recent.append(Listen.from_json(ujson.loads(listen))) return recent
1
15,245
I changed the name of the key to make deployment easier.
metabrainz-listenbrainz-server
py
@@ -545,6 +545,7 @@ func environmentConfig() (Config, error) { NvidiaRuntime: os.Getenv("ECS_NVIDIA_RUNTIME"), TaskMetadataAZDisabled: utils.ParseBool(os.Getenv("ECS_DISABLE_TASK_METADATA_AZ"), false), CgroupCPUPeriod: parseCgroupCPUPeriod(), + SpotInstanceDrainingEnabled: utils.ParseBool(os.Getenv("ECS_SPOT_INSTANCE_DRAINING_ENABLED"), false), }, err }
1
// Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package config import ( "encoding/json" "errors" "fmt" "io/ioutil" "os" "reflect" "strings" "time" apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" "github.com/aws/amazon-ecs-agent/agent/dockerclient" "github.com/aws/amazon-ecs-agent/agent/ec2" "github.com/aws/amazon-ecs-agent/agent/utils" "github.com/cihub/seelog" ) const ( // http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker DockerReservedPort = 2375 DockerReservedSSLPort = 2376 // DockerTagSeparator is the charactor used to separate names and tag in docker DockerTagSeparator = ":" // DefaultDockerTag is the default tag used by docker DefaultDockerTag = "latest" SSHPort = 22 // AgentIntrospectionPort is used to serve the metadata about the agent and to query the tasks being managed by the agent. AgentIntrospectionPort = 51678 // AgentCredentialsPort is used to serve the credentials for tasks. AgentCredentialsPort = 51679 // AgentPrometheusExpositionPort is used to expose Prometheus metrics that can be scraped by a Prometheus server AgentPrometheusExpositionPort = 51680 // defaultConfigFileName is the default (json-formatted) config file defaultConfigFileName = "/etc/ecs_container_agent/config.json" // DefaultClusterName is the name of the default cluster. DefaultClusterName = "default" // DefaultTaskCleanupWaitDuration specifies the default value for task cleanup duration. It is used to // clean up task's containers. DefaultTaskCleanupWaitDuration = 3 * time.Hour // DefaultPollingMetricsWaitDuration specifies the default value for polling metrics wait duration // This is only used when PollMetrics is set to true DefaultPollingMetricsWaitDuration = 15 * time.Second // defaultDockerStopTimeout specifies the value for container stop timeout duration defaultDockerStopTimeout = 30 * time.Second // DefaultImageCleanupTimeInterval specifies the default value for image cleanup duration. It is used to // remove the images pulled by agent. DefaultImageCleanupTimeInterval = 30 * time.Minute // DefaultNumImagesToDeletePerCycle specifies the default number of images to delete when agent performs // image cleanup. DefaultNumImagesToDeletePerCycle = 5 // DefaultNumNonECSContainersToDeletePerCycle specifies the default number of nonecs containers to delete when agent performs // nonecs containers cleanup. DefaultNumNonECSContainersToDeletePerCycle = 5 // DefaultImageDeletionAge specifies the default value for minimum amount of elapsed time after an image // has been pulled before it can be deleted. DefaultImageDeletionAge = 1 * time.Hour // minimumTaskCleanupWaitDuration specifies the minimum duration to wait before cleaning up // a task's container. This is used to enforce sane values for the config.TaskCleanupWaitDuration field. minimumTaskCleanupWaitDuration = 1 * time.Minute // minimumImagePullInactivityTimeout specifies the minimum amount of time for that an image can be // 'stuck' in the pull / unpack step. 
Very small values are unsafe and lead to high failure rate. minimumImagePullInactivityTimeout = 1 * time.Minute // minimumPollingMetricsWaitDuration specifies the minimum duration to wait before polling for new stats // from docker. This is only used when PollMetrics is set to true minimumPollingMetricsWaitDuration = 1 * time.Second // maximumPollingMetricsWaitDuration specifies the maximum duration to wait before polling for new stats // from docker. This is only used when PollMetrics is set to true maximumPollingMetricsWaitDuration = 20 * time.Second // minimumDockerStopTimeout specifies the minimum value for docker StopContainer API minimumDockerStopTimeout = 1 * time.Second // minimumImageCleanupInterval specifies the minimum time for agent to wait before performing // image cleanup. minimumImageCleanupInterval = 10 * time.Minute // minimumNumImagesToDeletePerCycle specifies the minimum number of images that to be deleted when // performing image cleanup. minimumNumImagesToDeletePerCycle = 1 // defaultCNIPluginsPath is the default path where cni binaries are located defaultCNIPluginsPath = "/amazon-ecs-cni-plugins" // DefaultMinSupportedCNIVersion denotes the minimum version of cni spec required DefaultMinSupportedCNIVersion = "0.3.0" // pauseContainerTarball is the path to the pause container tarball pauseContainerTarballPath = "/images/amazon-ecs-pause.tar" // DefaultTaskMetadataSteadyStateRate is set as 40. This is arrived from our benchmarking // results where task endpoint can handle 4000 rps effectively. Here, 100 containers // will be able to send out 40 rps. DefaultTaskMetadataSteadyStateRate = 40 // DefaultTaskMetadataBurstRate is set to handle 60 burst requests at once DefaultTaskMetadataBurstRate = 60 //Known cached image names CachedImageNamePauseContainer = "amazon/amazon-ecs-pause:0.1.0" CachedImageNameAgentContainer = "amazon/amazon-ecs-agent:latest" // DefaultNvidiaRuntime is the name of the runtime to pass Nvidia GPUs to containers DefaultNvidiaRuntime = "nvidia" // defaultCgroupCPUPeriod is set to 100 ms to set isCFS period and quota for task limits defaultCgroupCPUPeriod = 100 * time.Millisecond maximumCgroupCPUPeriod = 100 * time.Millisecond minimumCgroupCPUPeriod = 8 * time.Millisecond ) const ( // ImagePullDefaultBehavior specifies the behavior that if an image pull API call fails, // agent tries to start from the Docker image cache anyway, assuming that the image has not changed. ImagePullDefaultBehavior ImagePullBehaviorType = iota // ImagePullAlwaysBehavior specifies the behavior that if an image pull API call fails, // the task fails instead of using cached image. ImagePullAlwaysBehavior // ImagePullOnceBehavior specifies the behavior that agent will only attempt to pull // the same image once, once an image is pulled, local image cache will be used // for all the containers. ImagePullOnceBehavior // ImagePullPreferCachedBehavior specifies the behavior that agent will only attempt to pull // the image if there is no cached image. ImagePullPreferCachedBehavior ) const ( // When ContainerInstancePropagateTagsFromNoneType is specified, no DescribeTags // API call will be made. ContainerInstancePropagateTagsFromNoneType ContainerInstancePropagateTagsFromType = iota // When ContainerInstancePropagateTagsFromEC2InstanceType is specified, agent will // make DescribeTags API call to get tags remotely. ContainerInstancePropagateTagsFromEC2InstanceType ) var ( // DefaultPauseContainerImageName is the name of the pause container image. 
The linker's // load flags are used to populate this value from the Makefile DefaultPauseContainerImageName = "" // DefaultPauseContainerTag is the tag for the pause container image. The linker's load // flags are used to populate this value from the Makefile DefaultPauseContainerTag = "" ) // Merge merges two config files, preferring the ones on the left. Any nil or // zero values present in the left that are not present in the right will be // overridden func (cfg *Config) Merge(rhs Config) *Config { left := reflect.ValueOf(cfg).Elem() right := reflect.ValueOf(&rhs).Elem() for i := 0; i < left.NumField(); i++ { leftField := left.Field(i) if utils.ZeroOrNil(leftField.Interface()) { leftField.Set(reflect.ValueOf(right.Field(i).Interface())) } } return cfg //make it chainable } // NewConfig returns a config struct created by merging environment variables, // a config file, and EC2 Metadata info. // The 'config' struct it returns can be used, even if an error is returned. An // error is returned, however, if the config is incomplete in some way that is // considered fatal. func NewConfig(ec2client ec2.EC2MetadataClient) (*Config, error) { var errs []error envConfig, err := environmentConfig() //Environment overrides all else if err != nil { errs = append(errs, err) } config := &envConfig if config.complete() { // No need to do file / network IO return config, nil } fcfg, err := fileConfig() if err != nil { errs = append(errs, err) } config.Merge(fcfg) config.Merge(userDataConfig(ec2client)) if config.AWSRegion == "" { if config.NoIID { // get it from AWS SDK if we don't have instance identity document awsRegion, err := ec2client.Region() if err != nil { errs = append(errs, err) } config.AWSRegion = awsRegion } else { // Get it from metadata only if we need to (network io) config.Merge(ec2MetadataConfig(ec2client)) } } return config, config.mergeDefaultConfig(errs) } func (config *Config) mergeDefaultConfig(errs []error) error { config.trimWhitespace() config.Merge(DefaultConfig()) err := config.validateAndOverrideBounds() if err != nil { errs = append(errs, err) } if len(errs) != 0 { return apierrors.NewMultiError(errs...) } return nil } // trimWhitespace trims whitespace from all string cfg values with the // `trim` tag func (cfg *Config) trimWhitespace() { cfgElem := reflect.ValueOf(cfg).Elem() cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type() for i := 0; i < cfgElem.NumField(); i++ { cfgField := cfgElem.Field(i) if !cfgField.CanInterface() { continue } trimTag := cfgStructField.Field(i).Tag.Get("trim") if len(trimTag) == 0 { continue } if cfgField.Kind() != reflect.String { seelog.Warnf("Cannot trim non-string field type %v index %v", cfgField.Kind().String(), i) continue } str := cfgField.Interface().(string) cfgField.SetString(strings.TrimSpace(str)) } } // validateAndOverrideBounds performs validation over members of the Config struct // and check the value against the minimum required value. 
func (cfg *Config) validateAndOverrideBounds() error { err := cfg.checkMissingAndDepreciated() if err != nil { return err } if cfg.DockerStopTimeout < minimumDockerStopTimeout { return fmt.Errorf("config: invalid value for docker container stop timeout: %v", cfg.DockerStopTimeout.String()) } if cfg.ContainerStartTimeout < minimumContainerStartTimeout { return fmt.Errorf("config: invalid value for docker container start timeout: %v", cfg.ContainerStartTimeout.String()) } var badDrivers []string for _, driver := range cfg.AvailableLoggingDrivers { _, ok := dockerclient.LoggingDriverMinimumVersion[driver] if !ok { badDrivers = append(badDrivers, string(driver)) } } if len(badDrivers) > 0 { return errors.New("Invalid logging drivers: " + strings.Join(badDrivers, ", ")) } // If a value has been set for taskCleanupWaitDuration and the value is less than the minimum allowed cleanup duration, // print a warning and override it if cfg.TaskCleanupWaitDuration < minimumTaskCleanupWaitDuration { seelog.Warnf("Invalid value for ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultTaskCleanupWaitDuration.String(), cfg.TaskCleanupWaitDuration, minimumTaskCleanupWaitDuration) cfg.TaskCleanupWaitDuration = DefaultTaskCleanupWaitDuration } if cfg.ImagePullInactivityTimeout < minimumImagePullInactivityTimeout { seelog.Warnf("Invalid value for image pull inactivity timeout duration, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", defaultImagePullInactivityTimeout.String(), cfg.ImagePullInactivityTimeout, minimumImagePullInactivityTimeout) cfg.ImagePullInactivityTimeout = defaultImagePullInactivityTimeout } if cfg.ImageCleanupInterval < minimumImageCleanupInterval { seelog.Warnf("Invalid value for ECS_IMAGE_CLEANUP_INTERVAL, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultImageCleanupTimeInterval.String(), cfg.ImageCleanupInterval, minimumImageCleanupInterval) cfg.ImageCleanupInterval = DefaultImageCleanupTimeInterval } if cfg.NumImagesToDeletePerCycle < minimumNumImagesToDeletePerCycle { seelog.Warnf("Invalid value for number of images to delete for image cleanup, will be overridden with the default value: %d. Parsed value: %d, minimum value: %d.", DefaultImageDeletionAge, cfg.NumImagesToDeletePerCycle, minimumNumImagesToDeletePerCycle) cfg.NumImagesToDeletePerCycle = DefaultNumImagesToDeletePerCycle } if cfg.TaskMetadataSteadyStateRate <= 0 || cfg.TaskMetadataBurstRate <= 0 { seelog.Warnf("Invalid values for rate limits, will be overridden with default values: %d,%d.", DefaultTaskMetadataSteadyStateRate, DefaultTaskMetadataBurstRate) cfg.TaskMetadataSteadyStateRate = DefaultTaskMetadataSteadyStateRate cfg.TaskMetadataBurstRate = DefaultTaskMetadataBurstRate } // check the PollMetrics specific configurations cfg.pollMetricsOverrides() cfg.platformOverrides() return nil } func (cfg *Config) pollMetricsOverrides() { if cfg.PollMetrics { if cfg.PollingMetricsWaitDuration < minimumPollingMetricsWaitDuration { seelog.Warnf("Invalid value for polling metrics wait duration, will be overridden with the default value: %s. 
Parsed value: %v, minimum value: %v.", DefaultPollingMetricsWaitDuration.String(), cfg.PollingMetricsWaitDuration, minimumPollingMetricsWaitDuration) cfg.PollingMetricsWaitDuration = DefaultPollingMetricsWaitDuration } if cfg.PollingMetricsWaitDuration > maximumPollingMetricsWaitDuration { seelog.Warnf("Invalid value for polling metrics wait duration, will be overridden with the default value: %s. Parsed value: %v, maximum value: %v.", DefaultPollingMetricsWaitDuration.String(), cfg.PollingMetricsWaitDuration, maximumPollingMetricsWaitDuration) cfg.PollingMetricsWaitDuration = DefaultPollingMetricsWaitDuration } } } // checkMissingAndDeprecated checks all zero-valued fields for tags of the form // missing:STRING and acts based on that string. Current options are: fatal, // warn. Fatal will result in an error being returned, warn will result in a // warning that the field is missing being logged. func (cfg *Config) checkMissingAndDepreciated() error { cfgElem := reflect.ValueOf(cfg).Elem() cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type() fatalFields := []string{} for i := 0; i < cfgElem.NumField(); i++ { cfgField := cfgElem.Field(i) if utils.ZeroOrNil(cfgField.Interface()) { missingTag := cfgStructField.Field(i).Tag.Get("missing") if len(missingTag) == 0 { continue } switch missingTag { case "warn": seelog.Warnf("Configuration key not set, key: %v", cfgStructField.Field(i).Name) case "fatal": seelog.Criticalf("Configuration key not set, key: %v", cfgStructField.Field(i).Name) fatalFields = append(fatalFields, cfgStructField.Field(i).Name) default: seelog.Warnf("Unexpected `missing` tag value, tag %v", missingTag) } } else { // present deprecatedTag := cfgStructField.Field(i).Tag.Get("deprecated") if len(deprecatedTag) == 0 { continue } seelog.Warnf("Use of deprecated configuration key, key: %v message: %v", cfgStructField.Field(i).Name, deprecatedTag) } } if len(fatalFields) > 0 { return errors.New("Missing required fields: " + strings.Join(fatalFields, ", ")) } return nil } // complete returns true if all fields of the config are populated / nonzero func (cfg *Config) complete() bool { cfgElem := reflect.ValueOf(cfg).Elem() for i := 0; i < cfgElem.NumField(); i++ { if utils.ZeroOrNil(cfgElem.Field(i).Interface()) { return false } } return true } func fileConfig() (Config, error) { fileName := utils.DefaultIfBlank(os.Getenv("ECS_AGENT_CONFIG_FILE_PATH"), defaultConfigFileName) cfg := Config{} file, err := os.Open(fileName) if err != nil { return cfg, nil } data, err := ioutil.ReadAll(file) if err != nil { seelog.Errorf("Unable to read cfg file, err %v", err) return cfg, err } if strings.TrimSpace(string(data)) == "" { // empty file, not an error return cfg, nil } err = json.Unmarshal(data, &cfg) if err != nil { seelog.Criticalf("Error reading cfg json data, err %v", err) return cfg, err } // Handle any deprecated keys correctly here if utils.ZeroOrNil(cfg.Cluster) && !utils.ZeroOrNil(cfg.ClusterArn) { cfg.Cluster = cfg.ClusterArn } return cfg, nil } // userDataConfig reads configuration JSON from instance's userdata. It doesn't // return any error as it's entirely optional to configure the ECS agent using // this method. 
// Example: // {"ECSAgentConfiguration":{"Cluster":"default"}} func userDataConfig(ec2Client ec2.EC2MetadataClient) Config { type userDataParser struct { Config Config `json:"ECSAgentConfiguration"` } parsedUserData := userDataParser{ Config: Config{}, } userData, err := ec2Client.GetUserData() if err != nil { seelog.Warnf("Unable to fetch user data: %v", err) // Unable to read userdata from instance metadata. Just // return early return parsedUserData.Config } // In the future, if we want to support base64 encoded config, // we'd need to add logic to decode the string here. err = json.Unmarshal([]byte(userData), &parsedUserData) if err != nil { seelog.Debugf("Non-json user data, skip merging into agent config: %v", err) // Unable to parse userdata as a valid JSON. Return the // empty config return Config{} } return parsedUserData.Config } // environmentConfig reads the given configs from the environment and attempts // to convert them to the given type func environmentConfig() (Config, error) { dataDir := os.Getenv("ECS_DATADIR") steadyStateRate, burstRate := parseTaskMetadataThrottles() var errs []error instanceAttributes, errs := parseInstanceAttributes(errs) containerInstanceTags, errs := parseContainerInstanceTags(errs) additionalLocalRoutes, errs := parseAdditionalLocalRoutes(errs) var err error if len(errs) > 0 { err = apierrors.NewMultiError(errs...) } return Config{ Cluster: os.Getenv("ECS_CLUSTER"), APIEndpoint: os.Getenv("ECS_BACKEND_HOST"), AWSRegion: os.Getenv("AWS_DEFAULT_REGION"), DockerEndpoint: os.Getenv("DOCKER_HOST"), ReservedPorts: parseReservedPorts("ECS_RESERVED_PORTS"), ReservedPortsUDP: parseReservedPorts("ECS_RESERVED_PORTS_UDP"), DataDir: dataDir, Checkpoint: parseCheckpoint(dataDir), EngineAuthType: os.Getenv("ECS_ENGINE_AUTH_TYPE"), EngineAuthData: NewSensitiveRawMessage([]byte(os.Getenv("ECS_ENGINE_AUTH_DATA"))), UpdatesEnabled: utils.ParseBool(os.Getenv("ECS_UPDATES_ENABLED"), false), UpdateDownloadDir: os.Getenv("ECS_UPDATE_DOWNLOAD_DIR"), DisableMetrics: utils.ParseBool(os.Getenv("ECS_DISABLE_METRICS"), false), ReservedMemory: parseEnvVariableUint16("ECS_RESERVED_MEMORY"), AvailableLoggingDrivers: parseAvailableLoggingDrivers(), PrivilegedDisabled: utils.ParseBool(os.Getenv("ECS_DISABLE_PRIVILEGED"), false), SELinuxCapable: utils.ParseBool(os.Getenv("ECS_SELINUX_CAPABLE"), false), AppArmorCapable: utils.ParseBool(os.Getenv("ECS_APPARMOR_CAPABLE"), false), TaskCleanupWaitDuration: parseEnvVariableDuration("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"), TaskENIEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_ENI"), false), TaskIAMRoleEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_IAM_ROLE"), false), DeleteNonECSImagesEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP"), false), TaskCPUMemLimit: parseTaskCPUMemLimitEnabled(), DockerStopTimeout: parseDockerStopTimeout(), ContainerStartTimeout: parseContainerStartTimeout(), ImagePullInactivityTimeout: parseImagePullInactivityTimeout(), CredentialsAuditLogFile: os.Getenv("ECS_AUDIT_LOGFILE"), CredentialsAuditLogDisabled: utils.ParseBool(os.Getenv("ECS_AUDIT_LOGFILE_DISABLED"), false), TaskIAMRoleEnabledForNetworkHost: utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST"), false), ImageCleanupDisabled: utils.ParseBool(os.Getenv("ECS_DISABLE_IMAGE_CLEANUP"), false), MinimumImageDeletionAge: parseEnvVariableDuration("ECS_IMAGE_MINIMUM_CLEANUP_AGE"), ImageCleanupInterval: parseEnvVariableDuration("ECS_IMAGE_CLEANUP_INTERVAL"), NumImagesToDeletePerCycle: 
parseNumImagesToDeletePerCycle(), NumNonECSContainersToDeletePerCycle: parseNumNonECSContainersToDeletePerCycle(), ImagePullBehavior: parseImagePullBehavior(), ImageCleanupExclusionList: parseImageCleanupExclusionList("ECS_EXCLUDE_UNTRACKED_IMAGE"), InstanceAttributes: instanceAttributes, CNIPluginsPath: os.Getenv("ECS_CNI_PLUGINS_PATH"), AWSVPCBlockInstanceMetdata: utils.ParseBool(os.Getenv("ECS_AWSVPC_BLOCK_IMDS"), false), AWSVPCAdditionalLocalRoutes: additionalLocalRoutes, ContainerMetadataEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_CONTAINER_METADATA"), false), DataDirOnHost: os.Getenv("ECS_HOST_DATA_DIR"), OverrideAWSLogsExecutionRole: utils.ParseBool(os.Getenv("ECS_ENABLE_AWSLOGS_EXECUTIONROLE_OVERRIDE"), false), CgroupPath: os.Getenv("ECS_CGROUP_PATH"), TaskMetadataSteadyStateRate: steadyStateRate, TaskMetadataBurstRate: burstRate, SharedVolumeMatchFullConfig: utils.ParseBool(os.Getenv("ECS_SHARED_VOLUME_MATCH_FULL_CONFIG"), false), ContainerInstanceTags: containerInstanceTags, ContainerInstancePropagateTagsFrom: parseContainerInstancePropagateTagsFrom(), PollMetrics: utils.ParseBool(os.Getenv("ECS_POLL_METRICS"), false), PollingMetricsWaitDuration: parseEnvVariableDuration("ECS_POLLING_METRICS_WAIT_DURATION"), DisableDockerHealthCheck: utils.ParseBool(os.Getenv("ECS_DISABLE_DOCKER_HEALTH_CHECK"), false), GPUSupportEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_GPU_SUPPORT"), false), NvidiaRuntime: os.Getenv("ECS_NVIDIA_RUNTIME"), TaskMetadataAZDisabled: utils.ParseBool(os.Getenv("ECS_DISABLE_TASK_METADATA_AZ"), false), CgroupCPUPeriod: parseCgroupCPUPeriod(), }, err } func ec2MetadataConfig(ec2client ec2.EC2MetadataClient) Config { iid, err := ec2client.InstanceIdentityDocument() if err != nil { seelog.Criticalf("Unable to communicate with EC2 Metadata service to infer region: %v", err.Error()) return Config{} } return Config{AWSRegion: iid.Region} } // String returns a lossy string representation of the config suitable for human readable display. // Consequently, it *should not* return any sensitive information. func (cfg *Config) String() string { return fmt.Sprintf( "Cluster: %v, "+ " Region: %v, "+ " DataDir: %v,"+ " Checkpoint: %v, "+ "AuthType: %v, "+ "UpdatesEnabled: %v, "+ "DisableMetrics: %v, "+ "PollMetrics: %v, "+ "PollingMetricsWaitDuration: %v, "+ "ReservedMem: %v, "+ "TaskCleanupWaitDuration: %v, "+ "DockerStopTimeout: %v, "+ "ContainerStartTimeout: %v, "+ "TaskCPUMemLimit: %v, "+ "%s", cfg.Cluster, cfg.AWSRegion, cfg.DataDir, cfg.Checkpoint, cfg.EngineAuthType, cfg.UpdatesEnabled, cfg.DisableMetrics, cfg.PollMetrics, cfg.PollingMetricsWaitDuration, cfg.ReservedMemory, cfg.TaskCleanupWaitDuration, cfg.DockerStopTimeout, cfg.ContainerStartTimeout, cfg.TaskCPUMemLimit, cfg.platformString(), ) }
1
23241
Isn't this going to be true by default? If so, let's name the env var `ECS_DISABLE_SPOT_INSTANCE_DRAINING` and have it default to false.
aws-amazon-ecs-agent
go
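For illustration only (not part of the original record): a minimal sketch of the flag semantics the reviewer is suggesting, where spot instance draining stays on unless `ECS_DISABLE_SPOT_INSTANCE_DRAINING` is explicitly set. The `parseBool` helper below is a stand-in for the agent's `utils.ParseBool` and is an assumption, not the project's actual code.

// Illustrative sketch (assumption, not agent code): draining is enabled by
// default and turned off only when the disable variable is set.
package main

import (
	"fmt"
	"os"
	"strconv"
)

// parseBool is a stand-in for a ParseBool-style helper: it returns the
// supplied default when the value is empty or cannot be parsed as a bool.
func parseBool(value string, defaultVal bool) bool {
	parsed, err := strconv.ParseBool(value)
	if err != nil {
		return defaultVal
	}
	return parsed
}

func main() {
	// Unset or unparsable => disabled == false => draining enabled (the default).
	disabled := parseBool(os.Getenv("ECS_DISABLE_SPOT_INSTANCE_DRAINING"), false)
	spotInstanceDrainingEnabled := !disabled
	fmt.Println("SpotInstanceDrainingEnabled:", spotInstanceDrainingEnabled)
}

With this naming, operators only set the variable when they want to opt out, which is what "have it default to false" means for a disable flag.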
@@ -3,6 +3,8 @@
 Provides Dimension objects for tracking the properties of a value,
 axis or map dimension. Also supplies the Dimensioned abstract
 baseclass for classes that accept Dimension values.
 """
+from __future__ import unicode_literals
+
 import re
 from operator import itemgetter
1
""" Provides Dimension objects for tracking the properties of a value, axis or map dimension. Also supplies the Dimensioned abstract baseclass for classes that accept Dimension values. """ import re from operator import itemgetter try: from cyordereddict import OrderedDict except: from collections import OrderedDict import numpy as np import param from ..core.util import (basestring, sanitize_identifier, group_sanitizer, label_sanitizer, max_range, find_range, dimension_sanitizer) from .options import Store, StoreOptions from .pprint import PrettyPrinter # Alias parameter support for pickle loading ALIASES = {'key_dimensions': 'kdims', 'value_dimensions': 'vdims', 'constant_dimensions': 'cdims'} title_format = "{name}: {val}{unit}" def param_aliases(d): """ Called from __setstate__ in LabelledData in order to load old pickles with outdated parameter names. Warning: We want to keep pickle hacking to a minimum! """ for old, new in ALIASES.items(): old_param = '_%s_param_value' % old new_param = '_%s_param_value' % new if old_param in d: d[new_param] = d.pop(old_param) return d class Dimension(param.Parameterized): """ Dimension objects are used to specify some important general features that may be associated with a collection of values. For instance, a Dimension may specify that a set of numeric values actually correspond to 'Height' (dimension name), in units of meters, and that allowed values must be floats greater than zero. In addition, Dimensions can be declared as cyclic, support categorical data using a finite set of allowed, ordered values and support a custom, pretty-printed representation. """ name = param.String(doc=""" Optional name associated with the Dimension. For instance, 'height' or 'weight'.""") cyclic = param.Boolean(default=False, doc=""" Whether the range of this feature is cyclic such that the maximum allowed value (defined by the range parameter) is continuous with the minimum allowed value.""") value_format = param.Callable(default=None, doc=""" Formatting function applied to each value before display.""") range = param.Tuple(default=(None, None), doc=""" Specifies the minimum and maximum allowed values for a Dimension. None is used to represent an unlimited bound.""") soft_range = param.Tuple(default=(None, None), doc=""" Specifies a minimum and maximum reference value, which may be overridden by the data.""") type = param.Parameter(default=None, doc=""" Optional type associated with the Dimension values. The type may be an inbuilt constructor (such as int, str, float) or a custom class object.""") unit = param.String(default=None, allow_None=True, doc=""" Optional unit string associated with the Dimension. For instance, the string 'm' may be used represent units of meters and 's' to represent units of seconds.""") values = param.ClassSelector(class_=(str, list), default=[], doc=""" Optional set of allowed values for the dimension that can also be used to retain a categorical ordering. Setting values to 'initial' indicates that the values will be added during construction.""") # Defines default formatting by type type_formatters = {} unit_format = ' ({unit})' def __init__(self, name, **params): """ Initializes the Dimension object with the given name. 
""" if isinstance(name, Dimension): existing_params = dict(name.get_param_values()) else: existing_params = {'name': name} all_params = dict(existing_params, **params) if isinstance(all_params['name'], tuple): alias, long_name = all_params['name'] dimension_sanitizer.add_aliases(**{alias:long_name}) all_params['name'] = long_name super(Dimension, self).__init__(**all_params) def __call__(self, name=None, **overrides): """ Derive a new Dimension that inherits existing parameters except for the supplied, explicit overrides """ settings = dict(self.get_param_values(onlychanged=True), **overrides) if name is not None: settings['name'] = name return self.__class__(**settings) @property def pprint_label(self): "The pretty-printed label string for the Dimension" unit = '' if self.unit is None else self.unit_format.format(unit=self.unit) return self.name + unit def pprint_value(self, value): """ Applies the defined formatting to the value. """ own_type = type(value) if self.type is None else self.type formatter = (self.value_format if self.value_format else self.type_formatters.get(own_type)) if formatter: if callable(formatter): return formatter(value) elif isinstance(formatter, basestring): if re.findall(r"\{(\w+)\}", formatter): return formatter.format(value) else: return formatter % value return value def __repr__(self): return self.pprint() def pprint_value_string(self, value): """ Pretty prints the dimension name and value using the global title_format variable, including the unit string (if set). Numeric types are printed to the stated rounding level. """ unit = '' if self.unit is None else ' ' + self.unit value = self.pprint_value(value) return title_format.format(name=self.name, val=value, unit=unit) def __hash__(self): """ The hash allows two Dimension objects to be compared; if the hashes are equal, all the parameters of the Dimensions are also equal. """ return sum([hash(value) for name, value in self.get_param_values() if not isinstance(value, list)]) def __str__(self): return self.pprint_label def __eq__(self, other): "Implements equals operator including sanitized comparison." dim_matches = [self.name, dimension_sanitizer(self.name)] return other.name in dim_matches if isinstance(other, Dimension) else other in dim_matches def __ne__(self, other): "Implements not equal operator including sanitized comparison." return not self.__eq__(other) def __lt__(self, other): "Dimensions are sorted alphanumerically by name" return self.name < other.name if isinstance(other, Dimension) else self.name < other class LabelledData(param.Parameterized): """ LabelledData is a mix-in class designed to introduce the group and label parameters (and corresponding methods) to any class containing data. This class assumes that the core data contents will be held in the attribute called 'data'. Used together, group and label are designed to allow a simple and flexible means of addressing data. For instance, if you are collecting the heights of people in different demographics, you could specify the values of your objects as 'Height' and then use the label to specify the (sub)population. In this scheme, one object may have the parameters set to [group='Height', label='Children'] and another may use [group='Height', label='Adults']. Note: Another level of specification is implict in the type (i.e class) of the LabelledData object. A full specification of a LabelledData object is therefore given by the tuple (<type>, <group>, label>). This additional level of specification is used in the traverse method. 
Any strings can be used for the group and label, but it can be convenient to use a capitalized string of alphanumeric characters, in which case the keys used for matching in the matches and traverse method will correspond exactly to {type}.{group}.{label}. Otherwise the strings provided will be sanitized to be valid capitalized Python identifiers, which works fine but can sometimes be confusing. """ group = param.String(default='LabelledData', constant=True, doc=""" A string describing the type of data contained by the object. By default this will typically mirror the class name.""") label = param.String(default='', constant=True, doc=""" Optional label describing the data, typically reflecting where or how it was measured. The label should allow a specific measurement or dataset to be referenced for a given group..""") _deep_indexable = False def __init__(self, data, id=None, **params): """ All LabelledData subclasses must supply data to the constructor, which will be held on the .data attribute. This class also has an id instance attribute, which may be set to associate some custom options with the object. """ self.data = data self.id = id if isinstance(params.get('label',None), tuple): (alias, long_name) = params['label'] label_sanitizer.add_aliases(**{alias:long_name}) params['label'] = long_name if isinstance(params.get('group',None), tuple): (alias, long_name) = params['group'] group_sanitizer.add_aliases(**{alias:long_name}) params['group'] = long_name super(LabelledData, self).__init__(**params) if not group_sanitizer.allowable(self.group): raise ValueError("Supplied group %r contains invalid characters." % self.group) elif not label_sanitizer.allowable(self.label): raise ValueError("Supplied label %r contains invalid characters." % self.label) def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides): """ Returns a clone of the object with matching parameter values containing the specified args and kwargs. If shared_data is set to True and no data explicitly supplied, the clone will share data with the original. May also supply a new_type, which will inherit all shared parameters. """ params = dict(self.get_param_values()) if new_type is None: clone_type = self.__class__ else: clone_type = new_type new_params = new_type.params() params = {k: v for k, v in params.items() if k in new_params} settings = dict(params, **overrides) if data is None and shared_data: data = self.data # Apply name mangling for __ attribute pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', []) return clone_type(data, *args, **{k:v for k,v in settings.items() if k not in pos_args}) def relabel(self, label=None, group=None, depth=0): """ Assign a new label and/or group to an existing LabelledData object, creating a clone of the object with the new settings. """ keywords = [('label',label), ('group',group)] obj = self.clone(self.data, **{k:v for k,v in keywords if v is not None}) if (depth > 0) and getattr(obj, '_deep_indexable', False): for k, v in obj.items(): obj[k] = v.relabel(group=group, label=label, depth=depth-1) return obj def matches(self, spec): """ A specification may be a class, a tuple or a string. Equivalent to isinstance if a class is supplied, otherwise matching occurs on type, group and label. These may be supplied as a tuple of strings or as a single string of the form "{type}.{group}.{label}". Matching may be done on {type} alone, {type}.{group}, or {type}.{group}.{label}. 
The strings for the type, group, and label will each be sanitized before the match, and so the sanitized versions of those values will need to be provided if the match is to succeed. """ if callable(spec) and not isinstance(spec, type): return spec(self) elif isinstance(spec, type): return isinstance(self, spec) specification = (self.__class__.__name__, self.group, self.label) split_spec = tuple(spec.split('.')) if not isinstance(spec, tuple) else spec split_spec, nocompare = zip(*((None, True) if s == '*' or s is None else (s, False) for s in split_spec)) if all(nocompare): return True match_fn = itemgetter(*(idx for idx, nc in enumerate(nocompare) if not nc)) self_spec = match_fn(split_spec) unescaped_match = match_fn(specification[:len(split_spec)]) == self_spec if unescaped_match: return True sanitizers = [sanitize_identifier, group_sanitizer, label_sanitizer] identifier_specification = tuple(fn(ident, escape=False) for ident, fn in zip(specification, sanitizers)) identifier_match = match_fn(identifier_specification[:len(split_spec)]) == self_spec return identifier_match def traverse(self, fn, specs=None, full_breadth=True): """ Traverses any nested LabelledData object (i.e LabelledData objects containing LabelledData objects), applying the supplied function to each constituent element if the supplied specifications. The output of these function calls are collected and returned in the accumulator list. If specs is None, all constituent elements are processed. Otherwise, specs must be a list of type.group.label specs, types, and functions. """ accumulator = [] matches = specs is None if not matches: for spec in specs: matches = self.matches(spec) if matches: break if matches: accumulator.append(fn(self)) # Assumes composite objects are iterables if self._deep_indexable: for el in self: accumulator += el.traverse(fn, specs, full_breadth) if not full_breadth: break return accumulator def map(self, map_fn, specs=None, clone=True): """ Recursively replaces elements using a map function when the specification applies. """ applies = specs is None or any(self.matches(spec) for spec in specs) if self._deep_indexable: deep_mapped = self.clone(shared_data=False) if clone else self for k, v in self.items(): deep_mapped[k] = v.map(map_fn, specs, clone) if applies: deep_mapped = map_fn(deep_mapped) return deep_mapped else: return map_fn(self) if applies else self def __getstate__(self): """ When pickling, make sure to save the relevant style and plotting options as well. """ obj_dict = self.__dict__.copy() try: if Store.save_option_state and (obj_dict.get('id', None) is not None): custom_key = '_custom_option_%d' % obj_dict['id'] if custom_key not in obj_dict: obj_dict[custom_key] = {backend:s[obj_dict['id']] for backend,s in Store._custom_options.items() if obj_dict['id'] in s} else: obj_dict['id'] = None except: self.warning("Could not pickle custom style information.") return obj_dict def __setstate__(self, d): """ When unpickled, restore the saved style and plotting options to ViewableElement.options. 
""" d = param_aliases(d) try: load_options = Store.load_counter_offset is not None if load_options: matches = [k for k in d if k.startswith('_custom_option')] for match in matches: custom_id = int(match.split('_')[-1]) if not isinstance(d[match], dict): # Backward compatibility before multiple backends backend_info = {'matplotlib':d[match]} else: backend_info = d[match] for backend, info in backend_info.items(): if backend not in Store._custom_options: Store._custom_options[backend] = {} Store._custom_options[backend][Store.load_counter_offset + custom_id] = info d.pop(match) if d['id'] is not None: d['id'] += Store.load_counter_offset else: d['id'] = None except: self.warning("Could not unpickle custom style information.") self.__dict__.update(d) class Dimensioned(LabelledData): """ Dimensioned is a base class that allows the data contents of a class to be associated with dimensions. The contents associated with dimensions may be partitioned into one of three types * key dimensions: These are the dimensions that can be indexed via the __getitem__ method. Dimension objects supporting key dimensions must support indexing over these dimensions and may also support slicing. This list ordering of dimensions describes the positional components of each multi-dimensional indexing operation. For instance, if the key dimension names are 'weight' followed by 'height' for Dimensioned object 'obj', then obj[80,175] indexes a weight of 80 and height of 175. Accessed using either kdims or key_dimensions. * value dimensions: These dimensions correspond to any data held on the Dimensioned object not in the key dimensions. Indexing by value dimension is supported by dimension name (when there are multiple possible value dimensions); no slicing semantics is supported and all the data associated with that dimension will be returned at once. Note that it is not possible to mix value dimensions and deep dimensions. Accessed using either vdims or value_dimensions. * deep dimensions: These are dynamically computed dimensions that belong to other Dimensioned objects that are nested in the data. Objects that support this should enable the _deep_indexable flag. Note that it is not possible to mix value dimensions and deep dimensions. Accessed using either ddims or deep_dimensions. Dimensioned class support generalized methods for finding the range and type of values along a particular Dimension. The range method relies on the appropriate implementation of the dimension_values methods on subclasses. The index of an arbitrary dimension is its positional index in the list of all dimensions, starting with the key dimensions, followed by the value dimensions and ending with the deep dimensions. """ cdims = param.Dict(default=OrderedDict(), doc=""" The constant dimensions defined as a dictionary of Dimension:value pairs providing additional dimension information about the object. Aliased with constant_dimensions.""") kdims = param.List(bounds=(0, None), constant=True, doc=""" The key dimensions defined as list of dimensions that may be used in indexing (and potential slicing) semantics. The order of the dimensions listed here determines the semantics of each component of a multi-dimensional indexing operation. Aliased with key_dimensions.""") vdims = param.List(bounds=(0, None), constant=True, doc=""" The value dimensions defined as the list of dimensions used to describe the components of the data. If multiple value dimensions are supplied, a particular value dimension may be indexed by name after the key dimensions. 
Aliased with value_dimensions.""") group = param.String(default='Dimensioned', constant=True, doc=""" A string describing the data wrapped by the object.""") __abstract = True _sorted = False _dim_groups = ['kdims', 'vdims', 'cdims', 'ddims'] _dim_aliases = dict(key_dimensions='kdims', value_dimensions='vdims', constant_dimensions='cdims', deep_dimensions='ddims') # Long-name aliases @property def key_dimensions(self): return self.kdims @property def value_dimensions(self): return self.vdims @property def constant_dimensions(self): return self.cdims @property def deep_dimensions(self): return self.ddims def __init__(self, data, **params): for group in self._dim_groups+list(self._dim_aliases.keys()): if group in ['deep_dimensions', 'ddims']: continue if group in params: if group in self._dim_aliases: params[self._dim_aliases[group]] = params.pop(group) group = self._dim_aliases[group] if group == 'cdims': dimensions = {d if isinstance(d, Dimension) else Dimension(d): val for d, val in params.pop(group).items()} else: dimensions = [d if isinstance(d, Dimension) else Dimension(d) for d in params.pop(group)] params[group] = dimensions super(Dimensioned, self).__init__(data, **params) self.ndims = len(self.kdims) cdims = [(d.name, val) for d, val in self.cdims.items()] self._cached_constants = OrderedDict(cdims) self._settings = None def _valid_dimensions(self, dimensions): """Validates key dimension input Returns kdims if no dimensions are specified""" if dimensions is None: dimensions = self.kdims elif not isinstance(dimensions, list): dimensions = [dimensions] valid_dimensions = [] for dim in dimensions: if isinstance(dim, Dimension): dim = dim.name if dim not in self.kdims: raise Exception("Supplied dimensions %s not found." % dim) valid_dimensions.append(dim) return valid_dimensions @property def ddims(self): "The list of deep dimensions" if self._deep_indexable and len(self): return self.values()[0].dimensions() else: return [] def dimensions(self, selection='all', label=False): """ Provides convenient access to Dimensions on nested Dimensioned objects. Dimensions can be selected by their type, i.e. 'key' or 'value' dimensions. By default 'all' dimensions are returned. """ lambdas = {'k': (lambda x: x.kdims, {'full_breadth': False}), 'v': (lambda x: x.vdims, {}), 'c': (lambda x: x.cdims, {})} aliases = {'key': 'k', 'value': 'v', 'constant': 'c'} if selection == 'all': dims = [dim for group in self._dim_groups for dim in getattr(self, group)] elif isinstance(selection, list): dims = [dim for group in selection for dim in getattr(self, '%sdims' % aliases.get(group))] elif aliases.get(selection) in lambdas: selection = aliases.get(selection, selection) lmbd, kwargs = lambdas[selection] key_traversal = self.traverse(lmbd, **kwargs) dims = [dim for keydims in key_traversal for dim in keydims] else: raise KeyError("Invalid selection %r, valid selections include" "'all', 'value' and 'key' dimensions" % repr(selection)) return [dim.name if label else dim for dim in dims] def get_dimension(self, dimension, default=None): "Access a Dimension object by name or index." all_dims = self.dimensions() if isinstance(dimension, Dimension): dimension = dimension.name if isinstance(dimension, int) and dimension < len(all_dims): return all_dims[dimension] else: return {dim.name: dim for dim in all_dims}.get(dimension, default) def get_dimension_index(self, dim): """ Returns the index of the requested dimension. 
""" if isinstance(dim, Dimension): dim = dim.name if isinstance(dim, int): if dim < len(self.dimensions()): return dim else: return IndexError('Dimension index out of bounds') try: sanitized = {dimension_sanitizer(kd): kd for kd in self.dimensions('key', True)} return [d.name for d in self.dimensions()].index(sanitized.get(dim, dim)) except ValueError: raise Exception("Dimension %s not found in %s." % (dim, self.__class__.__name__)) def get_dimension_type(self, dim): """ Returns the specified Dimension type if specified or if the dimension_values types are consistent otherwise None is returned. """ dim_obj = self.get_dimension(dim) if dim_obj and dim_obj.type is not None: return dim_obj.type dim_vals = [type(v) for v in self.dimension_values(dim)] if len(set(dim_vals)) == 1: return dim_vals[0] else: return None def __getitem__(self, key): """ Multi-dimensional indexing semantics is determined by the list of key dimensions. For instance, the first indexing component will index the first key dimension. After the key dimensions are given, *either* a value dimension name may follow (if there are multiple value dimensions) *or* deep dimensions may then be listed (for applicable deep dimensions). """ return self def select(self, selection_specs=None, **kwargs): """ Allows slicing or indexing into the Dimensioned object by supplying the dimension and index/slice as key value pairs. Select descends recursively through the data structure applying the key dimension selection. The 'value' keyword allows selecting the value dimensions on objects which have any declared. The selection may also be selectively applied to specific objects by supplying the selection_specs as an iterable of type.group.label specs, types or functions. """ # Apply all indexes applying on this object vdims = self.vdims+['value'] if self.vdims else [] kdims = self.kdims local_kwargs = {k: v for k, v in kwargs.items() if k in kdims+vdims} # Check selection_spec applies if selection_specs is not None: matches = any(self.matches(spec) for spec in selection_specs) else: matches = True # Apply selection to self if local_kwargs and matches: ndims = (len(self.dimensions()) if any(d in self.vdims for d in kwargs) else self.ndims) select = [slice(None) for i in range(ndims)] for dim, val in local_kwargs.items(): if dim == 'value': select += [val] else: if isinstance(val, tuple): val = slice(*val) select[self.get_dimension_index(dim)] = val if self._deep_indexable: selection = self.get(tuple(select), None) if selection is None: selection = self.clone(shared_data=False) else: selection = self[tuple(select)] else: selection = self if not isinstance(selection, Dimensioned): return selection elif type(selection) is not type(self) and isinstance(selection, Dimensioned): # Apply the selection on the selected object of a different type val_dim = ['value'] if selection.vdims else [] key_dims = selection.dimensions('key', label=True) + val_dim if any(kw in key_dims for kw in kwargs): selection = selection.select(selection_specs, **kwargs) elif isinstance(selection, Dimensioned) and selection._deep_indexable: # Apply the deep selection on each item in local selection items = [] for k, v in selection.items(): val_dim = ['value'] if v.vdims else [] dims = list(zip(*[(dimension_sanitizer(kd), kd) for kd in v.dimensions('key', label=True)])) kdims, skdims = dims if dims else ([], []) key_dims = list(kdims) + list(skdims) + val_dim if any(kw in key_dims for kw in kwargs): items.append((k, v.select(selection_specs, **kwargs))) else: items.append((k, v)) 
selection = selection.clone(items) return selection def dimension_values(self, dimension): """ Returns the values along the specified dimension. This method must be implemented for all Dimensioned type. """ val = self._cached_constants.get(dimension, None) if val: return np.array([val]) else: raise Exception("Dimension %s not found in %s." % (dimension, self.__class__.__name__)) def range(self, dimension, data_range=True): """ Returns the range of values along the specified dimension. If data_range is True, the data may be used to try and infer the appropriate range. Otherwise, (None,None) is returned to indicate that no range is defined. """ dimension = self.get_dimension(dimension) if dimension is None: return (None, None) if dimension.range != (None, None): return dimension.range elif not data_range: return (None, None) soft_range = [r for r in dimension.soft_range if r is not None] if dimension in self.kdims or dimension in self.vdims: dim_vals = self.dimension_values(dimension.name) return find_range(dim_vals, soft_range) dname = dimension.name match_fn = lambda x: dname in x.dimensions(['key', 'value'], True) range_fn = lambda x: x.range(dname) ranges = self.traverse(range_fn, [match_fn]) drange = max_range(ranges) return drange def __repr__(self): return PrettyPrinter.pprint(self) def __call__(self, options=None, **kwargs): """ Apply the supplied options to a clone of the object which is then returned. Note that if no options are supplied at all, all ids are reset. """ groups = set(Store.options().groups.keys()) if kwargs and set(kwargs) <= groups: if not all(isinstance(v, dict) for v in kwargs.values()): raise Exception("The %s options must be specified using dictionary groups" % ','.join(repr(k) for k in kwargs.keys())) # Check whether the user is specifying targets (such as 'Image.Foo') entries = Store.options().children targets = [k.split('.')[0] in entries for grp in kwargs.values() for k in grp] if any(targets) and not all(targets): raise Exception("Cannot mix target specification keys such as 'Image' with non-target keywords.") elif not any(targets): # Not targets specified - add current object as target sanitized_group = group_sanitizer(self.group) if self.label: identifier = ('%s.%s.%s' % (self.__class__.__name__, sanitized_group, label_sanitizer(self.label))) elif sanitized_group != self.__class__.__name__: identifier = '%s.%s' % (self.__class__.__name__, sanitized_group) else: identifier = self.__class__.__name__ kwargs = {k:{identifier:v} for k,v in kwargs.items()} if options is None and kwargs=={}: deep_clone = self.map(lambda x: x.clone(id=None)) else: deep_clone = self.map(lambda x: x.clone(id=x.id)) StoreOptions.set_options(deep_clone, options, **kwargs) return deep_clone class ViewableElement(Dimensioned): """ A ViewableElement is a dimensioned datastructure that may be associated with a corresponding atomic visualization. An atomic visualization will display the data on a single set of axes (i.e. excludes multiple subplots that are displayed at once). The only new parameter introduced by ViewableElement is the title associated with the object for display. """ __abstract = True _auxiliary_component = False group = param.String(default='ViewableElement', constant=True)
1
13968
Do you think we might need this anywhere else in HoloViews?
holoviz-holoviews
py