Dataset columns:

  patch  stringlengths  17–31.2k
  y      int64          1–1
  oldf   stringlengths  0–2.21M
  idx    int64          1–1
  id     int64          4.29k–68.4k
  msg    stringlengths  8–843
  proj   stringclasses  212 values
  lang   stringclasses  9 values
@@ -72,9 +72,13 @@ module Beaker
     def cmd_line host, cmd = @command, env = @environment, pc = @prepend_cmds
       env_string = host.environment_string( env )
       prepend_commands = host.prepend_commands( cmd, pc, :cmd_exe => @cmdexe )
+      if host[:platform] =~ /cisco_nexus/ && host[:user] != 'root'
+        append_command = '"'
+        cmd = cmd.gsub('"') { '\\"' }
+      end
 
       # This will cause things like `puppet -t -v agent` which is maybe bad.
-      cmd_line_array = [env_string, prepend_commands, cmd, options_string, args_string]
+      cmd_line_array = [env_string, prepend_commands, cmd, options_string, args_string, append_command]
       cmd_line_array.compact.reject( &:empty? ).join( ' ' )
     end
1
module Beaker # An object that represents a "command" on a remote host. Is responsible # for munging the environment correctly. Probably poorly named. # # @api public class Command # A string representing the (possibly) incomplete command attr_accessor :command # A hash key-values where the keys are environment variables to be set attr_accessor :environment # A hash of options. Keys with values of nil are considered flags attr_accessor :options # An array of additional arguments to be supplied to the command attr_accessor :args # @param [String] command The program to call, either an absolute path # or one in the PATH (can be overridden) # @param [Array] args These are addition arguments to the command # @param [Hash] options These are addition options to the command. They # will be added in "--key=value" after the command # but before the arguments. There is a special key, # 'ENV', that won't be used as a command option, # but instead can be used to set any default # environment variables # # @example Recommended usage programmatically: # Command.new('git add', files, :patch => true, 'ENV' => {'PATH' => '/opt/csw/bin'}) # # @example My favorite example of a signature that we must maintain # Command.new('puppet', :resource, 'scheduled_task', name, # [ 'ensure=present', # 'command=c:\\\\windows\\\\system32\\\\notepad2.exe', # "arguments=args-#{name}" ] ) # # @note For backwards compatability we must support any number of strings # or symbols (or arrays of strings an symbols) and essentially # ensure they are in a flattened array, coerced to strings, and # call #join(' ') on it. We have options for the command line # invocation that must be turned into '--key=value' and similarly # joined as well as a hash of environment key value pairs, and # finally we need a hash of options to control the default envs that # are included. def initialize command, args = [], options = {} @command = command @options = options @args = args @environment = {} @cmdexe = @options.delete(:cmdexe) || false @prepend_cmds = @options.delete(:prepend_cmds) || nil # this is deprecated and will not allow you to use a command line # option of `--environment`, please use ENV instead. [:ENV, :environment, 'environment', 'ENV'].each do |k| if @options[k].is_a?(Hash) @environment = @environment.merge(@options.delete(k)) end end end # @param [Host] host An object that implements {Beaker::Host}'s # interface. # @param [String] cmd An command to call. # @param [Hash] env An optional hash of environment variables to be used # @param [String] pc An optional list of commands to prepend # # @return [String] This returns the fully formed command line invocation. def cmd_line host, cmd = @command, env = @environment, pc = @prepend_cmds env_string = host.environment_string( env ) prepend_commands = host.prepend_commands( cmd, pc, :cmd_exe => @cmdexe ) # This will cause things like `puppet -t -v agent` which is maybe bad. cmd_line_array = [env_string, prepend_commands, cmd, options_string, args_string] cmd_line_array.compact.reject( &:empty? ).join( ' ' ) end # @param [Hash] opts These are the options that the command takes # # @return [String] String of the options and flags for command. # # @note Why no. Not the least bit Unixy, why do you ask? 
def options_string opts = @options flags = [] options = opts.dup options.each_key do |key| if options[key] == nil flags << key options.delete(key) end end short_flags, long_flags = flags.partition {|flag| flag.to_s.length == 1 } parsed_short_flags = short_flags.map {|f| "-#{f}" } parsed_long_flags = long_flags.map {|f| "--#{f}" } short_opts, long_opts = {}, {} options.each_key do |key| if key.to_s.length == 1 short_opts[key] = options[key] else long_opts[key] = options[key] end end parsed_short_opts = short_opts.map {|k,v| "-#{k}=#{v}" } parsed_long_opts = long_opts.map {|k,v| "--#{k}=#{v}" } return (parsed_short_flags + parsed_long_flags + parsed_short_opts + parsed_long_opts).join(' ') end # @param [Array] args An array of arguments to the command. # # @return [String] String of the arguments for command. def args_string args = @args args.flatten.compact.join(' ') end end class PuppetCommand < Command def initialize *args command = "puppet #{args.shift}" opts = args.last.is_a?(Hash) ? args.pop : Hash.new opts['ENV'] ||= Hash.new opts[:cmdexe] = true super( command, args, opts ) end end class HostCommand < Command def cmd_line host eval "\"#{@command}\"" end end class SedCommand < Command # sets up a SedCommand for a particular platform # # the purpose is to abstract away platform-dependent details of the sed command # # @param [String] platform The host platform string # @param [String] expression The sed expression # @param [String] filename The file to apply the sed expression to # @param [Hash{Symbol=>String}] opts Additional options # @option opts [String] :temp_file The temp file to use for in-place substitution # (only applies to solaris hosts, they don't provide the -i option) # # @return a new {SedCommand} object def initialize platform, expression, filename, opts = {} command = "sed -i -e \"#{expression}\" #{filename}" if platform =~ /solaris|aix|osx|openbsd/ command.slice! '-i ' temp_file = opts[:temp_file] ? opts[:temp_file] : "#{filename}.tmp" command << " > #{temp_file} && mv #{temp_file} #{filename} && rm -f #{temp_file}" end args = [] opts['ENV'] ||= Hash.new super( command, args, opts ) end end end
1
12,498
Doesn't this go in the opposite direction of #1051 / #1062 by putting platform-specific logic back into this module?
voxpupuli-beaker
rb
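To make the reviewer's concern concrete, here is a hypothetical sketch of the direction #1051 / #1062 took: the platform check moves behind a Host hook so Command#cmd_line stays platform-agnostic. The `append_commands` hook is an assumption for illustration, not an existing Beaker API.

# Hypothetical sketch: `append_commands` is an assumed Host hook, not an
# existing Beaker method. A cisco_nexus host running as non-root would
# escape embedded quotes in cmd and hand back the closing '"'.
def cmd_line host, cmd = @command, env = @environment, pc = @prepend_cmds
  env_string = host.environment_string( env )
  prepend_commands = host.prepend_commands( cmd, pc, :cmd_exe => @cmdexe )
  cmd, append_commands = host.append_commands( cmd )
  cmd_line_array = [env_string, prepend_commands, cmd, options_string, args_string, append_commands]
  cmd_line_array.compact.reject( &:empty? ).join( ' ' )
end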
@@ -31,7 +31,7 @@ export const refreshAuthentication = async () => {
 
 		// We should really be using state management. This is terrible.
 		global.googlesitekit.setup = global.googlesitekit.setup || {};
-		global.googlesitekit.setup.isAuthenticated = response.isAuthenticated;
+		global.googlesitekit.setup.authenticated = response.authenticated;
 		global.googlesitekit.setup.requiredScopes = response.requiredScopes;
 		global.googlesitekit.setup.grantedScopes = response.grantedScopes;
 		global.googlesitekit.setup.needReauthenticate = requiredAndGrantedScopes.length < response.requiredScopes.length;
1
/**
 * Refresh Authentication utility.
 *
 * Site Kit by Google, Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Internal dependencies
 */
import data, { TYPE_CORE } from '../components/data';

export const refreshAuthentication = async () => {
	try {
		const response = await data.get( TYPE_CORE, 'user', 'authentication' );

		const requiredAndGrantedScopes = response.grantedScopes.filter( ( scope ) => {
			return -1 !== response.requiredScopes.indexOf( scope );
		} );

		// We should really be using state management. This is terrible.
		global.googlesitekit.setup = global.googlesitekit.setup || {};
		global.googlesitekit.setup.isAuthenticated = response.isAuthenticated;
		global.googlesitekit.setup.requiredScopes = response.requiredScopes;
		global.googlesitekit.setup.grantedScopes = response.grantedScopes;
		global.googlesitekit.setup.needReauthenticate = requiredAndGrantedScopes.length < response.requiredScopes.length;
	} catch ( e ) { // eslint-disable-line no-empty
	}
};
1
28,384
I think this is the only line in JS legacy code that should be updated based on the renaming. `global.googlesitekit.setup.isAuthenticated = response.authenticated;` would be sufficient to keep the old name for the legacy JS data (which will be phased out in the long term) while supporting the new name in the API route. None of the updates to the JS components above would then be necessary.
google-site-kit-wp
js
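Sketched out, the suggestion above leaves the legacy global name alone and only remaps it to the renamed response field; the surrounding lines are unchanged from the original file.

// The reviewer's minimal change: keep the old legacy-JS global name while
// reading the renamed API response field. Only the isAuthenticated line
// differs from the original.
global.googlesitekit.setup = global.googlesitekit.setup || {};
global.googlesitekit.setup.isAuthenticated = response.authenticated; // old global, new field
global.googlesitekit.setup.requiredScopes = response.requiredScopes;
global.googlesitekit.setup.grantedScopes = response.grantedScopes;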
@@ -36,7 +36,7 @@ function sendCommandToFrame(node, parameters, resolve, reject) {
 				reject(err('No response from frame', node));
 			}
 		}, 0);
-	}, 500);
+	}, parameters.options?.pingWaitTime ?? 500);
 
 	// send 'axe.ping' to the frame
 	respondable(win, 'axe.ping', null, undefined, () => {
1
import getSelector from './get-selector';
import respondable from './respondable';
import log from '../log';

function err(message, node) {
	var selector;
	// TODO: es-modules_tree
	if (axe._tree) {
		selector = getSelector(node);
	}
	return new Error(message + ': ' + (selector || node));
}

/**
 * Sends a command to an instance of axe in the specified frame
 * @param  {Element} node       The frame element to send the message to
 * @param  {Object} parameters  Parameters to pass to the frame
 * @param  {Function} callback  Function to call when results from the frame has returned
 */
function sendCommandToFrame(node, parameters, resolve, reject) {
	var win = node.contentWindow;
	if (!win) {
		log('Frame does not have a content window', node);
		resolve(null);
		return;
	}

	// give the frame .5s to respond to 'axe.ping', else log failed response
	var timeout = setTimeout(() => {
		// This double timeout is important for allowing iframes to respond
		// DO NOT REMOVE
		timeout = setTimeout(() => {
			if (!parameters.debug) {
				resolve(null);
			} else {
				reject(err('No response from frame', node));
			}
		}, 0);
	}, 500);

	// send 'axe.ping' to the frame
	respondable(win, 'axe.ping', null, undefined, () => {
		clearTimeout(timeout);

		// Give axe 60s (or user-supplied value) to respond to 'axe.start'
		var frameWaitTime =
			(parameters.options && parameters.options.frameWaitTime) || 60000;

		timeout = setTimeout(function collectResultFramesTimeout() {
			reject(err('Axe in frame timed out', node));
		}, frameWaitTime);

		// send 'axe.start' and send the callback if it responded
		respondable(win, 'axe.start', parameters, undefined, data => {
			clearTimeout(timeout);
			if (data instanceof Error === false) {
				resolve(data);
			} else {
				reject(data);
			}
		});
	});
}

export default sendCommandToFrame;
1
17,243
Technically, this would allow `pingWaitTime` to be `false`. I don't know if it matters here.
dequelabs-axe-core
js
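A standalone illustration of the point: `??` only falls back on `null` or `undefined`, so an explicit `false` reaches `setTimeout`, which coerces it to `0`.

// Nullish coalescing keeps falsy-but-defined values such as `false`.
const options = { pingWaitTime: false };
console.log(options?.pingWaitTime ?? 500); // false: setTimeout(fn, false) waits ~0ms
console.log(options?.pingWaitTime || 500); // 500, but `||` would also override a valid 0
// A type check avoids both edge cases (illustrative, not the patch's code):
const pingWaitTime =
	typeof options.pingWaitTime === 'number' ? options.pingWaitTime : 500;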
@@ -11,11 +11,15 @@ TEST_DATA = '{"StreamName": "NotExistingStream"}'
 
 class KinesisListenerTest(unittest.TestCase):
     def test_describe_stream_summary_is_redirected(self):
-        describe_stream_summary_header = {'X-Amz-Target': 'Kinesis_20131202.DescribeStreamSummary'}
+        if config.KINESIS_PROVIDER == 'kinesalite':
+            describe_stream_summary_header = {'X-Amz-Target': 'Kinesis_20131202.DescribeStreamSummary'}
 
-        response = UPDATE_KINESIS.forward_request('POST', '/', TEST_DATA, describe_stream_summary_header)
+            response = UPDATE_KINESIS.forward_request('POST', '/', TEST_DATA,
+                                                      describe_stream_summary_header)
 
-        self.assertEqual(response, True)
+            self.assertEqual(response, True)
+        else:
+            self.assertTrue(True)
 
     def test_random_error_on_put_record(self):
         put_record_header = {'X-Amz-Target': 'Kinesis_20131202.PutRecord'}
1
import json
import unittest

from requests.models import Response

from localstack import config
from localstack.services.kinesis.kinesis_listener import UPDATE_KINESIS
from localstack.utils.common import to_str

TEST_DATA = '{"StreamName": "NotExistingStream"}'


class KinesisListenerTest(unittest.TestCase):
    def test_describe_stream_summary_is_redirected(self):
        describe_stream_summary_header = {'X-Amz-Target': 'Kinesis_20131202.DescribeStreamSummary'}

        response = UPDATE_KINESIS.forward_request('POST', '/', TEST_DATA, describe_stream_summary_header)

        self.assertEqual(response, True)

    def test_random_error_on_put_record(self):
        put_record_header = {'X-Amz-Target': 'Kinesis_20131202.PutRecord'}
        config.KINESIS_ERROR_PROBABILITY = 1.0

        response = UPDATE_KINESIS.forward_request('POST', '/', TEST_DATA, put_record_header)

        self.assertEqual(response.status_code, 400)
        resp_json = json.loads(to_str(response.content))
        self.assertEqual(resp_json['ErrorCode'], 'ProvisionedThroughputExceededException')
        self.assertEqual(resp_json['ErrorMessage'],
                         'Rate exceeded for shard X in stream Y under account Z.')

    def test_random_error_on_put_records(self):
        put_records_header = {'X-Amz-Target': 'Kinesis_20131202.PutRecords'}
        data_with_one_record = '{"Records": ["test"]}'
        config.KINESIS_ERROR_PROBABILITY = 1.0

        response = UPDATE_KINESIS.forward_request('POST', '/', data_with_one_record, put_records_header)

        self.assertEqual(response.status_code, 200)
        resp_json = json.loads(to_str(response.content))
        self.assertEqual(resp_json['FailedRecordCount'], 1)
        self.assertEqual(len(resp_json['Records']), 1)
        failed_record = resp_json['Records'][0]
        self.assertEqual(failed_record['ErrorCode'], 'ProvisionedThroughputExceededException')
        self.assertEqual(failed_record['ErrorMessage'],
                         'Rate exceeded for shard X in stream Y under account Z.')

    def test_overwrite_update_shard_count_on_error(self):
        update_shard_count_header = {'X-Amz-Target': 'Kinesis_20131202.UpdateShardCount'}
        request_data = '{"StreamName": "TestStream", "TargetShardCount": 2, "ScalingType": "UNIFORM_SCALING"}'
        error_response = Response()
        error_response.status_code = 400

        response = UPDATE_KINESIS.return_response('POST', '/', request_data, update_shard_count_header, error_response)

        self.assertEqual(response.status_code, 200)
        resp_json = json.loads(to_str(response.content))
        self.assertEqual(resp_json['StreamName'], 'TestStream')
        self.assertEqual(resp_json['CurrentShardCount'], 1)
        self.assertEqual(resp_json['TargetShardCount'], 2)
1
12,569
I only want the proxy request for this test to run for kinesalite. So this just passes trivially (asserts true) if the KINESIS_PROVIDER is kinesis-mock.
localstack-localstack
py
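Given that intent, a `skipUnless` guard is a sketch of an arguably cleaner alternative to the `else: self.assertTrue(True)` branch; this illustrates the idea and is not the change the author made.

# Sketch: skip the test entirely when the provider is not kinesalite.
import unittest

from localstack import config
from localstack.services.kinesis.kinesis_listener import UPDATE_KINESIS

TEST_DATA = '{"StreamName": "NotExistingStream"}'


class KinesisListenerTest(unittest.TestCase):

    @unittest.skipUnless(config.KINESIS_PROVIDER == 'kinesalite',
                         'DescribeStreamSummary is only proxied for kinesalite')
    def test_describe_stream_summary_is_redirected(self):
        describe_stream_summary_header = {'X-Amz-Target': 'Kinesis_20131202.DescribeStreamSummary'}

        response = UPDATE_KINESIS.forward_request('POST', '/', TEST_DATA,
                                                  describe_stream_summary_header)

        self.assertEqual(response, True)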
@@ -3,7 +3,7 @@
 #A part of NonVisual Desktop Access (NVDA)
 #This file is covered by the GNU General Public License.
 #See the file COPYING for more details.
-#Copyright (C) 2006-2017 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Joseph Lee
+#Copyright (C) 2006-2019 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Joseph Lee
 
 from copy import deepcopy
 import os
1
# -*- coding: UTF-8 -*- #synthDriverHandler.py #A part of NonVisual Desktop Access (NVDA) #This file is covered by the GNU General Public License. #See the file COPYING for more details. #Copyright (C) 2006-2017 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Joseph Lee from copy import deepcopy import os import pkgutil import config import baseObject import winVersion import globalVars from logHandler import log from synthSettingsRing import SynthSettingsRing import languageHandler import speechDictHandler import synthDrivers _curSynth=None _audioOutputDevice=None def initialize(): config.addConfigDirsToPythonPackagePath(synthDrivers) config.post_configProfileSwitch.register(handlePostConfigProfileSwitch) def changeVoice(synth, voice): # This function can be called with no voice if the synth doesn't support the voice setting (only has one voice). if voice: synth.voice = voice c=config.conf["speech"][synth.name] c.spec=synth.getConfigSpec() #start or update the synthSettingsRing if globalVars.settingsRing: globalVars.settingsRing.updateSupportedSettings(synth) else: globalVars.settingsRing = SynthSettingsRing(synth) speechDictHandler.loadVoiceDict(synth) def _getSynthDriver(name): return __import__("synthDrivers.%s" % name, globals(), locals(), ("synthDrivers",)).SynthDriver def getSynthList(): synthList=[] # The synth that should be placed at the end of the list. lastSynth = None for loader, name, isPkg in pkgutil.iter_modules(synthDrivers.__path__): if name.startswith('_'): continue try: synth=_getSynthDriver(name) except: log.error("Error while importing SynthDriver %s"%name,exc_info=True) continue try: if synth.check(): if synth.name == "silence": lastSynth = (synth.name,synth.description) else: synthList.append((synth.name,synth.description)) else: log.debugWarning("Synthesizer '%s' doesn't pass the check, excluding from list"%name) except: log.error("",exc_info=True) synthList.sort(key=lambda s : s[1].lower()) if lastSynth: synthList.append(lastSynth) return synthList def getSynth(): return _curSynth def getSynthInstance(name): newSynth=_getSynthDriver(name)() if config.conf["speech"].isSet(name): newSynth.loadSettings() else: # Create the new section. config.conf["speech"][name]={} if newSynth.isSupported("voice"): voice=newSynth.voice else: voice=None # We need to call changeVoice here so that required initialisation can be performed. changeVoice(newSynth,voice) newSynth.saveSettings() #save defaults return newSynth # The synthDrivers that should be used by default. # The first that successfully initializes will be used when config is set to auto (I.e. new installs of NVDA). defaultSynthPriorityList=['espeak','silence'] if winVersion.winVersion.major>=10: # Default to OneCore on Windows 10 and above defaultSynthPriorityList.insert(0,'oneCore') def setSynth(name,isFallback=False): global _curSynth,_audioOutputDevice if name is None: _curSynth.terminate() _curSynth=None return True if name=='auto': name=defaultSynthPriorityList[0] if _curSynth: _curSynth.cancel() _curSynth.terminate() prevSynthName = _curSynth.name _curSynth = None else: prevSynthName = None try: _curSynth=getSynthInstance(name) _audioOutputDevice=config.conf["speech"]["outputDevice"] if not isFallback: config.conf["speech"]["synth"]=name log.info("Loaded synthDriver %s"%name) return True except: log.error("setSynth", exc_info=True) # As there was an error loading this synth: if prevSynthName: # There was a previous synthesizer, so switch back to that one. 
setSynth(prevSynthName,isFallback=True) else: # There was no previous synth, so fallback to the next available default synthesizer that has not been tried yet. try: nextIndex=defaultSynthPriorityList.index(name)+1 except ValueError: nextIndex=0 if nextIndex<len(defaultSynthPriorityList): newName=defaultSynthPriorityList[nextIndex] setSynth(newName,isFallback=True) return False def handlePostConfigProfileSwitch(): conf = config.conf["speech"] if conf["synth"] != _curSynth.name or conf["outputDevice"] != _audioOutputDevice: setSynth(conf["synth"]) return _curSynth.loadSettings(onlyChanged=True) class SynthSetting(object): """Represents a synthesizer setting such as voice or variant. """ #: Configuration specification of this particular setting for config file validator. #: @type: str configSpec="string(default=None)" def __init__(self,name,displayNameWithAccelerator,availableInSynthSettingsRing=True,displayName=None): """ @param name: internal name of the setting @type name: str @param displayNameWithAccelerator: the localized string shown in voice settings dialog @type displayNameWithAccelerator: str @param displayName: the localized string used in synth settings ring or None to use displayNameWithAccelerator @type displayName: str @param availableInSynthSettingsRing: Will this option be available in synthesizer settings ring? @type availableInSynthSettingsRing: bool """ self.name=name self.displayNameWithAccelerator=displayNameWithAccelerator if not displayName: # Strip accelerator from displayNameWithAccelerator. displayName=displayNameWithAccelerator.replace("&","") self.displayName=displayName self.availableInSynthSettingsRing=availableInSynthSettingsRing class NumericSynthSetting(SynthSetting): """Represents a numeric synthesizer setting such as rate, volume or pitch.""" configSpec="integer(default=50,min=0,max=100)" def __init__(self,name,displayNameWithAccelerator,availableInSynthSettingsRing=True,minStep=1,normalStep=5,largeStep=10,displayName=None): """ @param minStep: Specifies the minimum step between valid values for each numeric setting. For example, if L{minStep} is set to 10, setting values can only be multiples of 10; 10, 20, 30, etc. @type minStep: int @param normalStep: Specifies the step between values that a user will normally prefer. This is used in the settings ring. @type normalStep: int @param largeStep: Specifies the step between values if a large adjustment is desired. This is used for pageUp/pageDown on sliders in the Voice Settings dialog. @type largeStep: int @note: If necessary, the step values will be normalised so that L{minStep} <= L{normalStep} <= L{largeStep}. """ super(NumericSynthSetting,self).__init__(name,displayNameWithAccelerator,availableInSynthSettingsRing=availableInSynthSettingsRing,displayName=displayName) self.minStep=minStep self.normalStep=max(normalStep,minStep) self.largeStep=max(largeStep,self.normalStep) class BooleanSynthSetting(SynthSetting): """Represents a boolean synthesiser setting such as rate boost. """ configSpec = "boolean(default=False)" def __init__(self, name,displayNameWithAccelerator,availableInSynthSettingsRing=False,displayName=None): super(BooleanSynthSetting, self).__init__(name,displayNameWithAccelerator,availableInSynthSettingsRing=availableInSynthSettingsRing,displayName=displayName) class SynthDriver(baseObject.AutoPropertyObject): """Abstract base synthesizer driver. 
Each synthesizer driver should be a separate Python module in the root synthDrivers directory containing a SynthDriver class which inherits from this base class. At a minimum, synth drivers must set L{name} and L{description} and override the L{check} method. The methods L{speak}, L{cancel} and L{pause} should be overridden as appropriate. L{supportedSettings} should be set as appropriate for the settings supported by the synthesiser. There are factory functions to create L{SynthSetting} instances for common settings; e.g. L{VoiceSetting} and L{RateSetting}. Each setting is retrieved and set using attributes named after the setting; e.g. the L{voice} attribute is used for the L{voice} setting. These will usually be properties. The L{lastIndex} attribute should also be provided. @ivar supportedSettings: The settings supported by the synthesiser. @type supportedSettings: list or tuple of L{SynthSetting} @ivar voice: Unique string identifying the current voice. @type voice: str @ivar availableVoices: The available voices. @type availableVoices: OrderedDict of L{VoiceInfo} keyed by VoiceInfo's ID @ivar pitch: The current pitch; ranges between 0 and 100. @type pitch: int @ivar rate: The current rate; ranges between 0 and 100. @type rate: int @ivar volume: The current volume; ranges between 0 and 100. @type volume: int @ivar variant: The current variant of the voice. @type variant: str @ivar availableVariants: The available variants of the voice. @type availableVariants: OrderedDict of [L{VoiceInfo} keyed by VoiceInfo's ID @ivar inflection: The current inflection; ranges between 0 and 100. @type inflection: int @ivar lastIndex: The index of the chunk of text which was last spoken or C{None} if no index. @type lastIndex: int """ #: The name of the synth; must be the original module file name. #: @type: str name = "" #: A description of the synth. #: @type: str description = "" @classmethod def LanguageSetting(cls): """Factory function for creating a language setting.""" # Translators: Label for a setting in voice settings dialog. return SynthSetting("language",_("&Language"), # Translators: Label for a setting in synth settings ring. displayName=pgettext('synth setting','Language')) @classmethod def VoiceSetting(cls): """Factory function for creating voice setting.""" # Translators: Label for a setting in voice settings dialog. return SynthSetting("voice",_("&Voice"), # Translators: Label for a setting in synth settings ring. displayName=pgettext('synth setting','Voice')) @classmethod def VariantSetting(cls): """Factory function for creating variant setting.""" # Translators: Label for a setting in voice settings dialog. return SynthSetting("variant",_("V&ariant"), # Translators: Label for a setting in synth settings ring. displayName=pgettext('synth setting','Variant')) @classmethod def RateSetting(cls,minStep=1): """Factory function for creating rate setting.""" # Translators: Label for a setting in voice settings dialog. return NumericSynthSetting("rate",_("&Rate"),minStep=minStep, # Translators: Label for a setting in synth settings ring. displayName=pgettext('synth setting','Rate')) @classmethod def VolumeSetting(cls,minStep=1): """Factory function for creating volume setting.""" # Translators: Label for a setting in voice settings dialog. return NumericSynthSetting("volume",_("V&olume"),minStep=minStep,normalStep=10, # Translators: Label for a setting in synth settings ring. 
displayName=pgettext('synth setting','Volume')) @classmethod def PitchSetting(cls,minStep=1): """Factory function for creating pitch setting.""" # Translators: Label for a setting in voice settings dialog. return NumericSynthSetting("pitch",_("&Pitch"),minStep=minStep, # Translators: Label for a setting in synth settings ring. displayName=pgettext('synth setting','Pitch')) @classmethod def InflectionSetting(cls,minStep=1): """Factory function for creating inflection setting.""" # Translators: Label for a setting in voice settings dialog. return NumericSynthSetting("inflection",_("&Inflection"),minStep=minStep, # Translators: Label for a setting in synth settings ring. displayName=pgettext('synth setting','Inflection')) @classmethod def check(cls): """Determine whether this synth is available. The synth will be excluded from the list of available synths if this method returns C{False}. For example, if this synth requires installation and it is not installed, C{False} should be returned. @return: C{True} if this synth is available, C{False} if not. @rtype: bool """ return False def __init__(self): """Initialize this synth driver. This method can also set default settings for the synthesizer. @raise Exception: If an error occurs. @postcondition: This driver can be used. """ def terminate(self): """Terminate this synth driver. This should be used for any required clean up. @precondition: L{initialize} has been called. @postcondition: This driver can no longer be used. """ def speak(self,speechSequence): """ Speaks the given sequence of text and speech commands. This base implementation will fallback to making use of the old speakText and speakCharacter methods. But new synths should override this method to support its full functionality. @param speechSequence: a list of text strings and SpeechCommand objects (such as index and parameter changes). @type speechSequence: list of string and L{speechCommand} """ import speech lastIndex=None text="" origSpeakFunc=self.speakText speechSequence=iter(speechSequence) while True: item = next(speechSequence,None) if text and (item is None or isinstance(item,(speech.IndexCommand,speech.CharacterModeCommand))): # Either we're about to handle a command or this is the end of the sequence. # Speak the text since the last command we handled. origSpeakFunc(text,index=lastIndex) text="" lastIndex=None if item is None: # No more items. break if isinstance(item,basestring): # Merge the text between commands into a single chunk. text+=item elif isinstance(item,speech.IndexCommand): lastIndex=item.index elif isinstance(item,speech.CharacterModeCommand): origSpeakFunc=self.speakCharacter if item.state else self.speakText elif isinstance(item,speech.SpeechCommand): log.debugWarning("Unknown speech command: %s"%item) else: log.error("Unknown item in speech sequence: %s"%item) def speakText(self, text, index=None): """Speak some text. This method is deprecated. Instead implement speak. @param text: The chunk of text to speak. @type text: str @param index: An index (bookmark) to associate with this chunk of text, C{None} if no index. @type index: int @note: If C{index} is provided, the C{lastIndex} property should return this index when the synth is speaking this chunk of text. """ raise NotImplementedError def speakCharacter(self, character, index=None): """Speak some character. This method is deprecated. Instead implement speak. @param character: The character to speak. 
@type character: str @param index: An index (bookmark) to associate with this chunk of speech, C{None} if no index. @type index: int @note: If C{index} is provided, the C{lastIndex} property should return this index when the synth is speaking this chunk of text. """ self.speakText(character,index) def _get_lastIndex(self): """Obtain the index of the chunk of text which was last spoken. When the synth speaks text associated with a particular index, this method should return that index. That is, this property should update for each chunk of text spoken by the synth. @return: The index or C{None} if no index. @rtype: int """ return None def cancel(self): """Silence speech immediately. """ def _get_language(self): return self.availableVoices[self.voice].language def _set_language(self,language): raise NotImplementedError def _get_availableLanguages(self): raise NotImplementedError def _get_voice(self): raise NotImplementedError def _set_voice(self, value): pass def _getAvailableVoices(self): """fetches an ordered dictionary of voices that the synth supports. @returns: an OrderedDict of L{VoiceInfo} instances representing the available voices, keyed by ID @rtype: OrderedDict """ raise NotImplementedError def _get_availableVoices(self): if not hasattr(self,'_availableVoices'): self._availableVoices=self._getAvailableVoices() return self._availableVoices def _get_rate(self): return 0 def _set_rate(self, value): pass def _get_pitch(self): return 0 def _set_pitch(self, value): pass def _get_volume(self): return 0 def _set_volume(self, value): pass def _get_variant(self): raise NotImplementedError def _set_variant(self, value): pass def _getAvailableVariants(self): """fetches an ordered dictionary of variants that the synth supports, keyed by ID @returns: an ordered dictionary of L{VoiceInfo} instances representing the available variants @rtype: OrderedDict """ raise NotImplementedError def _get_availableVariants(self): if not hasattr(self,'_availableVariants'): self._availableVariants=self._getAvailableVariants() return self._availableVariants def _get_supportedSettings(self): raise NotImplementedError def getConfigSpec(self): spec=deepcopy(config.confspec["speech"]["__many__"]) for setting in self.supportedSettings: spec[setting.name]=setting.configSpec return spec def _get_inflection(self): return 0 def _set_inflection(self, value): pass def pause(self, switch): """Pause or resume speech output. @param switch: C{True} to pause, C{False} to resume (unpause). @type switch: bool """ pass @classmethod def _paramToPercent(cls, current, min, max): """Convert a raw parameter value to a percentage given the current, minimum and maximum raw values. @param current: The current value. @type current: int @param min: The minimum value. @type current: int @param max: The maximum value. @type max: int """ return int(round(float(current - min) / (max - min) * 100)) @classmethod def _percentToParam(cls, percent, min, max): """Convert a percentage to a raw parameter value given the current percentage and the minimum and maximum raw parameter values. @param percent: The current percentage. @type percent: int @param min: The minimum raw parameter value. @type min: int @param max: The maximum raw parameter value. @type max: int """ return int(round(float(percent) / 100 * (max - min) + min)) def isSupported(self,settingName): """Checks whether given setting is supported by the synthesizer. 
@rtype: l{bool} """ for s in self.supportedSettings: if s.name==settingName: return True return False def saveSettings(self): conf=config.conf["speech"][self.name] for setting in self.supportedSettings: conf[setting.name]=getattr(self,setting.name) def loadSettings(self, onlyChanged=False): c=config.conf["speech"][self.name] if self.isSupported("voice"): voice=c.get("voice",None) if not onlyChanged or self.voice!=voice: try: changeVoice(self,voice) except: log.warning("Invalid voice: %s" % voice) # Update the configuration with the correct voice. c["voice"]=self.voice # We need to call changeVoice here so that required initialisation can be performed. changeVoice(self,self.voice) elif not onlyChanged: changeVoice(self,None) for s in self.supportedSettings: if s.name=="voice" or c[s.name] is None: continue val=c[s.name] if onlyChanged and getattr(self,s.name)==val: continue setattr(self,s.name,val) def _get_initialSettingsRingSetting (self): if not self.isSupported("rate") and len(self.supportedSettings)>0: #Choose first as an initial one for i,s in enumerate(self.supportedSettings): if s.availableInSynthSettingsRing: return i return None for i,s in enumerate(self.supportedSettings): if s.name=="rate": return i return None class StringParameterInfo(object): """ The base class used to represent a value of a string synth setting. """ def __init__(self,ID,name): #: The unique identifier of the value. #: @type: str self.ID=ID #: The name of the value, visible to the user. #: @type: str self.name=name class VoiceInfo(StringParameterInfo): """Provides information about a single synthesizer voice. """ def __init__(self,ID,name,language=None): #: The ID of the language this voice speaks, or None if not known or the synth implements language separate from voices self.language=language super(VoiceInfo,self).__init__(ID,name) class LanguageInfo(StringParameterInfo): """Holds information for a particular language""" def __init__(self,ID): """Given a language ID (locale name) the description is automatically calculated.""" name=languageHandler.getLanguageDescription(ID) super(LanguageInfo,self).__init__(ID,name)
1
24,937
Please add your name to the list of copyright holders.
nvaccess-nvda
py
@@ -50,6 +50,12 @@ public class IpPortManager {
 
   private boolean autoDiscoveryInited = false;
 
+  private int maxRetryTimes;
+
+  public int getMaxRetryTimes() {
+    return maxRetryTimes;
+  }
+
   public IpPortManager(ServiceRegistryConfig serviceRegistryConfig, InstanceCacheManager instanceCacheManager) {
     this.serviceRegistryConfig = serviceRegistryConfig;
     this.instanceCacheManager = instanceCacheManager;
1
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.servicecomb.serviceregistry.client;

import static org.apache.servicecomb.serviceregistry.api.Const.REGISTRY_APP_ID;
import static org.apache.servicecomb.serviceregistry.api.Const.REGISTRY_SERVICE_NAME;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.servicecomb.foundation.common.net.IpPort;
import org.apache.servicecomb.foundation.common.net.URIEndpointObject;
import org.apache.servicecomb.serviceregistry.cache.CacheEndpoint;
import org.apache.servicecomb.serviceregistry.cache.InstanceCache;
import org.apache.servicecomb.serviceregistry.cache.InstanceCacheManager;
import org.apache.servicecomb.serviceregistry.config.ServiceRegistryConfig;
import org.apache.servicecomb.serviceregistry.definition.DefinitionConst;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class IpPortManager {
  private static final Logger LOGGER = LoggerFactory.getLogger(IpPortManager.class);

  private ServiceRegistryConfig serviceRegistryConfig;

  private InstanceCacheManager instanceCacheManager;

  private String defaultTransport = "rest";

  private ArrayList<IpPort> defaultIpPort;

  private AtomicInteger currentAvailableIndex;

  private boolean autoDiscoveryInited = false;

  public IpPortManager(ServiceRegistryConfig serviceRegistryConfig, InstanceCacheManager instanceCacheManager) {
    this.serviceRegistryConfig = serviceRegistryConfig;
    this.instanceCacheManager = instanceCacheManager;

    defaultTransport = serviceRegistryConfig.getTransport();
    defaultIpPort = serviceRegistryConfig.getIpPort();
    if (defaultIpPort.size() == 0) {
      throw new IllegalArgumentException("Service center address is required to start the application.");
    }
    int initialIndex = new Random().nextInt(defaultIpPort.size());
    currentAvailableIndex = new AtomicInteger(initialIndex);
  }

  // we have to do this operation after the first time setup has already done
  public void initAutoDiscovery() {
    if (!autoDiscoveryInited && this.serviceRegistryConfig.isRegistryAutoDiscovery()) {
      instanceCacheManager.getOrCreate(REGISTRY_APP_ID,
          REGISTRY_SERVICE_NAME,
          DefinitionConst.VERSION_RULE_LATEST);
      autoDiscoveryInited = true;
    }
  }

  public IpPort getNextAvailableAddress(IpPort failedIpPort) {
    int currentIndex = currentAvailableIndex.get();
    IpPort current = getAvailableAddress(currentIndex);
    if (current.equals(failedIpPort)) {
      currentAvailableIndex.compareAndSet(currentIndex, currentIndex + 1);
      current = getAvailableAddress();
    }

    LOGGER.info("Change service center address from {} to {}", failedIpPort.toString(), current.toString());
    return current;
  }

  public IpPort getAvailableAddress() {
    return getAvailableAddress(currentAvailableIndex.get());
  }

  private IpPort getAvailableAddress(int index) {
    if (index < defaultIpPort.size()) {
      return defaultIpPort.get(index);
    }
    List<CacheEndpoint> endpoints = getDiscoveredIpPort();
    if (endpoints == null || (index >= defaultIpPort.size() + endpoints.size())) {
      currentAvailableIndex.set(0);
      return defaultIpPort.get(0);
    }
    CacheEndpoint nextEndpoint = endpoints.get(index - defaultIpPort.size());
    return new URIEndpointObject(nextEndpoint.getEndpoint());
  }

  private List<CacheEndpoint> getDiscoveredIpPort() {
    if (!autoDiscoveryInited || !this.serviceRegistryConfig.isRegistryAutoDiscovery()) {
      return null;
    }
    InstanceCache instanceCache = instanceCacheManager.getOrCreate(REGISTRY_APP_ID,
        REGISTRY_SERVICE_NAME,
        DefinitionConst.VERSION_RULE_LATEST);
    return instanceCache.getOrCreateTransportMap().get(defaultTransport);
  }
}
1
9,187
Maybe maxRetryTimes should be at least 2. Return Math.max(maxRetryTimes, 2)?
apache-servicecomb-java-chassis
java
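The suggestion, sketched against the getter the patch adds; the clamp is the reviewer's proposal, not code that exists in the PR.

// Sketch of the review suggestion: never report fewer than 2 retries,
// regardless of how small the configured value is.
public int getMaxRetryTimes() {
  return Math.max(maxRetryTimes, 2);
}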
@@ -439,7 +439,7 @@ type CloneLink struct {
 	Git   string
 }
 
-func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
+func (repo *Repository) cloneLink(isWiki bool, signedUserName string) *CloneLink {
 	repoName := repo.Name
 	if isWiki {
 		repoName += ".wiki"
1
// Copyright 2014 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package models import ( "bytes" "errors" "fmt" "html/template" "io/ioutil" "os" "os/exec" "path" "path/filepath" "regexp" "sort" "strings" "sync" "time" "unicode/utf8" "github.com/Unknwon/cae/zip" "github.com/Unknwon/com" "github.com/go-xorm/xorm" "github.com/mcuadros/go-version" "gopkg.in/ini.v1" "github.com/gogits/git-module" api "github.com/gogits/go-gogs-client" "github.com/gogits/gogs/modules/base" "github.com/gogits/gogs/modules/bindata" "github.com/gogits/gogs/modules/log" "github.com/gogits/gogs/modules/process" "github.com/gogits/gogs/modules/setting" ) const ( _TPL_UPDATE_HOOK = "#!/usr/bin/env %s\n%s update $1 $2 $3 --config='%s'\n" ) var ( ErrRepoFileNotExist = errors.New("Repository file does not exist") ErrRepoFileNotLoaded = errors.New("Repository file not loaded") ErrMirrorNotExist = errors.New("Mirror does not exist") ErrInvalidReference = errors.New("Invalid reference specified") ErrNameEmpty = errors.New("Name is empty") ) var ( Gitignores, Licenses, Readmes []string // Maximum items per page in forks, watchers and stars of a repo ItemsPerPage = 40 ) func LoadRepoConfig() { // Load .gitignore and license files and readme templates. types := []string{"gitignore", "license", "readme"} typeFiles := make([][]string, 3) for i, t := range types { files, err := bindata.AssetDir("conf/" + t) if err != nil { log.Fatal(4, "Fail to get %s files: %v", t, err) } customPath := path.Join(setting.CustomPath, "conf", t) if com.IsDir(customPath) { customFiles, err := com.StatDir(customPath) if err != nil { log.Fatal(4, "Fail to get custom %s files: %v", t, err) } for _, f := range customFiles { if !com.IsSliceContainsStr(files, f) { files = append(files, f) } } } typeFiles[i] = files } Gitignores = typeFiles[0] Licenses = typeFiles[1] Readmes = typeFiles[2] sort.Strings(Gitignores) sort.Strings(Licenses) sort.Strings(Readmes) } func NewRepoContext() { zip.Verbose = false // Check Git installation. if _, err := exec.LookPath("git"); err != nil { log.Fatal(4, "Fail to test 'git' command: %v (forgotten install?)", err) } // Check Git version. gitVer, err := git.BinVersion() if err != nil { log.Fatal(4, "Fail to get Git version: %v", err) } log.Info("Git Version: %s", gitVer) if version.Compare("1.7.1", gitVer, ">") { log.Fatal(4, "Gogs requires Git version greater or equal to 1.7.1") } // Git requires setting user.name and user.email in order to commit changes. for configKey, defaultValue := range map[string]string{"user.name": "Gogs", "user.email": "[email protected]"} { if stdout, stderr, err := process.Exec("NewRepoContext(get setting)", "git", "config", "--get", configKey); err != nil || strings.TrimSpace(stdout) == "" { // ExitError indicates this config is not set if _, ok := err.(*exec.ExitError); ok || strings.TrimSpace(stdout) == "" { if _, stderr, gerr := process.Exec("NewRepoContext(set "+configKey+")", "git", "config", "--global", configKey, defaultValue); gerr != nil { log.Fatal(4, "Fail to set git %s(%s): %s", configKey, gerr, stderr) } log.Info("Git config %s set to %s", configKey, defaultValue) } else { log.Fatal(4, "Fail to get git %s(%s): %s", configKey, err, stderr) } } } // Set git some configurations. 
if _, stderr, err := process.Exec("NewRepoContext(git config --global core.quotepath false)", "git", "config", "--global", "core.quotepath", "false"); err != nil { log.Fatal(4, "Fail to execute 'git config --global core.quotepath false': %s", stderr) } // Clean up temporary data. os.RemoveAll(filepath.Join(setting.AppDataPath, "tmp")) } // Repository represents a git repository. type Repository struct { ID int64 `xorm:"pk autoincr"` OwnerID int64 `xorm:"UNIQUE(s)"` Owner *User `xorm:"-"` LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` Name string `xorm:"INDEX NOT NULL"` Description string Website string DefaultBranch string NumWatches int NumStars int NumForks int NumIssues int NumClosedIssues int NumOpenIssues int `xorm:"-"` NumPulls int NumClosedPulls int NumOpenPulls int `xorm:"-"` NumMilestones int `xorm:"NOT NULL DEFAULT 0"` NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"` NumOpenMilestones int `xorm:"-"` NumTags int `xorm:"-"` IsPrivate bool IsBare bool IsMirror bool *Mirror `xorm:"-"` // Advanced settings EnableWiki bool `xorm:"NOT NULL DEFAULT true"` EnableExternalWiki bool ExternalWikiURL string EnableIssues bool `xorm:"NOT NULL DEFAULT true"` EnableExternalTracker bool ExternalTrackerFormat string ExternalMetas map[string]string `xorm:"-"` EnablePulls bool `xorm:"NOT NULL DEFAULT true"` IsFork bool `xorm:"NOT NULL DEFAULT false"` ForkID int64 BaseRepo *Repository `xorm:"-"` Created time.Time `xorm:"CREATED"` Updated time.Time `xorm:"UPDATED"` } func (repo *Repository) AfterSet(colName string, _ xorm.Cell) { switch colName { case "num_closed_issues": repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues case "num_closed_pulls": repo.NumOpenPulls = repo.NumPulls - repo.NumClosedPulls case "num_closed_milestones": repo.NumOpenMilestones = repo.NumMilestones - repo.NumClosedMilestones case "updated": repo.Updated = regulateTimeZone(repo.Updated) } } func (repo *Repository) getOwner(e Engine) (err error) { if repo.Owner != nil { return nil } repo.Owner, err = getUserByID(e, repo.OwnerID) return err } func (repo *Repository) GetOwner() error { return repo.getOwner(x) } func (repo *Repository) mustOwner(e Engine) *User { if err := repo.getOwner(e); err != nil { return &User{ Name: "error", FullName: err.Error(), } } return repo.Owner } // MustOwner always returns a valid *User object to avoid // conceptually impossible error handling. // It creates a fake object that contains error deftail // when error occurs. func (repo *Repository) MustOwner() *User { return repo.mustOwner(x) } // ComposeMetas composes a map of metas for rendering external issue tracker URL. func (repo *Repository) ComposeMetas() map[string]string { if !repo.EnableExternalTracker { return nil } else if repo.ExternalMetas == nil { repo.ExternalMetas = map[string]string{ "format": repo.ExternalTrackerFormat, "user": repo.MustOwner().Name, "repo": repo.Name, } } return repo.ExternalMetas } // GetAssignees returns all users that have write access of repository. func (repo *Repository) GetAssignees() (_ []*User, err error) { if err = repo.GetOwner(); err != nil { return nil, err } accesses := make([]*Access, 0, 10) if err = x.Where("repo_id=? AND mode>=?", repo.ID, ACCESS_MODE_WRITE).Find(&accesses); err != nil { return nil, err } users := make([]*User, 0, len(accesses)+1) // Just waste 1 unit does not matter. 
if !repo.Owner.IsOrganization() { users = append(users, repo.Owner) } var u *User for i := range accesses { u, err = GetUserByID(accesses[i].UserID) if err != nil { return nil, err } users = append(users, u) } return users, nil } // GetAssigneeByID returns the user that has write access of repository by given ID. func (repo *Repository) GetAssigneeByID(userID int64) (*User, error) { return GetAssigneeByID(repo, userID) } // GetMilestoneByID returns the milestone belongs to repository by given ID. func (repo *Repository) GetMilestoneByID(milestoneID int64) (*Milestone, error) { return GetRepoMilestoneByID(repo.ID, milestoneID) } // IssueStats returns number of open and closed repository issues by given filter mode. func (repo *Repository) IssueStats(uid int64, filterMode int, isPull bool) (int64, int64) { return GetRepoIssueStats(repo.ID, uid, filterMode, isPull) } func (repo *Repository) GetMirror() (err error) { repo.Mirror, err = GetMirror(repo.ID) return err } func (repo *Repository) GetBaseRepo() (err error) { if !repo.IsFork { return nil } repo.BaseRepo, err = GetRepositoryByID(repo.ForkID) return err } func (repo *Repository) repoPath(e Engine) string { return RepoPath(repo.mustOwner(e).Name, repo.Name) } func (repo *Repository) RepoPath() string { return repo.repoPath(x) } func (repo *Repository) GitConfigPath() string { return filepath.Join(repo.RepoPath(), "config") } func (repo *Repository) RepoLink() string { return setting.AppSubUrl + "/" + repo.MustOwner().Name + "/" + repo.Name } func (repo *Repository) ComposeCompareURL(oldCommitID, newCommitID string) string { return fmt.Sprintf("%s/%s/compare/%s...%s", repo.MustOwner().Name, repo.Name, oldCommitID, newCommitID) } func (repo *Repository) FullRepoLink() string { return setting.AppUrl + repo.MustOwner().Name + "/" + repo.Name } func (repo *Repository) HasAccess(u *User) bool { has, _ := HasAccess(u, repo, ACCESS_MODE_READ) return has } func (repo *Repository) IsOwnedBy(userID int64) bool { return repo.OwnerID == userID } // CanBeForked returns true if repository meets the requirements of being forked. func (repo *Repository) CanBeForked() bool { return !repo.IsBare && !repo.IsMirror } func (repo *Repository) NextIssueIndex() int64 { return int64(repo.NumIssues+repo.NumPulls) + 1 } var ( DescPattern = regexp.MustCompile(`https?://\S+`) ) // DescriptionHtml does special handles to description and return HTML string. func (repo *Repository) DescriptionHtml() template.HTML { sanitize := func(s string) string { return fmt.Sprintf(`<a href="%[1]s" target="_blank">%[1]s</a>`, s) } return template.HTML(DescPattern.ReplaceAllStringFunc(base.Sanitizer.Sanitize(repo.Description), sanitize)) } func (repo *Repository) LocalCopyPath() string { return path.Join(setting.AppDataPath, "tmp/local", com.ToStr(repo.ID)) } func updateLocalCopy(repoPath, localPath string) error { if !com.IsExist(localPath) { if err := git.Clone(repoPath, localPath, git.CloneRepoOptions{}); err != nil { return fmt.Errorf("Clone: %v", err) } } else { if err := git.Pull(localPath, true); err != nil { return fmt.Errorf("Pull: %v", err) } } return nil } // UpdateLocalCopy makes sure the local copy of repository is up-to-date. func (repo *Repository) UpdateLocalCopy() error { return updateLocalCopy(repo.RepoPath(), repo.LocalCopyPath()) } // PatchPath returns corresponding patch file path of repository by given issue ID. 
func (repo *Repository) PatchPath(index int64) (string, error) { if err := repo.GetOwner(); err != nil { return "", err } return filepath.Join(RepoPath(repo.Owner.Name, repo.Name), "pulls", com.ToStr(index)+".patch"), nil } // SavePatch saves patch data to corresponding location by given issue ID. func (repo *Repository) SavePatch(index int64, patch []byte) error { patchPath, err := repo.PatchPath(index) if err != nil { return fmt.Errorf("PatchPath: %v", err) } os.MkdirAll(filepath.Dir(patchPath), os.ModePerm) if err = ioutil.WriteFile(patchPath, patch, 0644); err != nil { return fmt.Errorf("WriteFile: %v", err) } return nil } // ComposePayload composes and returns *api.PayloadRepo corresponding to the repository. func (repo *Repository) ComposePayload() *api.PayloadRepo { cl := repo.CloneLink() return &api.PayloadRepo{ ID: repo.ID, Name: repo.Name, URL: repo.FullRepoLink(), SSHURL: cl.SSH, CloneURL: cl.HTTPS, Description: repo.Description, Website: repo.Website, Watchers: repo.NumWatches, Owner: &api.PayloadAuthor{ Name: repo.MustOwner().DisplayName(), Email: repo.MustOwner().Email, UserName: repo.MustOwner().Name, }, Private: repo.IsPrivate, DefaultBranch: repo.DefaultBranch, } } func isRepositoryExist(e Engine, u *User, repoName string) (bool, error) { has, err := e.Get(&Repository{ OwnerID: u.Id, LowerName: strings.ToLower(repoName), }) return has && com.IsDir(RepoPath(u.Name, repoName)), err } // IsRepositoryExist returns true if the repository with given name under user has already existed. func IsRepositoryExist(u *User, repoName string) (bool, error) { return isRepositoryExist(x, u, repoName) } // CloneLink represents different types of clone URLs of repository. type CloneLink struct { SSH string HTTPS string Git string } func (repo *Repository) cloneLink(isWiki bool) *CloneLink { repoName := repo.Name if isWiki { repoName += ".wiki" } repo.Owner = repo.MustOwner() cl := new(CloneLink) if setting.SSHPort != 22 { cl.SSH = fmt.Sprintf("ssh://%s@%s:%d/%s/%s.git", setting.RunUser, setting.SSHDomain, setting.SSHPort, repo.Owner.Name, repoName) } else { cl.SSH = fmt.Sprintf("%s@%s:%s/%s.git", setting.RunUser, setting.SSHDomain, repo.Owner.Name, repoName) } cl.HTTPS = fmt.Sprintf("%s%s/%s.git", setting.AppUrl, repo.Owner.Name, repoName) return cl } // CloneLink returns clone URLs of repository. func (repo *Repository) CloneLink() (cl *CloneLink) { return repo.cloneLink(false) } var ( reservedNames = []string{"debug", "raw", "install", "api", "avatar", "user", "org", "help", "stars", "issues", "pulls", "commits", "repo", "template", "admin", "new"} reservedPatterns = []string{"*.git", "*.keys", "*.wiki"} ) // IsUsableName checks if name is reserved or pattern of name is not allowed. func IsUsableName(name string) error { name = strings.TrimSpace(strings.ToLower(name)) if utf8.RuneCountInString(name) == 0 { return ErrNameEmpty } for i := range reservedNames { if name == reservedNames[i] { return ErrNameReserved{name} } } for _, pat := range reservedPatterns { if pat[0] == '*' && strings.HasSuffix(name, pat[1:]) || (pat[len(pat)-1] == '*' && strings.HasPrefix(name, pat[:len(pat)-1])) { return ErrNamePatternNotAllowed{pat} } } return nil } // Mirror represents a mirror information of repository. type Mirror struct { ID int64 `xorm:"pk autoincr"` RepoID int64 Repo *Repository `xorm:"-"` Interval int // Hour. 
Updated time.Time `xorm:"UPDATED"` NextUpdate time.Time address string `xorm:"-"` } func (m *Mirror) AfterSet(colName string, _ xorm.Cell) { var err error switch colName { case "repo_id": m.Repo, err = GetRepositoryByID(m.RepoID) if err != nil { log.Error(3, "GetRepositoryByID[%d]: %v", m.ID, err) } } } func (m *Mirror) readAddress() { if len(m.address) > 0 { return } cfg, err := ini.Load(m.Repo.GitConfigPath()) if err != nil { log.Error(4, "Load: %v", err) return } m.address = cfg.Section("remote \"origin\"").Key("url").Value() } // HandleCloneUserCredentials replaces user credentials from HTTP/HTTPS URL // with placeholder <credentials>. // It will fail for any other forms of clone addresses. func HandleCloneUserCredentials(url string, mosaics bool) string { i := strings.Index(url, "@") if i == -1 { return url } start := strings.Index(url, "://") if start == -1 { return url } if mosaics { return url[:start+3] + "<credentials>" + url[i:] } return url[:start+3] + url[i+1:] } // Address returns mirror address from Git repository config without credentials. func (m *Mirror) Address() string { m.readAddress() return HandleCloneUserCredentials(m.address, false) } // FullAddress returns mirror address from Git repository config. func (m *Mirror) FullAddress() string { m.readAddress() return m.address } // SaveAddress writes new address to Git repository config. func (m *Mirror) SaveAddress(addr string) error { configPath := m.Repo.GitConfigPath() cfg, err := ini.Load(configPath) if err != nil { return fmt.Errorf("Load: %v", err) } cfg.Section("remote \"origin\"").Key("url").SetValue(addr) return cfg.SaveToIndent(configPath, "\t") } func getMirror(e Engine, repoId int64) (*Mirror, error) { m := &Mirror{RepoID: repoId} has, err := e.Get(m) if err != nil { return nil, err } else if !has { return nil, ErrMirrorNotExist } return m, nil } // GetMirror returns mirror object by given repository ID. func GetMirror(repoId int64) (*Mirror, error) { return getMirror(x, repoId) } func updateMirror(e Engine, m *Mirror) error { _, err := e.Id(m.ID).Update(m) return err } func UpdateMirror(m *Mirror) error { return updateMirror(x, m) } func DeleteMirrorByRepoID(repoID int64) error { _, err := x.Delete(&Mirror{RepoID: repoID}) return err } func createUpdateHook(repoPath string) error { return git.SetUpdateHook(repoPath, fmt.Sprintf(_TPL_UPDATE_HOOK, setting.ScriptType, "\""+setting.AppPath+"\"", setting.CustomConf)) } type MigrateRepoOptions struct { Name string Description string IsPrivate bool IsMirror bool RemoteAddr string } // MigrateRepository migrates a existing repository from other project hosting. func MigrateRepository(u *User, opts MigrateRepoOptions) (*Repository, error) { repo, err := CreateRepository(u, CreateRepoOptions{ Name: opts.Name, Description: opts.Description, IsPrivate: opts.IsPrivate, IsMirror: opts.IsMirror, }) if err != nil { return nil, err } // Clone to temprory path and do the init commit. 
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("%d", time.Now().Nanosecond())) os.MkdirAll(tmpDir, os.ModePerm) repoPath := RepoPath(u.Name, opts.Name) if u.IsOrganization() { t, err := u.GetOwnerTeam() if err != nil { return nil, err } repo.NumWatches = t.NumMembers } else { repo.NumWatches = 1 } os.RemoveAll(repoPath) if err = git.Clone(opts.RemoteAddr, repoPath, git.CloneRepoOptions{ Mirror: true, Quiet: true, Timeout: 10 * time.Minute, }); err != nil { return repo, fmt.Errorf("Clone: %v", err) } if opts.IsMirror { if _, err = x.InsertOne(&Mirror{ RepoID: repo.ID, Interval: 24, NextUpdate: time.Now().Add(24 * time.Hour), }); err != nil { return repo, fmt.Errorf("InsertOne: %v", err) } repo.IsMirror = true return repo, UpdateRepository(repo, false) } return CleanUpMigrateInfo(repo, repoPath) } // Finish migrating repository with things that don't need to be done for mirrors. func CleanUpMigrateInfo(repo *Repository, repoPath string) (*Repository, error) { if err := createUpdateHook(repoPath); err != nil { return repo, fmt.Errorf("createUpdateHook: %v", err) } // Clean up mirror info which prevents "push --all". // This also removes possible user credentials. configPath := repo.GitConfigPath() cfg, err := ini.Load(configPath) if err != nil { return repo, fmt.Errorf("open config file: %v", err) } cfg.DeleteSection("remote \"origin\"") if err = cfg.SaveToIndent(configPath, "\t"); err != nil { return repo, fmt.Errorf("save config file: %v", err) } // Check if repository is empty. _, stderr, err := com.ExecCmdDir(repoPath, "git", "log", "-1") if err != nil { if strings.Contains(stderr, "fatal: bad default revision 'HEAD'") { repo.IsBare = true } else { return repo, fmt.Errorf("check bare: %v - %s", err, stderr) } } // Try to get HEAD branch and set it as default branch. gitRepo, err := git.OpenRepository(repoPath) if err != nil { log.Error(4, "OpenRepository: %v", err) return repo, nil } headBranch, err := gitRepo.GetHEADBranch() if err != nil { log.Error(4, "GetHEADBranch: %v", err) return repo, nil } if headBranch != nil { repo.DefaultBranch = headBranch.Name } return repo, UpdateRepository(repo, false) } // initRepoCommit temporarily changes with work directory. func initRepoCommit(tmpPath string, sig *git.Signature) (err error) { var stderr string if _, stderr, err = process.ExecDir(-1, tmpPath, fmt.Sprintf("initRepoCommit (git add): %s", tmpPath), "git", "add", "--all"); err != nil { return fmt.Errorf("git add: %s", stderr) } if _, stderr, err = process.ExecDir(-1, tmpPath, fmt.Sprintf("initRepoCommit (git commit): %s", tmpPath), "git", "commit", fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email), "-m", "initial commit"); err != nil { return fmt.Errorf("git commit: %s", stderr) } if _, stderr, err = process.ExecDir(-1, tmpPath, fmt.Sprintf("initRepoCommit (git push): %s", tmpPath), "git", "push", "origin", "master"); err != nil { return fmt.Errorf("git push: %s", stderr) } return nil } type CreateRepoOptions struct { Name string Description string Gitignores string License string Readme string IsPrivate bool IsMirror bool AutoInit bool } func getRepoInitFile(tp, name string) ([]byte, error) { relPath := path.Join("conf", tp, name) // Use custom file when available. customPath := path.Join(setting.CustomPath, relPath) if com.IsFile(customPath) { return ioutil.ReadFile(customPath) } return bindata.Asset(relPath) } func prepareRepoCommit(repo *Repository, tmpDir, repoPath string, opts CreateRepoOptions) error { // Clone to temprory path and do the init commit. 
_, stderr, err := process.Exec( fmt.Sprintf("initRepository(git clone): %s", repoPath), "git", "clone", repoPath, tmpDir) if err != nil { return fmt.Errorf("git clone: %v - %s", err, stderr) } // README data, err := getRepoInitFile("readme", opts.Readme) if err != nil { return fmt.Errorf("getRepoInitFile[%s]: %v", opts.Readme, err) } cloneLink := repo.CloneLink() match := map[string]string{ "Name": repo.Name, "Description": repo.Description, "CloneURL.SSH": cloneLink.SSH, "CloneURL.HTTPS": cloneLink.HTTPS, } if err = ioutil.WriteFile(filepath.Join(tmpDir, "README.md"), []byte(com.Expand(string(data), match)), 0644); err != nil { return fmt.Errorf("write README.md: %v", err) } // .gitignore if len(opts.Gitignores) > 0 { var buf bytes.Buffer names := strings.Split(opts.Gitignores, ",") for _, name := range names { data, err = getRepoInitFile("gitignore", name) if err != nil { return fmt.Errorf("getRepoInitFile[%s]: %v", name, err) } buf.WriteString("# ---> " + name + "\n") buf.Write(data) buf.WriteString("\n") } if buf.Len() > 0 { if err = ioutil.WriteFile(filepath.Join(tmpDir, ".gitignore"), buf.Bytes(), 0644); err != nil { return fmt.Errorf("write .gitignore: %v", err) } } } // LICENSE if len(opts.License) > 0 { data, err = getRepoInitFile("license", opts.License) if err != nil { return fmt.Errorf("getRepoInitFile[%s]: %v", opts.License, err) } if err = ioutil.WriteFile(filepath.Join(tmpDir, "LICENSE"), data, 0644); err != nil { return fmt.Errorf("write LICENSE: %v", err) } } return nil } // InitRepository initializes README and .gitignore if needed. func initRepository(e Engine, repoPath string, u *User, repo *Repository, opts CreateRepoOptions) (err error) { // Somehow the directory could exist. if com.IsExist(repoPath) { return fmt.Errorf("initRepository: path already exists: %s", repoPath) } // Init bare new repository. if err = git.InitRepository(repoPath, true); err != nil { return fmt.Errorf("InitRepository: %v", err) } else if err = createUpdateHook(repoPath); err != nil { return fmt.Errorf("createUpdateHook: %v", err) } tmpDir := filepath.Join(os.TempDir(), "gogs-"+repo.Name+"-"+com.ToStr(time.Now().Nanosecond())) // Initialize repository according to user's choice. if opts.AutoInit { os.MkdirAll(tmpDir, os.ModePerm) defer os.RemoveAll(tmpDir) if err = prepareRepoCommit(repo, tmpDir, repoPath, opts); err != nil { return fmt.Errorf("prepareRepoCommit: %v", err) } // Apply changes and commit. if err = initRepoCommit(tmpDir, u.NewGitSig()); err != nil { return fmt.Errorf("initRepoCommit: %v", err) } } // Re-fetch the repository from database before updating it (else it would // override changes that were done earlier with sql) if repo, err = getRepositoryByID(e, repo.ID); err != nil { return fmt.Errorf("getRepositoryByID: %v", err) } if !opts.AutoInit { repo.IsBare = true } repo.DefaultBranch = "master" if err = updateRepository(e, repo, false); err != nil { return fmt.Errorf("updateRepository: %v", err) } return nil } func createRepository(e *xorm.Session, u *User, repo *Repository) (err error) { if err = IsUsableName(repo.Name); err != nil { return err } has, err := isRepositoryExist(e, u, repo.Name) if err != nil { return fmt.Errorf("IsRepositoryExist: %v", err) } else if has { return ErrRepoAlreadyExist{u.Name, repo.Name} } if _, err = e.Insert(repo); err != nil { return err } u.NumRepos++ // Remember visibility preference. 
u.LastRepoVisibility = repo.IsPrivate if err = updateUser(e, u); err != nil { return fmt.Errorf("updateUser: %v", err) } // Give access to all members in owner team. if u.IsOrganization() { t, err := u.getOwnerTeam(e) if err != nil { return fmt.Errorf("getOwnerTeam: %v", err) } else if err = t.addRepository(e, repo); err != nil { return fmt.Errorf("addRepository: %v", err) } } else { // Organization automatically called this in addRepository method. if err = repo.recalculateAccesses(e); err != nil { return fmt.Errorf("recalculateAccesses: %v", err) } } if err = watchRepo(e, u.Id, repo.ID, true); err != nil { return fmt.Errorf("watchRepo: %v", err) } else if err = newRepoAction(e, u, repo); err != nil { return fmt.Errorf("newRepoAction: %v", err) } return nil } // CreateRepository creates a repository for given user or organization. func CreateRepository(u *User, opts CreateRepoOptions) (_ *Repository, err error) { if !u.CanCreateRepo() { return nil, ErrReachLimitOfRepo{u.MaxRepoCreation} } repo := &Repository{ OwnerID: u.Id, Owner: u, Name: opts.Name, LowerName: strings.ToLower(opts.Name), Description: opts.Description, IsPrivate: opts.IsPrivate, EnableWiki: true, EnableIssues: true, EnablePulls: true, } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return nil, err } if err = createRepository(sess, u, repo); err != nil { return nil, err } // No need for init mirror. if !opts.IsMirror { repoPath := RepoPath(u.Name, repo.Name) if err = initRepository(sess, repoPath, u, repo, opts); err != nil { if err2 := os.RemoveAll(repoPath); err2 != nil { log.Error(4, "initRepository: %v", err) return nil, fmt.Errorf( "delete repo directory %s/%s failed(2): %v", u.Name, repo.Name, err2) } return nil, fmt.Errorf("initRepository: %v", err) } _, stderr, err := process.ExecDir(-1, repoPath, fmt.Sprintf("CreateRepository(git update-server-info): %s", repoPath), "git", "update-server-info") if err != nil { return nil, errors.New("CreateRepository(git update-server-info): " + stderr) } } return repo, sess.Commit() } func countRepositories(showPrivate bool) int64 { sess := x.NewSession() if !showPrivate { sess.Where("is_private=?", false) } count, err := sess.Count(new(Repository)) if err != nil { log.Error(4, "countRepositories: %v", err) } return count } // CountRepositories returns number of repositories. func CountRepositories() int64 { return countRepositories(true) } // CountPublicRepositories returns number of public repositories. func CountPublicRepositories() int64 { return countRepositories(false) } // RepositoriesWithUsers returns number of repos in given page. func RepositoriesWithUsers(page, pageSize int) (_ []*Repository, err error) { repos := make([]*Repository, 0, pageSize) if err = x.Limit(pageSize, (page-1)*pageSize).Asc("id").Find(&repos); err != nil { return nil, err } for i := range repos { if err = repos[i].GetOwner(); err != nil { return nil, err } } return repos, nil } // RepoPath returns repository path by given user and repository name. func RepoPath(userName, repoName string) string { return filepath.Join(UserPath(userName), strings.ToLower(repoName)+".git") } // TransferOwnership transfers all corresponding setting from old user to new one. func TransferOwnership(u *User, newOwnerName string, repo *Repository) error { newOwner, err := GetUserByName(newOwnerName) if err != nil { return fmt.Errorf("get new owner '%s': %v", newOwnerName, err) } // Check if new owner has repository with same name. 
has, err := IsRepositoryExist(newOwner, repo.Name) if err != nil { return fmt.Errorf("IsRepositoryExist: %v", err) } else if has { return ErrRepoAlreadyExist{newOwnerName, repo.Name} } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return fmt.Errorf("sess.Begin: %v", err) } owner := repo.Owner // Note: we have to set value here to make sure recalculate accesses is based on // new owner. repo.OwnerID = newOwner.Id repo.Owner = newOwner // Update repository. if _, err := sess.Id(repo.ID).Update(repo); err != nil { return fmt.Errorf("update owner: %v", err) } // Remove redundant collaborators. collaborators, err := repo.getCollaborators(sess) if err != nil { return fmt.Errorf("getCollaborators: %v", err) } // Dummy object. collaboration := &Collaboration{RepoID: repo.ID} for _, c := range collaborators { collaboration.UserID = c.Id if c.Id == newOwner.Id || newOwner.IsOrgMember(c.Id) { if _, err = sess.Delete(collaboration); err != nil { return fmt.Errorf("remove collaborator '%d': %v", c.Id, err) } } } // Remove old team-repository relations. if owner.IsOrganization() { if err = owner.getTeams(sess); err != nil { return fmt.Errorf("getTeams: %v", err) } for _, t := range owner.Teams { if !t.hasRepository(sess, repo.ID) { continue } t.NumRepos-- if _, err := sess.Id(t.ID).AllCols().Update(t); err != nil { return fmt.Errorf("decrease team repository count '%d': %v", t.ID, err) } } if err = owner.removeOrgRepo(sess, repo.ID); err != nil { return fmt.Errorf("removeOrgRepo: %v", err) } } if newOwner.IsOrganization() { t, err := newOwner.getOwnerTeam(sess) if err != nil { return fmt.Errorf("getOwnerTeam: %v", err) } else if err = t.addRepository(sess, repo); err != nil { return fmt.Errorf("add to owner team: %v", err) } } else { // Organization called this in addRepository method. if err = repo.recalculateAccesses(sess); err != nil { return fmt.Errorf("recalculateAccesses: %v", err) } } // Update repository count. if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos+1 WHERE id=?", newOwner.Id); err != nil { return fmt.Errorf("increase new owner repository count: %v", err) } else if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", owner.Id); err != nil { return fmt.Errorf("decrease old owner repository count: %v", err) } if err = watchRepo(sess, newOwner.Id, repo.ID, true); err != nil { return fmt.Errorf("watchRepo: %v", err) } else if err = transferRepoAction(sess, u, owner, newOwner, repo); err != nil { return fmt.Errorf("transferRepoAction: %v", err) } // Rename remote repository to new path and delete local copy. if err = os.Rename(RepoPath(owner.Name, repo.Name), RepoPath(newOwner.Name, repo.Name)); err != nil { return fmt.Errorf("rename repository directory: %v", err) } RemoveAllWithNotice("Delete repository local copy", repo.LocalCopyPath()) // Rename remote wiki repository to new path and delete local copy. wikiPath := WikiPath(owner.Name, repo.Name) if com.IsExist(wikiPath) { RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath()) if err = os.Rename(wikiPath, WikiPath(newOwner.Name, repo.Name)); err != nil { return fmt.Errorf("rename repository wiki: %v", err) } } return sess.Commit() } // ChangeRepositoryName changes all corresponding setting from old repository name to new one. 
func ChangeRepositoryName(u *User, oldRepoName, newRepoName string) (err error) { oldRepoName = strings.ToLower(oldRepoName) newRepoName = strings.ToLower(newRepoName) if err = IsUsableName(newRepoName); err != nil { return err } has, err := IsRepositoryExist(u, newRepoName) if err != nil { return fmt.Errorf("IsRepositoryExist: %v", err) } else if has { return ErrRepoAlreadyExist{u.Name, newRepoName} } repo, err := GetRepositoryByName(u.Id, oldRepoName) if err != nil { return fmt.Errorf("GetRepositoryByName: %v", err) } // Change repository directory name. if err = os.Rename(repo.RepoPath(), RepoPath(u.Name, newRepoName)); err != nil { return fmt.Errorf("rename repository directory: %v", err) } wikiPath := repo.WikiPath() if com.IsExist(wikiPath) { if err = os.Rename(wikiPath, WikiPath(u.Name, newRepoName)); err != nil { return fmt.Errorf("rename repository wiki: %v", err) } RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath()) } return nil } func getRepositoriesByForkID(e Engine, forkID int64) ([]*Repository, error) { repos := make([]*Repository, 0, 10) return repos, e.Where("fork_id=?", forkID).Find(&repos) } // GetRepositoriesByForkID returns all repositories with given fork ID. func GetRepositoriesByForkID(forkID int64) ([]*Repository, error) { return getRepositoriesByForkID(x, forkID) } func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err error) { repo.LowerName = strings.ToLower(repo.Name) if len(repo.Description) > 255 { repo.Description = repo.Description[:255] } if len(repo.Website) > 255 { repo.Website = repo.Website[:255] } if _, err = e.Id(repo.ID).AllCols().Update(repo); err != nil { return fmt.Errorf("update: %v", err) } if visibilityChanged { if err = repo.getOwner(e); err != nil { return fmt.Errorf("getOwner: %v", err) } if repo.Owner.IsOrganization() { // Organization repositories need to recalculate the access table when visibility is changed. if err = repo.recalculateTeamAccesses(e, 0); err != nil { return fmt.Errorf("recalculateTeamAccesses: %v", err) } } forkRepos, err := getRepositoriesByForkID(e, repo.ID) if err != nil { return fmt.Errorf("getRepositoriesByForkID: %v", err) } for i := range forkRepos { forkRepos[i].IsPrivate = repo.IsPrivate if err = updateRepository(e, forkRepos[i], true); err != nil { return fmt.Errorf("updateRepository[%d]: %v", forkRepos[i].ID, err) } } } return nil } func UpdateRepository(repo *Repository, visibilityChanged bool) (err error) { sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return err } if err = updateRepository(x, repo, visibilityChanged); err != nil { return fmt.Errorf("updateRepository: %v", err) } return sess.Commit() } // DeleteRepository deletes a repository for a user or organization. func DeleteRepository(uid, repoID int64) error { repo := &Repository{ID: repoID, OwnerID: uid} has, err := x.Get(repo) if err != nil { return err } else if !has { return ErrRepoNotExist{repoID, uid, ""} } // In case the owner is an organization.
org, err := GetUserByID(uid) if err != nil { return err } if org.IsOrganization() { if err = org.GetTeams(); err != nil { return err } } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return err } if org.IsOrganization() { for _, t := range org.Teams { if !t.hasRepository(sess, repoID) { continue } else if err = t.removeRepository(sess, repo, false); err != nil { return err } } } if err = deleteBeans(sess, &Repository{ID: repoID}, &Access{RepoID: repo.ID}, &Action{RepoID: repo.ID}, &Watch{RepoID: repoID}, &Star{RepoID: repoID}, &Mirror{RepoID: repoID}, &IssueUser{RepoID: repoID}, &Milestone{RepoID: repoID}, &Release{RepoID: repoID}, &Collaboration{RepoID: repoID}, &PullRequest{BaseRepoID: repoID}, ); err != nil { return fmt.Errorf("deleteBeans: %v", err) } // Delete comments and attachments. issues := make([]*Issue, 0, 25) attachmentPaths := make([]string, 0, len(issues)) if err = sess.Where("repo_id=?", repoID).Find(&issues); err != nil { return err } for i := range issues { if _, err = sess.Delete(&Comment{IssueID: issues[i].ID}); err != nil { return err } attachments := make([]*Attachment, 0, 5) if err = sess.Where("issue_id=?", issues[i].ID).Find(&attachments); err != nil { return err } for j := range attachments { attachmentPaths = append(attachmentPaths, attachments[j].LocalPath()) } if _, err = sess.Delete(&Attachment{IssueID: issues[i].ID}); err != nil { return err } } if _, err = sess.Delete(&Issue{RepoID: repoID}); err != nil { return err } if repo.IsFork { if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repo.ForkID); err != nil { return fmt.Errorf("decrease fork count: %v", err) } } if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", uid); err != nil { return err } // Remove repository files. repoPath := repo.repoPath(sess) RemoveAllWithNotice("Delete repository files", repoPath) wikiPaths := []string{repo.WikiPath(), repo.LocalWikiPath()} for _, wikiPath := range wikiPaths { RemoveAllWithNotice("Delete repository wiki", wikiPath) } // Remove attachment files. for i := range attachmentPaths { RemoveAllWithNotice("Delete attachment", attachmentPaths[i]) } if err = sess.Commit(); err != nil { return fmt.Errorf("Commit: %v", err) } if repo.NumForks > 0 { if repo.IsPrivate { forkRepos, err := GetRepositoriesByForkID(repo.ID) if err != nil { return fmt.Errorf("getRepositoriesByForkID: %v", err) } for i := range forkRepos { if err = DeleteRepository(forkRepos[i].OwnerID, forkRepos[i].ID); err != nil { log.Error(4, "DeleteRepository [%d]: %v", forkRepos[i].ID, err) } } } else { if _, err = x.Exec("UPDATE `repository` SET fork_id=0,is_fork=? WHERE fork_id=?", false, repo.ID); err != nil { log.Error(4, "reset 'fork_id' and 'is_fork': %v", err) } } } return nil } // GetRepositoryByRef returns a Repository specified by a GFM reference. // See https://help.github.com/articles/writing-on-github#references for more information on the syntax. func GetRepositoryByRef(ref string) (*Repository, error) { n := strings.IndexByte(ref, byte('/')) if n < 2 { return nil, ErrInvalidReference } userName, repoName := ref[:n], ref[n+1:] user, err := GetUserByName(userName) if err != nil { return nil, err } return GetRepositoryByName(user.Id, repoName) } // GetRepositoryByName returns the repository by given name under user if exists. 
func GetRepositoryByName(uid int64, repoName string) (*Repository, error) { repo := &Repository{ OwnerID: uid, LowerName: strings.ToLower(repoName), } has, err := x.Get(repo) if err != nil { return nil, err } else if !has { return nil, ErrRepoNotExist{0, uid, repoName} } return repo, err } func getRepositoryByID(e Engine, id int64) (*Repository, error) { repo := new(Repository) has, err := e.Id(id).Get(repo) if err != nil { return nil, err } else if !has { return nil, ErrRepoNotExist{id, 0, ""} } return repo, nil } // GetRepositoryByID returns the repository by given id if exists. func GetRepositoryByID(id int64) (*Repository, error) { return getRepositoryByID(x, id) } // GetRepositories returns a list of repositories of given user. func GetRepositories(uid int64, private bool) ([]*Repository, error) { repos := make([]*Repository, 0, 10) sess := x.Desc("updated") if !private { sess.Where("is_private=?", false) } return repos, sess.Find(&repos, &Repository{OwnerID: uid}) } // GetRecentUpdatedRepositories returns the list of repositories that are recently updated. func GetRecentUpdatedRepositories(page int) (repos []*Repository, err error) { return repos, x.Limit(setting.ExplorePagingNum, (page-1)*setting.ExplorePagingNum). Where("is_private=?", false).Limit(setting.ExplorePagingNum).Desc("updated").Find(&repos) } func getRepositoryCount(e Engine, u *User) (int64, error) { return x.Count(&Repository{OwnerID: u.Id}) } // GetRepositoryCount returns the total number of repositories of user. func GetRepositoryCount(u *User) (int64, error) { return getRepositoryCount(x, u) } type SearchOption struct { Keyword string Uid int64 Limit int Private bool } // SearchRepositoryByName returns given number of repositories whose name contains keyword. func SearchRepositoryByName(opt SearchOption) (repos []*Repository, err error) { if len(opt.Keyword) == 0 { return repos, nil } opt.Keyword = strings.ToLower(opt.Keyword) repos = make([]*Repository, 0, opt.Limit) // Append conditions. sess := x.Limit(opt.Limit) if opt.Uid > 0 { sess.Where("owner_id=?", opt.Uid) } if !opt.Private { sess.And("is_private=?", false) } sess.And("lower_name like ?", "%"+opt.Keyword+"%").Find(&repos) return repos, err } // DeleteRepositoryArchives deletes all repositories' archives. func DeleteRepositoryArchives() error { return x.Where("id > 0").Iterate(new(Repository), func(idx int, bean interface{}) error { repo := bean.(*Repository) return os.RemoveAll(filepath.Join(repo.RepoPath(), "archives")) }) } func gatherMissingRepoRecords() ([]*Repository, error) { repos := make([]*Repository, 0, 10) if err := x.Where("id > 0").Iterate(new(Repository), func(idx int, bean interface{}) error { repo := bean.(*Repository) if !com.IsDir(repo.RepoPath()) { repos = append(repos, repo) } return nil }); err != nil { if err2 := CreateRepositoryNotice(fmt.Sprintf("gatherMissingRepoRecords: %v", err)); err2 != nil { return nil, fmt.Errorf("CreateRepositoryNotice: %v", err) } } return repos, nil } // DeleteMissingRepositories deletes all repository records that lost Git files. 
func DeleteMissingRepositories() error { repos, err := gatherMissingRepoRecords() if err != nil { return fmt.Errorf("gatherMissingRepoRecords: %v", err) } if len(repos) == 0 { return nil } for _, repo := range repos { log.Trace("Deleting %d/%d...", repo.OwnerID, repo.ID) if err := DeleteRepository(repo.OwnerID, repo.ID); err != nil { if err2 := CreateRepositoryNotice(fmt.Sprintf("DeleteRepository [%d]: %v", repo.ID, err)); err2 != nil { return fmt.Errorf("CreateRepositoryNotice: %v", err) } } } return nil } // ReinitMissingRepositories reinitializes all repository records that lost Git files. func ReinitMissingRepositories() error { repos, err := gatherMissingRepoRecords() if err != nil { return fmt.Errorf("gatherMissingRepoRecords: %v", err) } if len(repos) == 0 { return nil } for _, repo := range repos { log.Trace("Initializing %d/%d...", repo.OwnerID, repo.ID) if err := git.InitRepository(repo.RepoPath(), true); err != nil { if err2 := CreateRepositoryNotice(fmt.Sprintf("InitRepository [%d]: %v", repo.ID, err)); err2 != nil { return fmt.Errorf("CreateRepositoryNotice: %v", err) } } } return nil } // RewriteRepositoryUpdateHook rewrites all repositories' update hook. func RewriteRepositoryUpdateHook() error { return x.Where("id > 0").Iterate(new(Repository), func(idx int, bean interface{}) error { repo := bean.(*Repository) return createUpdateHook(repo.RepoPath()) }) } // statusPool represents a pool of status with true/false. type statusPool struct { lock sync.RWMutex pool map[string]bool } // Start sets value of given name to true in the pool. func (p *statusPool) Start(name string) { p.lock.Lock() defer p.lock.Unlock() p.pool[name] = true } // Stop sets value of given name to false in the pool. func (p *statusPool) Stop(name string) { p.lock.Lock() defer p.lock.Unlock() p.pool[name] = false } // IsRunning checks if value of given name is set to true in the pool. func (p *statusPool) IsRunning(name string) bool { p.lock.RLock() defer p.lock.RUnlock() return p.pool[name] } // Prevent duplicate running tasks. var taskStatusPool = &statusPool{ pool: make(map[string]bool), } const ( _MIRROR_UPDATE = "mirror_update" _GIT_FSCK = "git_fsck" _CHECK_REPOs = "check_repos" ) // MirrorUpdate checks and updates mirror repositories. func MirrorUpdate() { if taskStatusPool.IsRunning(_MIRROR_UPDATE) { return } taskStatusPool.Start(_MIRROR_UPDATE) defer taskStatusPool.Stop(_MIRROR_UPDATE) log.Trace("Doing: MirrorUpdate") mirrors := make([]*Mirror, 0, 10) if err := x.Iterate(new(Mirror), func(idx int, bean interface{}) error { m := bean.(*Mirror) if m.NextUpdate.After(time.Now()) { return nil } if m.Repo == nil { log.Error(4, "Disconnected mirror repository found: %d", m.ID) return nil } repoPath := m.Repo.RepoPath() if _, stderr, err := process.ExecDir(10*time.Minute, repoPath, fmt.Sprintf("MirrorUpdate: %s", repoPath), "git", "remote", "update", "--prune"); err != nil { desc := fmt.Sprintf("Fail to update mirror repository(%s): %s", repoPath, stderr) log.Error(4, desc) if err = CreateRepositoryNotice(desc); err != nil { log.Error(4, "CreateRepositoryNotice: %v", err) } return nil } m.NextUpdate = time.Now().Add(time.Duration(m.Interval) * time.Hour) mirrors = append(mirrors, m) return nil }); err != nil { log.Error(4, "MirrorUpdate: %v", err) } for i := range mirrors { if err := UpdateMirror(mirrors[i]); err != nil { log.Error(4, "UpdateMirror[%d]: %v", mirrors[i].ID, err) } } } // GitFsck calls 'git fsck' to check repository health. 
func GitFsck() { if taskStatusPool.IsRunning(_GIT_FSCK) { return } taskStatusPool.Start(_GIT_FSCK) defer taskStatusPool.Stop(_GIT_FSCK) log.Trace("Doing: GitFsck") if err := x.Where("id>0").Iterate(new(Repository), func(idx int, bean interface{}) error { repo := bean.(*Repository) repoPath := repo.RepoPath() if err := git.Fsck(repoPath, setting.Cron.RepoHealthCheck.Timeout, setting.Cron.RepoHealthCheck.Args...); err != nil { desc := fmt.Sprintf("Fail to health check repository (%s): %v", repoPath, err) log.Warn(desc) if err = CreateRepositoryNotice(desc); err != nil { log.Error(4, "CreateRepositoryNotice: %v", err) } } return nil }); err != nil { log.Error(4, "GitFsck: %v", err) } } func GitGcRepos() error { args := append([]string{"gc"}, setting.Git.GcArgs...) return x.Where("id > 0").Iterate(new(Repository), func(idx int, bean interface{}) error { repo := bean.(*Repository) if err := repo.GetOwner(); err != nil { return err } _, stderr, err := process.ExecDir(-1, RepoPath(repo.Owner.Name, repo.Name), "Repository garbage collection", "git", args...) if err != nil { return fmt.Errorf("%v: %v", err, stderr) } return nil }) } type repoChecker struct { querySQL, correctSQL string desc string } func repoStatsCheck(checker *repoChecker) { results, err := x.Query(checker.querySQL) if err != nil { log.Error(4, "Select %s: %v", checker.desc, err) return } for _, result := range results { id := com.StrTo(result["id"]).MustInt64() log.Trace("Updating %s: %d", checker.desc, id) _, err = x.Exec(checker.correctSQL, id, id) if err != nil { log.Error(4, "Update %s[%d]: %v", checker.desc, id, err) } } } func CheckRepoStats() { if taskStatusPool.IsRunning(_CHECK_REPOs) { return } taskStatusPool.Start(_CHECK_REPOs) defer taskStatusPool.Stop(_CHECK_REPOs) log.Trace("Doing: CheckRepoStats") checkers := []*repoChecker{ // Repository.NumWatches { "SELECT repo.id FROM `repository` repo WHERE repo.num_watches!=(SELECT COUNT(*) FROM `watch` WHERE repo_id=repo.id)", "UPDATE `repository` SET num_watches=(SELECT COUNT(*) FROM `watch` WHERE repo_id=?) WHERE id=?", "repository count 'num_watches'", }, // Repository.NumStars { "SELECT repo.id FROM `repository` repo WHERE repo.num_stars!=(SELECT COUNT(*) FROM `star` WHERE repo_id=repo.id)", "UPDATE `repository` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE repo_id=?) WHERE id=?", "repository count 'num_stars'", }, // Label.NumIssues { "SELECT label.id FROM `label` WHERE label.num_issues!=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=label.id)", "UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=?) WHERE id=?", "label count 'num_issues'", }, // User.NumRepos { "SELECT `user`.id FROM `user` WHERE `user`.num_repos!=(SELECT COUNT(*) FROM `repository` WHERE owner_id=`user`.id)", "UPDATE `user` SET num_repos=(SELECT COUNT(*) FROM `repository` WHERE owner_id=?) WHERE id=?", "user count 'num_repos'", }, // Issue.NumComments { "SELECT `issue`.id FROM `issue` WHERE `issue`.num_comments!=(SELECT COUNT(*) FROM `comment` WHERE issue_id=`issue`.id AND type=0)", "UPDATE `issue` SET num_comments=(SELECT COUNT(*) FROM `comment` WHERE issue_id=? AND type=0) WHERE id=?", "issue count 'num_comments'", }, } for i := range checkers { repoStatsCheck(checkers[i]) } // FIXME: use checker when v0.9, stop supporting old fork repo format. 
// ***** START: Repository.NumForks ***** results, err := x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_forks!=(SELECT COUNT(*) FROM `repository` WHERE fork_id=repo.id)") if err != nil { log.Error(4, "Select repository count 'num_forks': %v", err) } else { for _, result := range results { id := com.StrTo(result["id"]).MustInt64() log.Trace("Updating repository count 'num_forks': %d", id) repo, err := GetRepositoryByID(id) if err != nil { log.Error(4, "GetRepositoryByID[%d]: %v", id, err) continue } rawResult, err := x.Query("SELECT COUNT(*) FROM `repository` WHERE fork_id=?", repo.ID) if err != nil { log.Error(4, "Select count of forks[%d]: %v", repo.ID, err) continue } repo.NumForks = int(parseCountResult(rawResult)) if err = UpdateRepository(repo, false); err != nil { log.Error(4, "UpdateRepository[%d]: %v", id, err) continue } } } // ***** END: Repository.NumForks ***** } // _________ .__ .__ ___. __ .__ // \_ ___ \ ____ | | | | _____ \_ |__ ________________ _/ |_|__| ____ ____ // / \ \/ / _ \| | | | \__ \ | __ \ / _ \_ __ \__ \\ __\ |/ _ \ / \ // \ \___( <_> ) |_| |__/ __ \| \_\ ( <_> ) | \// __ \| | | ( <_> ) | \ // \______ /\____/|____/____(____ /___ /\____/|__| (____ /__| |__|\____/|___| / // \/ \/ \/ \/ \/ // A Collaboration is a relation between an individual and a repository type Collaboration struct { ID int64 `xorm:"pk autoincr"` RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` UserID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` Created time.Time `xorm:"CREATED"` } // Add collaborator and accompanying access func (repo *Repository) AddCollaborator(u *User) error { collaboration := &Collaboration{ RepoID: repo.ID, UserID: u.Id, } has, err := x.Get(collaboration) if err != nil { return err } else if has { return nil } if err = repo.GetOwner(); err != nil { return fmt.Errorf("GetOwner: %v", err) } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return err } if _, err = sess.InsertOne(collaboration); err != nil { return err } if repo.Owner.IsOrganization() { err = repo.recalculateTeamAccesses(sess, 0) } else { err = repo.recalculateAccesses(sess) } if err != nil { return fmt.Errorf("recalculateAccesses 'team=%v': %v", repo.Owner.IsOrganization(), err) } return sess.Commit() } func (repo *Repository) getCollaborators(e Engine) ([]*User, error) { collaborations := make([]*Collaboration, 0) if err := e.Find(&collaborations, &Collaboration{RepoID: repo.ID}); err != nil { return nil, err } users := make([]*User, len(collaborations)) for i, c := range collaborations { user, err := getUserByID(e, c.UserID) if err != nil { return nil, err } users[i] = user } return users, nil } // GetCollaborators returns the collaborators for a repository func (repo *Repository) GetCollaborators() ([]*User, error) { return repo.getCollaborators(x) } // Delete collaborator and accompanying access func (repo *Repository) DeleteCollaborator(u *User) (err error) { collaboration := &Collaboration{ RepoID: repo.ID, UserID: u.Id, } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return err } if has, err := sess.Delete(collaboration); err != nil || has == 0 { return err } else if err = repo.recalculateAccesses(sess); err != nil { return err } return sess.Commit() } // __ __ __ .__ // / \ / \_____ _/ |_ ____ | |__ // \ \/\/ /\__ \\ __\/ ___\| | \ // \ / / __ \| | \ \___| Y \ // \__/\ / (____ /__| \___ >___| / // \/ \/ \/ \/ // Watch is connection request for receiving repository notification. 
type Watch struct { ID int64 `xorm:"pk autoincr"` UserID int64 `xorm:"UNIQUE(watch)"` RepoID int64 `xorm:"UNIQUE(watch)"` } func isWatching(e Engine, uid, repoId int64) bool { has, _ := e.Get(&Watch{0, uid, repoId}) return has } // IsWatching checks if user has watched given repository. func IsWatching(uid, repoId int64) bool { return isWatching(x, uid, repoId) } func watchRepo(e Engine, uid, repoId int64, watch bool) (err error) { if watch { if isWatching(e, uid, repoId) { return nil } if _, err = e.Insert(&Watch{RepoID: repoId, UserID: uid}); err != nil { return err } _, err = e.Exec("UPDATE `repository` SET num_watches = num_watches + 1 WHERE id = ?", repoId) } else { if !isWatching(e, uid, repoId) { return nil } if _, err = e.Delete(&Watch{0, uid, repoId}); err != nil { return err } _, err = e.Exec("UPDATE `repository` SET num_watches=num_watches-1 WHERE id=?", repoId) } return err } // Watch or unwatch repository. func WatchRepo(uid, repoId int64, watch bool) (err error) { return watchRepo(x, uid, repoId, watch) } func getWatchers(e Engine, repoID int64) ([]*Watch, error) { watches := make([]*Watch, 0, 10) return watches, e.Find(&watches, &Watch{RepoID: repoID}) } // GetWatchers returns all watchers of given repository. func GetWatchers(repoID int64) ([]*Watch, error) { return getWatchers(x, repoID) } // Repository.GetWatchers returns range of users watching given repository. func (repo *Repository) GetWatchers(page int) ([]*User, error) { users := make([]*User, 0, ItemsPerPage) sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("watch.repo_id=?", repo.ID) if setting.UsePostgreSQL { sess = sess.Join("LEFT", "watch", `"user".id=watch.user_id`) } else { sess = sess.Join("LEFT", "watch", "user.id=watch.user_id") } return users, sess.Find(&users) } func notifyWatchers(e Engine, act *Action) error { // Add feeds for user self and all watchers. watches, err := getWatchers(e, act.RepoID) if err != nil { return fmt.Errorf("get watchers: %v", err) } // Add feed for actioner. act.UserID = act.ActUserID if _, err = e.InsertOne(act); err != nil { return fmt.Errorf("insert new actioner: %v", err) } for i := range watches { if act.ActUserID == watches[i].UserID { continue } act.ID = 0 act.UserID = watches[i].UserID if _, err = e.InsertOne(act); err != nil { return fmt.Errorf("insert new action: %v", err) } } return nil } // NotifyWatchers creates batch of actions for every watcher. func NotifyWatchers(act *Action) error { return notifyWatchers(x, act) } // _________ __ // / _____// |______ _______ // \_____ \\ __\__ \\_ __ \ // / \| | / __ \| | \/ // /_______ /|__| (____ /__| // \/ \/ type Star struct { ID int64 `xorm:"pk autoincr"` UID int64 `xorm:"UNIQUE(s)"` RepoID int64 `xorm:"UNIQUE(s)"` } // Star or unstar repository. 
func StarRepo(uid, repoId int64, star bool) (err error) { if star { if IsStaring(uid, repoId) { return nil } if _, err = x.Insert(&Star{UID: uid, RepoID: repoId}); err != nil { return err } else if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars + 1 WHERE id = ?", repoId); err != nil { return err } _, err = x.Exec("UPDATE `user` SET num_stars = num_stars + 1 WHERE id = ?", uid) } else { if !IsStaring(uid, repoId) { return nil } if _, err = x.Delete(&Star{0, uid, repoId}); err != nil { return err } else if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars - 1 WHERE id = ?", repoId); err != nil { return err } _, err = x.Exec("UPDATE `user` SET num_stars = num_stars - 1 WHERE id = ?", uid) } return err } // IsStaring checks if user has starred given repository. func IsStaring(uid, repoId int64) bool { has, _ := x.Get(&Star{0, uid, repoId}) return has } func (repo *Repository) GetStargazers(page int) ([]*User, error) { users := make([]*User, 0, ItemsPerPage) sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("star.repo_id=?", repo.ID) if setting.UsePostgreSQL { sess = sess.Join("LEFT", "star", `"user".id=star.uid`) } else { sess = sess.Join("LEFT", "star", "user.id=star.uid") } return users, sess.Find(&users) } // ___________ __ // \_ _____/__________| | __ // | __)/ _ \_ __ \ |/ / // | \( <_> ) | \/ < // \___ / \____/|__| |__|_ \ // \/ \/ // HasForkedRepo checks if given user has already forked a repository with given ID. func HasForkedRepo(ownerID, repoID int64) (*Repository, bool) { repo := new(Repository) has, _ := x.Where("owner_id=? AND fork_id=?", ownerID, repoID).Get(repo) return repo, has } func ForkRepository(u *User, oldRepo *Repository, name, desc string) (_ *Repository, err error) { repo := &Repository{ OwnerID: u.Id, Owner: u, Name: name, LowerName: strings.ToLower(name), Description: desc, DefaultBranch: oldRepo.DefaultBranch, IsPrivate: oldRepo.IsPrivate, IsFork: true, ForkID: oldRepo.ID, } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return nil, err } if err = createRepository(sess, u, repo); err != nil { return nil, err } if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks+1 WHERE id=?", oldRepo.ID); err != nil { return nil, err } repoPath := RepoPath(u.Name, repo.Name) _, stderr, err := process.ExecTimeout(10*time.Minute, fmt.Sprintf("ForkRepository(git clone): %s/%s", u.Name, repo.Name), "git", "clone", "--bare", oldRepo.RepoPath(), repoPath) if err != nil { return nil, fmt.Errorf("git clone: %v", stderr) } _, stderr, err = process.ExecDir(-1, repoPath, fmt.Sprintf("ForkRepository(git update-server-info): %s", repoPath), "git", "update-server-info") if err != nil { return nil, fmt.Errorf("git update-server-info: %v", err) } if err = createUpdateHook(repoPath); err != nil { return nil, fmt.Errorf("createUpdateHook: %v", err) } return repo, sess.Commit() } func (repo *Repository) GetForks() ([]*Repository, error) { forks := make([]*Repository, 0, repo.NumForks) return forks, x.Find(&forks, &Repository{ForkID: repo.ID}) }
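For reference, a minimal standalone sketch of how HandleCloneUserCredentials from the file above behaves. The function body is copied verbatim from the file; the main function and the example URL are invented for illustration:

package main

import (
	"fmt"
	"strings"
)

// Copied verbatim from the file under review.
func HandleCloneUserCredentials(url string, mosaics bool) string {
	i := strings.Index(url, "@")
	if i == -1 {
		return url
	}
	start := strings.Index(url, "://")
	if start == -1 {
		return url
	}
	if mosaics {
		return url[:start+3] + "<credentials>" + url[i:]
	}
	return url[:start+3] + url[i+1:]
}

func main() {
	url := "https://user:secret@example.com/owner/repo.git" // invented example URL
	fmt.Println(HandleCloneUserCredentials(url, true))  // https://<credentials>@example.com/owner/repo.git
	fmt.Println(HandleCloneUserCredentials(url, false)) // https://example.com/owner/repo.git
}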
1
10,335
Calling it `userName` is good enough; `signedUserName` narrows the actual usage of this method (see the naming sketch after this record).
gogs-gogs
go
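The naming point above is easier to see side by side. A minimal hypothetical sketch: the helper and its body are invented purely for illustration, and only the naming contrast comes from the comment itself:

package main

import "fmt"

// Hypothetical helper. Naming the parameter `signedUserName` implies callers
// must pass the currently signed-in user, which narrows how the helper reads.
func greetNarrow(signedUserName string) string { return "hello, " + signedUserName }

// The general name documents the real contract: any user name will do.
func greet(userName string) string { return "hello, " + userName }

func main() {
	fmt.Println(greetNarrow("alice"))
	fmt.Println(greet("bob"))
}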
@@ -65,6 +65,13 @@ namespace Datadog.Trace AppDomain.CurrentDomain.ProcessExit += CurrentDomain_ProcessExit; AppDomain.CurrentDomain.UnhandledException += CurrentDomain_UnhandledException; Console.CancelKeyPress += Console_CancelKeyPress; + + // If configured, add/remove the correlation identifiers into the + // LibLog logging context when a scope is activated/closed + if (Settings.LogsInjectionEnabled) + { + new LibLogCorrelationIdentifierScopeSubscriber(_scopeManager); + } } /// <summary>
1
using System; using System.Collections.Generic; using System.Diagnostics; using System.Reflection; using Datadog.Trace.Agent; using Datadog.Trace.Configuration; using Datadog.Trace.Logging; using Datadog.Trace.Sampling; namespace Datadog.Trace { /// <summary> /// The tracer is responsible for creating spans and flushing them to the Datadog agent /// </summary> public class Tracer : IDatadogTracer { private const string UnknownServiceName = "UnknownService"; private static readonly ILog Log = LogProvider.For<Tracer>(); private readonly IScopeManager _scopeManager; private readonly IAgentWriter _agentWriter; static Tracer() { // create the default global Tracer Instance = new Tracer(); } /// <summary> /// Initializes a new instance of the <see cref="Tracer"/> class with default settings. /// </summary> public Tracer() : this(settings: null, agentWriter: null, sampler: null, scopeManager: null) { } /// <summary> /// Initializes a new instance of the <see cref="Tracer"/> /// class using the specified <see cref="IConfigurationSource"/>. /// </summary> /// <param name="settings"> /// A <see cref="TracerSettings"/> instance with the desired settings, /// or null to use the default configuration sources. /// </param> public Tracer(TracerSettings settings) : this(settings, agentWriter: null, sampler: null, scopeManager: null) { } internal Tracer(TracerSettings settings, IAgentWriter agentWriter, ISampler sampler, IScopeManager scopeManager) { // fall back to default implementations of each dependency if not provided Settings = settings ?? TracerSettings.FromDefaultSources(); _agentWriter = agentWriter ?? new AgentWriter(new Api(Settings.AgentUri)); _scopeManager = scopeManager ?? new AsyncLocalScopeManager(); Sampler = sampler ?? new RateByServiceSampler(); // if not configured, try to determine an appropriate service name DefaultServiceName = Settings.ServiceName ?? GetApplicationName() ?? UnknownServiceName; // Register callbacks to make sure we flush the traces before exiting AppDomain.CurrentDomain.ProcessExit += CurrentDomain_ProcessExit; AppDomain.CurrentDomain.UnhandledException += CurrentDomain_UnhandledException; Console.CancelKeyPress += Console_CancelKeyPress; } /// <summary> /// Gets or sets the global tracer object /// </summary> public static Tracer Instance { get; set; } /// <summary> /// Gets the active scope /// </summary> public Scope ActiveScope => _scopeManager.Active; /// <summary> /// Gets a value indicating whether debugging mode is enabled. /// </summary> /// <value><c>true</c> is debugging is enabled, otherwise <c>false</c>.</value> bool IDatadogTracer.IsDebugEnabled => Settings.DebugEnabled; /// <summary> /// Gets the default service name for traces where a service name is not specified. /// </summary> public string DefaultServiceName { get; } /// <summary> /// Gets this tracer's settings. /// </summary> public TracerSettings Settings { get; } /// <summary> /// Gets the tracer's scope manager, which determines which span is currently active, if any. /// </summary> IScopeManager IDatadogTracer.ScopeManager => _scopeManager; /// <summary> /// Gets the <see cref="ISampler"/> instance used by this <see cref="IDatadogTracer"/> instance. 
/// </summary> ISampler IDatadogTracer.Sampler => Sampler; internal ISampler Sampler { get; } /// <summary> /// Create a new Tracer with the given parameters /// </summary> /// <param name="agentEndpoint">The agent endpoint where the traces will be sent (default is http://localhost:8126).</param> /// <param name="defaultServiceName">Default name of the service (default is the name of the executing assembly).</param> /// <param name="isDebugEnabled">Turns on all debug logging (this may have an impact on application performance).</param> /// <returns>The newly created tracer</returns> public static Tracer Create(Uri agentEndpoint = null, string defaultServiceName = null, bool isDebugEnabled = false) { // Keep supporting this older public method by creating a TracerConfiguration // from default sources, overwriting the specified settings, and passing that to the constructor. var configuration = TracerSettings.FromDefaultSources(); configuration.DebugEnabled = isDebugEnabled; if (agentEndpoint != null) { configuration.AgentUri = agentEndpoint; } if (defaultServiceName != null) { configuration.ServiceName = defaultServiceName; } return new Tracer(configuration); } /// <summary> /// Make a span active and return a scope that can be disposed to close the span /// </summary> /// <param name="span">The span to activate</param> /// <param name="finishOnClose">If set to false, closing the returned scope will not close the enclosed span </param> /// <returns>A Scope object wrapping this span</returns> public Scope ActivateSpan(Span span, bool finishOnClose = true) { return _scopeManager.Activate(span, finishOnClose); } /// <summary> /// This is a shortcut for <see cref="StartSpan"/> and <see cref="ActivateSpan"/>, it creates a new span with the given parameters and makes it active. /// </summary> /// <param name="operationName">The span's operation name</param> /// <param name="parent">The span's parent</param> /// <param name="serviceName">The span's service name</param> /// <param name="startTime">An explicit start time for that span</param> /// <param name="ignoreActiveScope">If set the span will not be a child of the currently active span</param> /// <param name="finishOnClose">If set to false, closing the returned scope will not close the enclosed span </param> /// <returns>A scope wrapping the newly created span</returns> public Scope StartActive(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, bool finishOnClose = true) { var span = StartSpan(operationName, parent, serviceName, startTime, ignoreActiveScope); return _scopeManager.Activate(span, finishOnClose); } /// <summary> /// Creates a new <see cref="Span"/> with the specified parameters. /// </summary> /// <param name="operationName">The span's operation name</param> /// <param name="parent">The span's parent</param> /// <param name="serviceName">The span's service name</param> /// <param name="startTime">An explicit start time for that span</param> /// <param name="ignoreActiveScope">If set the span will not be a child of the currently active span</param> /// <returns>The newly created span</returns> public Span StartSpan(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? 
startTime = null, bool ignoreActiveScope = false) { if (parent == null && !ignoreActiveScope) { parent = _scopeManager.Active?.Span?.Context; } ITraceContext traceContext; // try to get the trace context (from local spans) or // sampling priority (from propagated spans), // otherwise start a new trace context if (parent is SpanContext parentSpanContext) { traceContext = parentSpanContext.TraceContext ?? new TraceContext(this) { SamplingPriority = parentSpanContext.SamplingPriority }; } else { traceContext = new TraceContext(this); } var finalServiceName = serviceName ?? parent?.ServiceName ?? DefaultServiceName; var spanContext = new SpanContext(parent, traceContext, finalServiceName); var span = new Span(spanContext, startTime) { OperationName = operationName, }; var env = Settings.Environment; // automatically add the "env" tag if defined if (!string.IsNullOrWhiteSpace(env)) { span.SetTag(Tags.Env, env); } traceContext.AddSpan(span); return span; } /// <summary> /// Writes the specified <see cref="Span"/> collection to the agent writer. /// </summary> /// <param name="trace">The <see cref="Span"/> collection to write.</param> void IDatadogTracer.Write(List<Span> trace) { _agentWriter.WriteTrace(trace); } /// <summary> /// Create an Uri to the Agent using host and port from /// the specified <paramref name="settings"/>. /// </summary> /// <param name="settings">A <see cref="TracerSettings"/> object </param> /// <returns>An Uri that can be used to send traces to the Agent.</returns> internal static Uri GetAgentUri(TracerSettings settings) { return settings.AgentUri; } /// <summary> /// Gets an "application name" for the executing application by looking at /// the hosted app name (.NET Framework on IIS only), assembly name, and process name. /// </summary> /// <returns>The default service name.</returns> private static string GetApplicationName() { try { #if !NETSTANDARD2_0 // System.Web.dll is only available on .NET Framework if (System.Web.Hosting.HostingEnvironment.IsHosted) { // if this app is an ASP.NET application, return "SiteName/ApplicationVirtualPath". // note that ApplicationVirtualPath includes a leading slash. return (System.Web.Hosting.HostingEnvironment.SiteName + System.Web.Hosting.HostingEnvironment.ApplicationVirtualPath).TrimEnd('/'); } #endif return Assembly.GetEntryAssembly()?.GetName().Name ?? Process.GetCurrentProcess().ProcessName; } catch (Exception ex) { Log.ErrorException("Error creating default service name.", ex); return null; } } private void CurrentDomain_ProcessExit(object sender, EventArgs e) { _agentWriter.FlushAndCloseAsync().Wait(); } private void CurrentDomain_UnhandledException(object sender, UnhandledExceptionEventArgs e) { _agentWriter.FlushAndCloseAsync().Wait(); } private void Console_CancelKeyPress(object sender, ConsoleCancelEventArgs e) { _agentWriter.FlushAndCloseAsync().Wait(); } } }
1
15,101
How would you feel about a static method to express intent? (See the sketch after this record.)
DataDog-dd-trace-dotnet
.cs
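The suggestion above is about replacing a constructor called only for its side effects with a named call that states intent. A language-agnostic sketch, written in Go for consistency with the rest of this dump; all names below are invented stand-ins for the C# types in the diff:

package main

import "fmt"

type scopeManager struct{}

type correlationSubscriber struct{ sm *scopeManager }

// Constructor whose instance is never kept: calling it bare, as the diff does,
// hides the fact that we only want its subscription side effect.
func newCorrelationSubscriber(sm *scopeManager) *correlationSubscriber {
	fmt.Println("correlation identifiers will be injected into the log context")
	return &correlationSubscriber{sm: sm}
}

// A named helper in the spirit of the reviewer's "static method": the call
// site now reads as an action, not as a discarded allocation.
func enableLogCorrelation(sm *scopeManager) {
	_ = newCorrelationSubscriber(sm)
}

func main() {
	enableLogCorrelation(&scopeManager{})
}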
@@ -75,7 +75,8 @@ func (t LoggableTLSConnState) MarshalLogObject(enc zapcore.ObjectEncoder) error enc.AddUint16("version", t.Version) enc.AddUint16("cipher_suite", t.CipherSuite) enc.AddString("proto", t.NegotiatedProtocol) - enc.AddBool("proto_mutual", t.NegotiatedProtocolIsMutual) + // NegotiatedProtocolIsMutual is deprecated - it's always true + enc.AddBool("proto_mutual", true) enc.AddString("server_name", t.ServerName) if len(t.PeerCertificates) > 0 { enc.AddString("client_common_name", t.PeerCertificates[0].Subject.CommonName)
1
// Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "crypto/tls" "net/http" "go.uber.org/zap/zapcore" ) // LoggableHTTPRequest makes an HTTP request loggable with zap.Object(). type LoggableHTTPRequest struct{ *http.Request } // MarshalLogObject satisfies the zapcore.ObjectMarshaler interface. func (r LoggableHTTPRequest) MarshalLogObject(enc zapcore.ObjectEncoder) error { enc.AddString("remote_addr", r.RemoteAddr) enc.AddString("proto", r.Proto) enc.AddString("method", r.Method) enc.AddString("host", r.Host) enc.AddString("uri", r.RequestURI) enc.AddObject("headers", LoggableHTTPHeader(r.Header)) if r.TLS != nil { enc.AddObject("tls", LoggableTLSConnState(*r.TLS)) } return nil } // LoggableHTTPHeader makes an HTTP header loggable with zap.Object(). type LoggableHTTPHeader http.Header // MarshalLogObject satisfies the zapcore.ObjectMarshaler interface. func (h LoggableHTTPHeader) MarshalLogObject(enc zapcore.ObjectEncoder) error { if h == nil { return nil } for key, val := range h { enc.AddArray(key, LoggableStringArray(val)) } return nil } // LoggableStringArray makes a slice of strings marshalable for logging. type LoggableStringArray []string // MarshalLogArray satisfies the zapcore.ArrayMarshaler interface. func (sa LoggableStringArray) MarshalLogArray(enc zapcore.ArrayEncoder) error { if sa == nil { return nil } for _, s := range sa { enc.AppendString(s) } return nil } // LoggableTLSConnState makes a TLS connection state loggable with zap.Object(). type LoggableTLSConnState tls.ConnectionState // MarshalLogObject satisfies the zapcore.ObjectMarshaler interface. func (t LoggableTLSConnState) MarshalLogObject(enc zapcore.ObjectEncoder) error { enc.AddBool("resumed", t.DidResume) enc.AddUint16("version", t.Version) enc.AddUint16("cipher_suite", t.CipherSuite) enc.AddString("proto", t.NegotiatedProtocol) enc.AddBool("proto_mutual", t.NegotiatedProtocolIsMutual) enc.AddString("server_name", t.ServerName) if len(t.PeerCertificates) > 0 { enc.AddString("client_common_name", t.PeerCertificates[0].Subject.CommonName) enc.AddString("client_serial", t.PeerCertificates[0].SerialNumber.String()) } return nil } // Interface guards var ( _ zapcore.ObjectMarshaler = (*LoggableHTTPRequest)(nil) _ zapcore.ObjectMarshaler = (*LoggableHTTPHeader)(nil) _ zapcore.ArrayMarshaler = (*LoggableStringArray)(nil) _ zapcore.ObjectMarshaler = (*LoggableTLSConnState)(nil) )
1
15,749
Do we know for sure that no other code changed its value? (Maybe some weird testing use case or something.) Why not just use the actual value instead of risking lying? (See the sketch after this record.)
caddyserver-caddy
go
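The question above contrasts logging a hardcoded true with logging the field itself. A minimal sketch of the alternative the reviewer is asking about, assuming a Go toolchain where the field is deprecated but still present:

package main

import (
	"crypto/tls"
	"fmt"
)

// Reads the actual (deprecated) field instead of a literal, so the log entry
// cannot silently diverge from the real connection state.
func protoMutual(cs *tls.ConnectionState) bool {
	return cs.NegotiatedProtocolIsMutual //nolint:staticcheck // deprecated: documented to always be true
}

func main() {
	cs := &tls.ConnectionState{NegotiatedProtocolIsMutual: true}
	fmt.Println(protoMutual(cs))
}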
@@ -16,8 +16,7 @@ package account -//go:generate dbgen -i root.sql -p account -n root -o rootInstall.go -//go:generate dbgen -i part.sql -p account -n part -o partInstall.go +//go:generate dbgen -i root.sql -p account -n root -o rootInstall.go -h ../../scripts/LICENSE_HEADER import ( "database/sql" "fmt"
1
// Copyright (C) 2019-2020 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package account //go:generate dbgen -i root.sql -p account -n root -o rootInstall.go //go:generate dbgen -i part.sql -p account -n part -o partInstall.go import ( "database/sql" "fmt" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" ) // A Root encapsulates a set of secrets which controls some store of money. // // A Root is authorized to spend money and create Participations // for which this account is the parent. // // It handles persistence and secure deletion of secrets. type Root struct { secrets *crypto.SignatureSecrets store db.Accessor } // GenerateRoot uses the system's source of randomness to generate an // account. func GenerateRoot(store db.Accessor) (Root, error) { var seed crypto.Seed crypto.RandBytes(seed[:]) return ImportRoot(store, seed) } // ImportRoot uses a provided source of randomness to instantiate an // account. func ImportRoot(store db.Accessor, seed [32]byte) (acc Root, err error) { s := crypto.GenerateSignatureSecrets(seed) raw := protocol.Encode(s) err = store.Atomic(func(tx *sql.Tx) error { err := rootInstallDatabase(tx) if err != nil { return fmt.Errorf("ImportRoot: failed to install database: %v", err) } stmt, err := tx.Prepare("insert into RootAccount values (?)") if err != nil { return fmt.Errorf("ImportRoot: failed to prepare statement: %v", err) } _, err = stmt.Exec(raw) if err != nil { return fmt.Errorf("ImportRoot: failed to insert account: %v", err) } return nil }) if err != nil { return } acc.secrets = s acc.store = store return } // RestoreRoot restores a Root from a database handle. func RestoreRoot(store db.Accessor) (acc Root, err error) { var raw []byte err = store.Atomic(func(tx *sql.Tx) error { var nrows int row := tx.QueryRow("select count(*) from RootAccount") err := row.Scan(&nrows) if err != nil { return fmt.Errorf("RestoreRoot: could not query storage: %v", err) } if nrows != 1 { logging.Base().Infof("RestoreRoot: state not found (n = %v)", nrows) } row = tx.QueryRow("select data from RootAccount") err = row.Scan(&raw) if err != nil { return fmt.Errorf("RestoreRoot: could not read account raw data: %v", err) } return nil }) if err != nil { return } acc.secrets = &crypto.SignatureSecrets{} err = protocol.Decode(raw, acc.secrets) if err != nil { err = fmt.Errorf("RestoreRoot: error decoding account: %v", err) return } acc.store = store return } // Secrets returns the signing secrets associated with the Root account. func (root Root) Secrets() *crypto.SignatureSecrets { return root.secrets } // Address returns the address associated with the Root account. 
func (root Root) Address() basics.Address { return basics.Address(root.secrets.SignatureVerifier) } // RestoreParticipation restores a Participation from a database // handle. func RestoreParticipation(store db.Accessor) (acc Participation, err error) { var rawParent, rawVRF, rawVoting []byte err = Migrate(store) if err != nil { return } err = store.Atomic(func(tx *sql.Tx) error { var nrows int row := tx.QueryRow("select count(*) from ParticipationAccount") err := row.Scan(&nrows) if err != nil { return fmt.Errorf("RestoreParticipation: could not query storage: %v", err) } if nrows != 1 { logging.Base().Infof("RestoreParticipation: state not found (n = %v)", nrows) } row = tx.QueryRow("select parent, vrf, voting, firstValid, lastValid, keyDilution from ParticipationAccount") err = row.Scan(&rawParent, &rawVRF, &rawVoting, &acc.FirstValid, &acc.LastValid, &acc.KeyDilution) if err != nil { return fmt.Errorf("RestoreParticipation: could not read account raw data: %v", err) } copy(acc.Parent[:32], rawParent) return nil }) if err != nil { return Participation{}, err } acc.VRF = &crypto.VRFSecrets{} err = protocol.Decode(rawVRF, acc.VRF) if err != nil { return Participation{}, err } acc.Voting = &crypto.OneTimeSignatureSecrets{} err = protocol.Decode(rawVoting, acc.Voting) if err != nil { return Participation{}, err } acc.Store = store return acc, nil } // A ParticipationInterval defines an interval for which a participation account is valid. type ParticipationInterval struct { basics.Address // FirstValid and LastValid are inclusive. FirstValid basics.Round LastValid basics.Round }
1
38,996
partInstall.go isn't an auto-generated file, and part.sql doesn't exist either. (See the sketch after this record.)
algorand-go-algorand
go
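For context on the comment above: `go generate` runs each `//go:generate` directive in file order, so a directive whose input file (here part.sql) is missing presumably fails and aborts the whole generate run. A minimal sketch of the surviving directive after the patch, with the flags exactly as they appear in the diff:

package account

// The remaining directive once the stale part.sql line is dropped; it is
// executed when `go generate ./...` is invoked on this package.
//go:generate dbgen -i root.sql -p account -n root -o rootInstall.go -h ../../scripts/LICENSE_HEADER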
@@ -97,3 +97,11 @@ func (c *TTLCache) Delete(key interface{}) error { c.entries.Delete(key) return nil } + +func (c *TTLCache) GetAll() ([]interface{}, error) { + return nil, cache.ErrUnimplemented +} + +func (c *TTLCache) PutHash(k interface{}, v interface{}) error { + return cache.ErrUnimplemented +}
1
// Copyright 2020 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package memorycache import ( "context" "sync" "time" "github.com/pipe-cd/pipe/pkg/cache" "github.com/pipe-cd/pipe/pkg/cache/cachemetrics" ) type entry struct { value interface{} expiration time.Time } type TTLCache struct { entries sync.Map ttl time.Duration ctx context.Context } func NewTTLCache(ctx context.Context, ttl time.Duration, evictionInterval time.Duration) *TTLCache { c := &TTLCache{ ttl: ttl, ctx: ctx, } if evictionInterval > 0 { go c.startEvicter(evictionInterval) } return c } func (c *TTLCache) startEvicter(interval time.Duration) { ticker := time.NewTicker(interval) for { select { case now := <-ticker.C: c.evictExpired(now) case <-c.ctx.Done(): ticker.Stop() return } } } func (c *TTLCache) evictExpired(t time.Time) { c.entries.Range(func(key interface{}, value interface{}) bool { e := value.(*entry) if e.expiration.Before(t) { c.entries.Delete(key) } return true }) } func (c *TTLCache) Get(key interface{}) (interface{}, error) { item, ok := c.entries.Load(key) if !ok { cachemetrics.IncGetOperationCounter( cachemetrics.LabelSourceInmemory, cachemetrics.LabelStatusMiss, ) return nil, cache.ErrNotFound } cachemetrics.IncGetOperationCounter( cachemetrics.LabelSourceInmemory, cachemetrics.LabelStatusHit, ) return item.(*entry).value, nil } func (c *TTLCache) Put(key interface{}, value interface{}) error { e := &entry{ value: value, expiration: time.Now().Add(c.ttl), } c.entries.Store(key, e) return nil } func (c *TTLCache) Delete(key interface{}) error { c.entries.Delete(key) return nil }
1
17,840
`k` is unused in PutHash
pipe-cd-pipe
go
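A minimal sketch of the fix the review comment points at, assuming these stubs exist only to satisfy a cache interface: renaming the unused parameters to Go's blank identifier documents the intent and silences unused-parameter lints (note that `v` is just as unused as `k`). This would drop into the patched file as-is:

// PutHash is an intentional stub: both parameters are ignored, so the
// blank identifier makes that explicit instead of leaving named
// parameters unused.
func (c *TTLCache) PutHash(_ interface{}, _ interface{}) error {
	return cache.ErrUnimplemented
}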
@@ -277,4 +277,12 @@ public class Utils { Method method = clazz.getDeclaredMethod(methodName, argTypes); return method.invoke(null, args); } + + public static void copyStream(InputStream input, OutputStream output) throws IOException { + byte[] buffer = new byte[1024]; + int bytesRead; + while ((bytesRead = input.read(buffer)) != -1) { + output.write(buffer, 0, bytesRead); + } + } }
1
/* * Copyright 2012 LinkedIn, Inc * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.Collection; import java.util.Enumeration; import java.util.Random; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; import java.util.zip.ZipOutputStream; import org.apache.commons.io.IOUtils; /** * A util helper class full of static methods that are commonly used. */ public class Utils { public static final Random RANDOM = new Random(); /** * Private constructor. */ private Utils() { } /** * Equivalent to Object.equals except that it handles nulls. If a and b are * both null, true is returned. * * @param a * @param b * @return */ public static boolean equals(Object a, Object b) { if (a == null || b == null) { return a == b; } return a.equals(b); } /** * Return the object if it is non-null, otherwise throw an exception * * @param <T> * The type of the object * @param t * The object * @return The object if it is not null * @throws IllegalArgumentException * if the object is null */ public static <T> T nonNull(T t) { if (t == null) { throw new IllegalArgumentException("Null value not allowed."); } else { return t; } } /** * Print the message and then exit with the given exit code * * @param message * The message to print * @param exitCode * The exit code */ public static void croak(String message, int exitCode) { System.err.println(message); System.exit(exitCode); } public static File createTempDir() { return createTempDir(new File(System.getProperty("java.io.tmpdir"))); } public static File createTempDir(File parent) { File temp = new File(parent, Integer.toString(Math.abs(RANDOM.nextInt()) % 100000000)); temp.delete(); temp.mkdir(); temp.deleteOnExit(); return temp; } public static void zip(File input, File output) throws IOException { FileOutputStream out = new FileOutputStream(output); ZipOutputStream zOut = new ZipOutputStream(out); zipFile("", input, zOut); zOut.close(); } private static void zipFile(String path, File input, ZipOutputStream zOut) throws IOException { if (input.isDirectory()) { File[] files = input.listFiles(); if (files != null) { for (File f : files) { String childPath = path + input.getName() + (f.isDirectory() ? "/" : ""); zipFile(childPath, f, zOut); } } } else { String childPath = path + (path.length() > 0 ? 
"/" : "") + input.getName(); ZipEntry entry = new ZipEntry(childPath); zOut.putNextEntry(entry); InputStream fileInputStream = new BufferedInputStream( new FileInputStream(input)); IOUtils.copy(fileInputStream, zOut); fileInputStream.close(); } } public static void unzip(ZipFile source, File dest) throws IOException { Enumeration<?> entries = source.entries(); while (entries.hasMoreElements()) { ZipEntry entry = (ZipEntry) entries.nextElement(); File newFile = new File(dest, entry.getName()); if (entry.isDirectory()) { newFile.mkdirs(); } else { newFile.getParentFile().mkdirs(); InputStream src = source.getInputStream(entry); OutputStream output = new BufferedOutputStream( new FileOutputStream(newFile)); IOUtils.copy(src, output); src.close(); output.close(); } } } public static String flattenToString(Collection<?> collection, String delimiter) { StringBuffer buffer = new StringBuffer(); for (Object obj : collection) { buffer.append(obj.toString()); buffer.append(','); } if (buffer.length() > 0) { buffer.setLength(buffer.length() - 1); } return buffer.toString(); } public static Double convertToDouble(Object obj) { if (obj instanceof String) { return Double.parseDouble((String) obj); } return (Double) obj; } /** * Get the root cause of the Exception * * @param e The Exception * @return The root cause of the Exception */ private static RuntimeException getCause(InvocationTargetException e) { Throwable cause = e.getCause(); if(cause instanceof RuntimeException) throw (RuntimeException) cause; else throw new IllegalStateException(e.getCause()); } /** * Get the Class of all the objects * * @param args The objects to get the Classes from * @return The classes as an array */ public static Class<?>[] getTypes(Object... args) { Class<?>[] argTypes = new Class<?>[args.length]; for(int i = 0; i < argTypes.length; i++) argTypes[i] = args[i].getClass(); return argTypes; } public static Object callConstructor(Class<?> c, Object... args) { return callConstructor(c, getTypes(args), args); } /** * Call the class constructor with the given arguments * * @param c The class * @param args The arguments * @return The constructed object */ public static Object callConstructor(Class<?> c, Class<?>[] argTypes, Object[] args) { try { Constructor<?> cons = c.getConstructor(argTypes); return cons.newInstance(args); } catch(InvocationTargetException e) { throw getCause(e); } catch(IllegalAccessException e) { throw new IllegalStateException(e); } catch(NoSuchMethodException e) { throw new IllegalStateException(e); } catch(InstantiationException e) { throw new IllegalStateException(e); } } public static String formatDuration(long startTime, long endTime) { if (startTime == -1) { return "-"; } long durationMS; if (endTime == -1) { durationMS = System.currentTimeMillis() - startTime; } else { durationMS = endTime - startTime; } long seconds = durationMS/1000; if (seconds < 60) { return seconds + " sec"; } long minutes = seconds / 60; seconds %= 60; if (minutes < 60) { return minutes + "m " + seconds + "s"; } long hours = minutes / 60; minutes %= 60; if (hours < 24) { return hours + "h " + minutes + "m " + seconds + "s"; } long days = hours / 24; hours %= 24; return days + "d " + hours + "h " + minutes + "m"; } public static Object invokeStaticMethod(ClassLoader loader, String className, String methodName, Object ... 
args) throws ClassNotFoundException, SecurityException, NoSuchMethodException, IllegalArgumentException, IllegalAccessException, InvocationTargetException { Class<?> clazz = loader.loadClass(className); Class<?>[] argTypes = new Class[args.length]; for (int i=0; i < args.length; ++i) { argTypes[i] = args[i].getClass(); } Method method = clazz.getDeclaredMethod(methodName, argTypes); return method.invoke(null, args); } }
1
9,366
Use IOUtils instead
azkaban-azkaban
java
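The class already imports org.apache.commons.io.IOUtils and calls IOUtils.copy in zipFile and unzip, so the reviewer's "Use IOUtils instead" collapses the new helper into a one-line delegation. A minimal sketch of copyStream rewritten that way:

public static void copyStream(InputStream input, OutputStream output)
    throws IOException {
  // Delegate to commons-io (already used elsewhere in this class)
  // rather than maintaining a hand-rolled buffer loop.
  IOUtils.copy(input, output);
}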
@@ -27,6 +27,8 @@ type idmFake struct { unlockFails bool } +// NewIdentityManagerFake creates fake identity manager for testing purposes +// TODO each caller should use it's own mocked manager part instead of global one func NewIdentityManagerFake(existingIdentities []Identity, newIdentity Identity) *idmFake { return &idmFake{"", "", existingIdentities, newIdentity, false} }
1
/* * Copyright (C) 2017 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package identity import "errors" type idmFake struct { LastUnlockAddress string LastUnlockPassphrase string existingIdentities []Identity newIdentity Identity unlockFails bool } func NewIdentityManagerFake(existingIdentities []Identity, newIdentity Identity) *idmFake { return &idmFake{"", "", existingIdentities, newIdentity, false} } func (fakeIdm *idmFake) MarkUnlockToFail() { fakeIdm.unlockFails = true } func (fakeIdm *idmFake) CreateNewIdentity(_ string) (Identity, error) { return fakeIdm.newIdentity, nil } func (fakeIdm *idmFake) GetIdentities() []Identity { return fakeIdm.existingIdentities } func (fakeIdm *idmFake) GetIdentity(address string) (Identity, error) { for _, fakeIdentity := range fakeIdm.existingIdentities { if address == fakeIdentity.Address { return fakeIdentity, nil } } return Identity{}, errors.New("Identity not found") } func (fakeIdm *idmFake) HasIdentity(_ string) bool { return true } func (fakeIdm *idmFake) Unlock(address string, passphrase string) error { fakeIdm.LastUnlockAddress = address fakeIdm.LastUnlockPassphrase = passphrase if fakeIdm.unlockFails { return errors.New("Unlock failed") } return nil }
1
13,089
It's not related to this line of code, but I don't know a more proper place to write it: it looks like `e2e/myst-provider/db/myst.db` should be in the `.gitignore` file and should not be committed.
mysteriumnetwork-node
go
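A sketch of the .gitignore entry the comment asks for, with the path taken verbatim from the review message:

# Local database produced by the e2e provider run; should not be committed.
e2e/myst-provider/db/myst.db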
@@ -22,6 +22,7 @@ from selenium.webdriver.common.desired_capabilities import DesiredCapabilities class Options(object): + KEY = "goog:chromeOptions" def __init__(self): self._binary_location = ''
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import base64 import os from selenium.webdriver.common.desired_capabilities import DesiredCapabilities class Options(object): def __init__(self): self._binary_location = '' self._arguments = [] self._extension_files = [] self._extensions = [] self._experimental_options = {} self._debugger_address = None @property def binary_location(self): """ Returns the location of the binary otherwise an empty string """ return self._binary_location @binary_location.setter def binary_location(self, value): """ Allows you to set where the chromium binary lives :Args: - value: path to the Chromium binary """ self._binary_location = value @property def debugger_address(self): """ Returns the address of the remote devtools instance """ return self._debugger_address @debugger_address.setter def debugger_address(self, value): """ Allows you to set the address of the remote devtools instance that the ChromeDriver instance will try to connect to during an active wait. :Args: - value: address of remote devtools instance if any (hostname[:port]) """ self._debugger_address = value @property def arguments(self): """ Returns a list of arguments needed for the browser """ return self._arguments def add_argument(self, argument): """ Adds an argument to the list :Args: - Sets the arguments """ if argument: self._arguments.append(argument) else: raise ValueError("argument can not be null") @property def extensions(self): """ Returns a list of encoded extensions that will be loaded into chrome """ encoded_extensions = [] for ext in self._extension_files: file_ = open(ext, 'rb') # Should not use base64.encodestring() which inserts newlines every # 76 characters (per RFC 1521). Chromedriver has to remove those # unnecessary newlines before decoding, causing performance hit. 
encoded_extensions.append(base64.b64encode(file_.read()).decode('UTF-8')) file_.close() return encoded_extensions + self._extensions def add_extension(self, extension): """ Adds the path to the extension to a list that will be used to extract it to the ChromeDriver :Args: - extension: path to the \*.crx file """ if extension: extension_to_add = os.path.abspath(os.path.expanduser(extension)) if os.path.exists(extension_to_add): self._extension_files.append(extension_to_add) else: raise IOError("Path to the extension doesn't exist") else: raise ValueError("argument can not be null") def add_encoded_extension(self, extension): """ Adds Base64 encoded string with extension data to a list that will be used to extract it to the ChromeDriver :Args: - extension: Base64 encoded string with extension data """ if extension: self._extensions.append(extension) else: raise ValueError("argument can not be null") @property def experimental_options(self): """ Returns a dictionary of experimental options for chrome. """ return self._experimental_options def add_experimental_option(self, name, value): """ Adds an experimental option which is passed to chrome. Args: name: The experimental option name. value: The option value. """ self._experimental_options[name] = value def to_capabilities(self): """ Creates a capabilities with all the options that have been set and returns a dictionary with everything """ chrome = DesiredCapabilities.CHROME.copy() chrome_options = self.experimental_options.copy() chrome_options["extensions"] = self.extensions if self.binary_location: chrome_options["binary"] = self.binary_location chrome_options["args"] = self.arguments if self.debugger_address: chrome_options["debuggerAddress"] = self.debugger_address chrome["goog:chromeOptions"] = chrome_options return chrome
1
14,945
nice touch since Google likes to change things every now and then.
SeleniumHQ-selenium
py
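The natural follow-up this constant enables (not part of the diff shown) is to reference it in to_capabilities instead of repeating the vendor-prefixed string literal; a sketch against the class as quoted above:

def to_capabilities(self):
    chrome = DesiredCapabilities.CHROME.copy()
    chrome_options = self.experimental_options.copy()
    chrome_options["extensions"] = self.extensions
    if self.binary_location:
        chrome_options["binary"] = self.binary_location
    chrome_options["args"] = self.arguments
    if self.debugger_address:
        chrome_options["debuggerAddress"] = self.debugger_address
    # Single point of change the next time the key is renamed.
    chrome[self.KEY] = chrome_options
    return chrome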
@@ -1174,7 +1174,8 @@ public final class SolrCore implements SolrInfoBean, Closeable { } private CircuitBreakerManager initCircuitBreakerManager() { - CircuitBreakerManager circuitBreakerManager = CircuitBreakerManager.build(solrConfig); + final PluginInfo info = solrConfig.getPluginInfo(CircuitBreakerManager.class.getName()); + CircuitBreakerManager circuitBreakerManager = CircuitBreakerManager.build(info.initArgs); return circuitBreakerManager; }
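One thing worth checking in this patch: elsewhere in this file, solrConfig.getPluginInfo returns null when the corresponding section is absent from the config, and the other init methods (initDeletionPolicy, initRecoveryStrategyBuilder) guard for that, so info.initArgs here can raise a NullPointerException on a config with no circuit-breaker section. A hedged null-safe variant, assuming CircuitBreakerManager.build accepts null initArgs:

private CircuitBreakerManager initCircuitBreakerManager() {
  final PluginInfo info = solrConfig.getPluginInfo(CircuitBreakerManager.class.getName());
  // Mirror the null-guard used by the other init* methods: a core with
  // no circuit-breaker configuration should still initialize.
  return CircuitBreakerManager.build(info == null ? null : info.initArgs);
}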
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.core; import java.io.Closeable; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.lang.invoke.MethodHandles; import java.lang.reflect.Constructor; import java.nio.charset.StandardCharsets; import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantLock; import com.codahale.metrics.Counter; import com.codahale.metrics.Timer; import com.google.common.collect.Iterators; import com.google.common.collect.MapMaker; import org.apache.commons.io.FileUtils; import org.apache.lucene.analysis.util.ResourceLoader; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexDeletionPolicy; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.LockObtainFailedException; import org.apache.solr.client.solrj.impl.BinaryResponseParser; import org.apache.solr.cloud.CloudDescriptor; import org.apache.solr.cloud.RecoveryStrategy; import org.apache.solr.cloud.ZkSolrResourceLoader; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.DocCollection; import org.apache.solr.common.cloud.Slice; import org.apache.solr.common.cloud.SolrZkClient; import org.apache.solr.common.params.CollectionAdminParams; import org.apache.solr.common.params.CommonParams; import 
org.apache.solr.common.params.CommonParams.EchoParamStyle; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.params.UpdateParams; import org.apache.solr.common.util.ExecutorUtil; import org.apache.solr.common.util.IOUtils; import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.ObjectReleaseTracker; import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.common.util.SolrNamedThreadFactory; import org.apache.solr.common.util.Utils; import org.apache.solr.core.DirectoryFactory.DirContext; import org.apache.solr.core.snapshots.SolrSnapshotManager; import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager; import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager.SnapshotMetaData; import org.apache.solr.handler.IndexFetcher; import org.apache.solr.handler.ReplicationHandler; import org.apache.solr.handler.RequestHandlerBase; import org.apache.solr.handler.SolrConfigHandler; import org.apache.solr.handler.component.HighlightComponent; import org.apache.solr.handler.component.SearchComponent; import org.apache.solr.logging.MDCLoggingContext; import org.apache.solr.metrics.SolrCoreMetricManager; import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.metrics.SolrMetricsContext; import org.apache.solr.pkg.*; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestHandler; import org.apache.solr.response.BinaryResponseWriter; import org.apache.solr.response.CSVResponseWriter; import org.apache.solr.response.GeoJSONResponseWriter; import org.apache.solr.response.GraphMLResponseWriter; import org.apache.solr.response.JSONResponseWriter; import org.apache.solr.response.PHPResponseWriter; import org.apache.solr.response.PHPSerializedResponseWriter; import org.apache.solr.response.PythonResponseWriter; import org.apache.solr.response.QueryResponseWriter; import org.apache.solr.response.RawResponseWriter; import org.apache.solr.response.RubyResponseWriter; import org.apache.solr.response.SchemaXmlResponseWriter; import org.apache.solr.response.SmileResponseWriter; import org.apache.solr.response.SolrQueryResponse; import org.apache.solr.response.XMLResponseWriter; import org.apache.solr.response.transform.TransformerFactory; import org.apache.solr.rest.ManagedResourceStorage; import org.apache.solr.rest.ManagedResourceStorage.StorageIO; import org.apache.solr.rest.RestManager; import org.apache.solr.schema.FieldType; import org.apache.solr.schema.IndexSchema; import org.apache.solr.schema.ManagedIndexSchema; import org.apache.solr.schema.SimilarityFactory; import org.apache.solr.search.QParserPlugin; import org.apache.solr.search.SolrFieldCacheBean; import org.apache.solr.search.SolrIndexSearcher; import org.apache.solr.search.ValueSourceParser; import org.apache.solr.search.stats.LocalStatsCache; import org.apache.solr.search.stats.StatsCache; import org.apache.solr.update.DefaultSolrCoreState; import org.apache.solr.update.DirectUpdateHandler2; import org.apache.solr.update.IndexFingerprint; import org.apache.solr.update.SolrCoreState; import org.apache.solr.update.SolrCoreState.IndexWriterCloser; import org.apache.solr.update.SolrIndexWriter; import org.apache.solr.update.UpdateHandler; import org.apache.solr.update.VersionInfo; import org.apache.solr.update.processor.DistributedUpdateProcessorFactory; import org.apache.solr.update.processor.LogUpdateProcessorFactory; import org.apache.solr.update.processor.NestedUpdateProcessorFactory; import 
org.apache.solr.update.processor.RunUpdateProcessorFactory; import org.apache.solr.update.processor.UpdateRequestProcessorChain; import org.apache.solr.update.processor.UpdateRequestProcessorChain.ProcessorInfo; import org.apache.solr.update.processor.UpdateRequestProcessorFactory; import org.apache.solr.util.IOFunction; import org.apache.solr.util.NumberUtils; import org.apache.solr.util.PropertiesInputStream; import org.apache.solr.util.PropertiesOutputStream; import org.apache.solr.util.RefCounted; import org.apache.solr.util.TestInjection; import org.apache.solr.util.circuitbreaker.CircuitBreakerManager; import org.apache.solr.util.plugin.NamedListInitializedPlugin; import org.apache.solr.util.plugin.PluginInfoInitialized; import org.apache.solr.util.plugin.SolrCoreAware; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.solr.common.params.CommonParams.NAME; import static org.apache.solr.common.params.CommonParams.PATH; /** * SolrCore got its name because it represents the "core" of Solr -- one index and everything needed to make it work. * When multi-core support was added to Solr way back in version 1.3, this class was required so that the core * functionality could be re-used multiple times. */ public final class SolrCore implements SolrInfoBean, Closeable { public static final String version = "1.0"; private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private static final Logger requestLog = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass().getName() + ".Request"); private static final Logger slowLog = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass().getName() + ".SlowRequest"); private String name; private String logid; // used to show what name is set /** * A unique id to differentiate multiple instances of the same core * If we reload a core, the name remains same , but the id will be new */ public final UUID uniqueId = UUID.randomUUID(); private boolean isReloaded = false; private final CoreDescriptor coreDescriptor; private final CoreContainer coreContainer; private final SolrConfig solrConfig; private final SolrResourceLoader resourceLoader; private volatile IndexSchema schema; @SuppressWarnings({"rawtypes"}) private final NamedList configSetProperties; private final String dataDir; private final String ulogDir; private final UpdateHandler updateHandler; private final SolrCoreState solrCoreState; private final Date startTime = new Date(); private final long startNanoTime = System.nanoTime(); private final RequestHandlers reqHandlers; private final PluginBag<SearchComponent> searchComponents = new PluginBag<>(SearchComponent.class, this); private final PluginBag<UpdateRequestProcessorFactory> updateProcessors = new PluginBag<>(UpdateRequestProcessorFactory.class, this, true); private final Map<String, UpdateRequestProcessorChain> updateProcessorChains; private final SolrCoreMetricManager coreMetricManager; private final Map<String, SolrInfoBean> infoRegistry = new ConcurrentHashMap<>(); private final IndexDeletionPolicyWrapper solrDelPolicy; private final SolrSnapshotMetaDataManager snapshotMgr; private final DirectoryFactory directoryFactory; private final RecoveryStrategy.Builder recoveryStrategyBuilder; private IndexReaderFactory indexReaderFactory; private final Codec codec; //singleton listener for all packages used in schema private final PackageListeningClassLoader schemaPluginsLoader; 
private final CircuitBreakerManager circuitBreakerManager; private final List<Runnable> confListeners = new CopyOnWriteArrayList<>(); private final ReentrantLock ruleExpiryLock; private final ReentrantLock snapshotDelLock; // A lock instance to guard against concurrent deletions. private Timer newSearcherTimer; private Timer newSearcherWarmupTimer; private Counter newSearcherCounter; private Counter newSearcherMaxReachedCounter; private Counter newSearcherOtherErrorsCounter; private final String metricTag = SolrMetricProducer.getUniqueMetricTag(this, null); private final SolrMetricsContext solrMetricsContext; public volatile boolean searchEnabled = true; public volatile boolean indexEnabled = true; public volatile boolean readOnly = false; private PackageListeners packageListeners = new PackageListeners(this); public Date getStartTimeStamp() { return startTime; } private final Map<IndexReader.CacheKey, IndexFingerprint> perSegmentFingerprintCache = new MapMaker().weakKeys().makeMap(); public long getStartNanoTime() { return startNanoTime; } public long getUptimeMs() { return TimeUnit.MILLISECONDS.convert(System.nanoTime() - startNanoTime, TimeUnit.NANOSECONDS); } private final RestManager restManager; public RestManager getRestManager() { return restManager; } public PackageListeners getPackageListeners() { return packageListeners; } public PackageListeningClassLoader getSchemaPluginsLoader() { return schemaPluginsLoader; } static int boolean_query_max_clause_count = Integer.MIN_VALUE; private ExecutorService coreAsyncTaskExecutor = ExecutorUtil.newMDCAwareCachedThreadPool("Core Async Task"); /** * The SolrResourceLoader used to load all resources for this core. * * @since solr 1.3 */ public SolrResourceLoader getResourceLoader() { return resourceLoader; } /** Gets the SolrResourceLoader for a given package * @param pkg The package name */ public SolrResourceLoader getResourceLoader(String pkg) { if (pkg == null) { return resourceLoader; } PackageLoader.Package aPackage = coreContainer.getPackageLoader().getPackage(pkg); PackageLoader.Package.Version latest = aPackage.getLatest(); return latest.getLoader(); } /** * Gets the configuration resource name used by this core instance. * * @since solr 1.3 */ public String getConfigResource() { return solrConfig.getResourceName(); } /** * Gets the configuration object used by this core instance. */ public SolrConfig getSolrConfig() { return solrConfig; } /** * Gets the schema resource name used by this core instance. * * @since solr 1.3 */ public String getSchemaResource() { return getLatestSchema().getResourceName(); } /** * @return the latest snapshot of the schema used by this core instance. * @see #setLatestSchema */ public IndexSchema getLatestSchema() { return schema; } /** The core's instance directory (absolute). */ public Path getInstancePath() { return getCoreDescriptor().getInstanceDir(); } /** * Sets the latest schema snapshot to be used by this core instance. * If the specified <code>replacementSchema</code> uses a {@link SimilarityFactory} which is * {@link SolrCoreAware} then this method will {@link SolrCoreAware#inform} that factory about * this SolrCore prior to using the <code>replacementSchema</code> * * @see #getLatestSchema */ public void setLatestSchema(IndexSchema replacementSchema) { // 1) For a newly instantiated core, the Similarity needs SolrCore before inform() is called on // any registered SolrCoreAware listeners (which will likeley need to use the SolrIndexSearcher. 
// // 2) If a new IndexSchema is assigned to an existing live SolrCore (ie: managed schema // replacement via SolrCloud) then we need to explicitly inform() the similarity because // we can't rely on the normal SolrResourceLoader lifecycle because the sim was instantiated // after the SolrCore was already live (see: SOLR-8311 + SOLR-8280) final SimilarityFactory similarityFactory = replacementSchema.getSimilarityFactory(); if (similarityFactory instanceof SolrCoreAware) { ((SolrCoreAware) similarityFactory).inform(this); } this.schema = replacementSchema; } @SuppressWarnings({"rawtypes"}) public NamedList getConfigSetProperties() { return configSetProperties; } public String getDataDir() { return dataDir; } public String getUlogDir() { return ulogDir; } public String getIndexDir() { synchronized (searcherLock) { if (_searcher == null) return getNewIndexDir(); SolrIndexSearcher searcher = _searcher.get(); return searcher.getPath() == null ? dataDir + "index/" : searcher .getPath(); } } /** * Returns the indexdir as given in index.properties. If index.properties exists in dataDir and * there is a property <i>index</i> available and it points to a valid directory * in dataDir that is returned. Else dataDir/index is returned. Only called for creating new indexSearchers * and indexwriters. Use the getIndexDir() method to know the active index directory * * @return the indexdir as given in index.properties * @throws SolrException if for any reason the a reasonable index directory cannot be determined. */ public String getNewIndexDir() { Directory dir = null; try { dir = getDirectoryFactory().get(getDataDir(), DirContext.META_DATA, getSolrConfig().indexConfig.lockType); String result = getIndexPropertyFromPropFile(dir); if (!result.equals(lastNewIndexDir)) { log.debug("New index directory detected: old={} new={}", lastNewIndexDir, result); } lastNewIndexDir = result; return result; } catch (IOException e) { SolrException.log(log, "", e); // See SOLR-11687. It is inadvisable to assume we can do the right thing for any but a small // number of exceptions that ware caught and swallowed in getIndexProperty. throw new SolrException(ErrorCode.SERVER_ERROR, "Error in getNewIndexDir, exception: ", e); } finally { if (dir != null) { try { getDirectoryFactory().release(dir); } catch (IOException e) { SolrException.log(log, "", e); } } } } // This is guaranteed to return a string or throw an exception. // // NOTE: Not finding the index.properties file is normal. // // We return dataDir/index if there is an index.properties file with no value for "index" // See SOLR-11687 // private String getIndexPropertyFromPropFile(Directory dir) throws IOException { IndexInput input; try { input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, IOContext.DEFAULT); } catch (FileNotFoundException | NoSuchFileException e) { // Swallow this error, dataDir/index is the right thing to return // if there is no index.properties file // All other exceptions are will propagate to caller. return dataDir + "index/"; } final InputStream is = new PropertiesInputStream(input); // c'tor just assigns a variable here, no exception thrown. try { Properties p = new Properties(); p.load(new InputStreamReader(is, StandardCharsets.UTF_8)); String s = p.getProperty("index"); if (s != null && s.trim().length() > 0) { return dataDir + s.trim(); } // We'll return dataDir/index/ if the properties file has an "index" property with // no associated value or does not have an index property at all. 
return dataDir + "index/"; } finally { IOUtils.closeQuietly(is); } } private String lastNewIndexDir; // for debugging purposes only... access not synchronized, but that's ok public DirectoryFactory getDirectoryFactory() { return directoryFactory; } public IndexReaderFactory getIndexReaderFactory() { return indexReaderFactory; } public long getIndexSize() { Directory dir; long size = 0; try { if (directoryFactory.exists(getIndexDir())) { dir = directoryFactory.get(getIndexDir(), DirContext.DEFAULT, solrConfig.indexConfig.lockType); try { size = DirectoryFactory.sizeOfDirectory(dir); } finally { directoryFactory.release(dir); } } } catch (IOException e) { SolrException.log(log, "IO error while trying to get the size of the Directory", e); } return size; } @Override public String getName() { return name; } public void setName(String v) { Objects.requireNonNull(v); boolean renamed = this.name != null && !this.name.equals(v); assert !renamed || coreDescriptor.getCloudDescriptor() == null : "Cores are not renamed in SolrCloud"; this.name = v; this.logid = "[" + v + "] "; // TODO remove; obsoleted by MDC if (renamed && coreMetricManager != null) { coreMetricManager.afterCoreRename(); } } public String getLogId() { return this.logid; } /** * Returns the {@link SolrCoreMetricManager} for this core. * * @return the {@link SolrCoreMetricManager} for this core */ public SolrCoreMetricManager getCoreMetricManager() { return coreMetricManager; } /** * Returns a Map of name vs SolrInfoBean objects. The returned map is an instance of * a ConcurrentHashMap and therefore no synchronization is needed for putting, removing * or iterating over it. * * @return the Info Registry map which contains SolrInfoBean objects keyed by name * @since solr 1.3 */ public Map<String, SolrInfoBean> getInfoRegistry() { return infoRegistry; } private IndexDeletionPolicyWrapper initDeletionPolicy(IndexDeletionPolicyWrapper delPolicyWrapper) { if (delPolicyWrapper != null) { return delPolicyWrapper; } final PluginInfo info = solrConfig.getPluginInfo(IndexDeletionPolicy.class.getName()); final IndexDeletionPolicy delPolicy; if (info != null) { delPolicy = createInstance(info.className, IndexDeletionPolicy.class, "Deletion Policy for SOLR", this, getResourceLoader()); if (delPolicy instanceof NamedListInitializedPlugin) { ((NamedListInitializedPlugin) delPolicy).init(info.initArgs); } } else { delPolicy = new SolrDeletionPolicy(); } return new IndexDeletionPolicyWrapper(delPolicy, snapshotMgr); } private SolrSnapshotMetaDataManager initSnapshotMetaDataManager() { try { String dirName = getDataDir() + SolrSnapshotMetaDataManager.SNAPSHOT_METADATA_DIR + "/"; Directory snapshotDir = directoryFactory.get(dirName, DirContext.DEFAULT, getSolrConfig().indexConfig.lockType); return new SolrSnapshotMetaDataManager(this, snapshotDir); } catch (IOException e) { throw new IllegalStateException(e); } } /** * This method deletes the snapshot with the specified name. If the directory * storing the snapshot is not the same as the *current* core index directory, * then delete the files corresponding to this snapshot. Otherwise we leave the * index files related to snapshot as is (assuming the underlying Solr IndexDeletionPolicy * will clean them up appropriately). * * @param commitName The name of the snapshot to be deleted. * @throws IOException in case of I/O error. 
*/ public void deleteNamedSnapshot(String commitName) throws IOException { // Note this lock is required to prevent multiple snapshot deletions from // opening multiple IndexWriter instances simultaneously. this.snapshotDelLock.lock(); try { Optional<SnapshotMetaData> metadata = snapshotMgr.release(commitName); if (metadata.isPresent()) { long gen = metadata.get().getGenerationNumber(); String indexDirPath = metadata.get().getIndexDirPath(); if (!indexDirPath.equals(getIndexDir())) { Directory d = getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, "none"); try { Collection<SnapshotMetaData> snapshots = snapshotMgr.listSnapshotsInIndexDir(indexDirPath); log.info("Following snapshots exist in the index directory {} : {}", indexDirPath, snapshots); if (snapshots.isEmpty()) {// No snapshots remain in this directory. Can be cleaned up! log.info("Removing index directory {} since all named snapshots are deleted.", indexDirPath); getDirectoryFactory().remove(d); } else { SolrSnapshotManager.deleteSnapshotIndexFiles(this, d, gen); } } finally { getDirectoryFactory().release(d); } } } } finally { snapshotDelLock.unlock(); } } /** * This method deletes the index files not associated with any named snapshot only * if the specified indexDirPath is not the *current* index directory. * * @param indexDirPath The path of the directory * @throws IOException In case of I/O error. */ public void deleteNonSnapshotIndexFiles(String indexDirPath) throws IOException { // Skip if the specified indexDirPath is the *current* index directory. if (getIndexDir().equals(indexDirPath)) { return; } // Note this lock is required to prevent multiple snapshot deletions from // opening multiple IndexWriter instances simultaneously. this.snapshotDelLock.lock(); Directory dir = getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, "none"); try { Collection<SnapshotMetaData> snapshots = snapshotMgr.listSnapshotsInIndexDir(indexDirPath); log.info("Following snapshots exist in the index directory {} : {}", indexDirPath, snapshots); // Delete the old index directory only if no snapshot exists in that directory. if (snapshots.isEmpty()) { log.info("Removing index directory {} since all named snapshots are deleted.", indexDirPath); getDirectoryFactory().remove(dir); } else { SolrSnapshotManager.deleteNonSnapshotIndexFiles(this, dir, snapshots); } } finally { snapshotDelLock.unlock(); if (dir != null) { getDirectoryFactory().release(dir); } } } private void initListeners() { final Class<SolrEventListener> clazz = SolrEventListener.class; final String label = "Event Listener"; for (PluginInfo info : solrConfig.getPluginInfos(SolrEventListener.class.getName())) { final String event = info.attributes.get("event"); if ("firstSearcher".equals(event)) { SolrEventListener obj = createInitInstance(info, clazz, label, null); firstSearcherListeners.add(obj); log.debug("[{}] Added SolrEventListener for firstSearcher: [{}]", logid, obj); } else if ("newSearcher".equals(event)) { SolrEventListener obj = createInitInstance(info, clazz, label, null); newSearcherListeners.add(obj); log.debug("[{}] Added SolrEventListener for newSearcher: [{}]", logid, obj); } } } final List<SolrEventListener> firstSearcherListeners = new ArrayList<>(); final List<SolrEventListener> newSearcherListeners = new ArrayList<>(); /** * NOTE: this function is not thread safe. However, it is safe to call within the * <code>inform( SolrCore core )</code> function for <code>SolrCoreAware</code> classes. 
* Outside <code>inform</code>, this could potentially throw a ConcurrentModificationException * * @see SolrCoreAware */ public void registerFirstSearcherListener(SolrEventListener listener) { firstSearcherListeners.add(listener); } /** * NOTE: this function is not thread safe. However, it is safe to call within the * <code>inform( SolrCore core )</code> function for <code>SolrCoreAware</code> classes. * Outside <code>inform</code>, this could potentially throw a ConcurrentModificationException * * @see SolrCoreAware */ public void registerNewSearcherListener(SolrEventListener listener) { newSearcherListeners.add(listener); } /** * NOTE: this function is not thread safe. However, it is safe to call within the * <code>inform( SolrCore core )</code> function for <code>SolrCoreAware</code> classes. * Outside <code>inform</code>, this could potentially throw a ConcurrentModificationException * * @see SolrCoreAware */ public QueryResponseWriter registerResponseWriter(String name, QueryResponseWriter responseWriter) { return responseWriters.put(name, responseWriter); } public SolrCore reload(ConfigSet coreConfig) throws IOException { // only one reload at a time synchronized (getUpdateHandler().getSolrCoreState().getReloadLock()) { solrCoreState.increfSolrCoreState(); final SolrCore currentCore; if (!getNewIndexDir().equals(getIndexDir())) { // the directory is changing, don't pass on state currentCore = null; } else { currentCore = this; } boolean success = false; SolrCore core = null; try { CoreDescriptor cd = new CoreDescriptor(name, getCoreDescriptor()); cd.loadExtraProperties(); //Reload the extra properties core = new SolrCore(coreContainer, cd, coreConfig, getDataDir(), updateHandler, solrDelPolicy, currentCore, true); // we open a new IndexWriter to pick up the latest config core.getUpdateHandler().getSolrCoreState().newIndexWriter(core, false); core.getSearcher(true, false, null, true); success = true; return core; } finally { // close the new core on any errors that have occurred. if (!success && core != null && core.getOpenCount() > 0) { IOUtils.closeQuietly(core); } } } } private DirectoryFactory initDirectoryFactory() { return DirectoryFactory.loadDirectoryFactory(solrConfig, coreContainer, coreMetricManager.getRegistryName()); } private RecoveryStrategy.Builder initRecoveryStrategyBuilder() { final PluginInfo info = solrConfig.getPluginInfo(RecoveryStrategy.Builder.class.getName()); final RecoveryStrategy.Builder rsBuilder; if (info != null && info.className != null) { log.info(info.className); rsBuilder = getResourceLoader().newInstance(info.className, RecoveryStrategy.Builder.class); } else { log.debug("solr.RecoveryStrategy.Builder"); rsBuilder = new RecoveryStrategy.Builder(); } if (info != null) { rsBuilder.init(info.initArgs); } return rsBuilder; } private void initIndexReaderFactory() { IndexReaderFactory indexReaderFactory; PluginInfo info = solrConfig.getPluginInfo(IndexReaderFactory.class.getName()); if (info != null) { indexReaderFactory = resourceLoader.newInstance(info.className, IndexReaderFactory.class); indexReaderFactory.init(info.initArgs); } else { indexReaderFactory = new StandardIndexReaderFactory(); } this.indexReaderFactory = indexReaderFactory; } // protect via synchronized(SolrCore.class) private static Set<String> dirs = new HashSet<>(); /** * Returns <code>true</code> iff the index in the named directory is * currently locked. 
* * @param directory the directory to check for a lock * @throws IOException if there is a low-level IO error * @deprecated Use of this method can only lead to race conditions. Try * to actually obtain a lock instead. */ @Deprecated private static boolean isWriterLocked(Directory directory) throws IOException { try { directory.obtainLock(IndexWriter.WRITE_LOCK_NAME).close(); return false; } catch (LockObtainFailedException failed) { return true; } } void initIndex(boolean passOnPreviousState, boolean reload) throws IOException { String indexDir = getNewIndexDir(); boolean indexExists = getDirectoryFactory().exists(indexDir); boolean firstTime; synchronized (SolrCore.class) { firstTime = dirs.add(getDirectoryFactory().normalize(indexDir)); } initIndexReaderFactory(); if (indexExists && firstTime && !passOnPreviousState) { final String lockType = getSolrConfig().indexConfig.lockType; Directory dir = directoryFactory.get(indexDir, DirContext.DEFAULT, lockType); try { if (isWriterLocked(dir)) { log.error("{}Solr index directory '{}' is locked (lockType={}). Throwing exception.", logid, indexDir, lockType); throw new LockObtainFailedException( "Index dir '" + indexDir + "' of core '" + name + "' is already locked. " + "The most likely cause is another Solr server (or another solr core in this server) " + "also configured to use this directory; other possible causes may be specific to lockType: " + lockType); } } finally { directoryFactory.release(dir); } } // Create the index if it doesn't exist. if (!indexExists) { log.debug("{}Solr index directory '{}' doesn't exist. Creating new index...", logid, indexDir); SolrIndexWriter writer = null; try { writer = SolrIndexWriter.create(this, "SolrCore.initIndex", indexDir, getDirectoryFactory(), true, getLatestSchema(), solrConfig.indexConfig, solrDelPolicy, codec); } finally { IOUtils.closeQuietly(writer); } } cleanupOldIndexDirectories(reload); } /** * Creates an instance by trying a constructor that accepts a SolrCore before * trying the default (no arg) constructor. * * @param className the instance class to create * @param cast the class or interface that the instance should extend or implement * @param msg a message helping compose the exception error if any occurs. * @param core The SolrCore instance for which this object needs to be loaded * @return the desired instance * @throws SolrException if the object could not be instantiated */ public static <T> T createInstance(String className, Class<T> cast, String msg, SolrCore core, ResourceLoader resourceLoader) { Class<? extends T> clazz = null; if (msg == null) msg = "SolrCore Object"; try { clazz = resourceLoader.findClass(className, cast); //most of the classes do not have constructors which takes SolrCore argument. It is recommended to obtain SolrCore by implementing SolrCoreAware. // So invariably always it will cause a NoSuchMethodException. 
So iterate though the list of available constructors Constructor<?>[] cons = clazz.getConstructors(); for (Constructor<?> con : cons) { Class<?>[] types = con.getParameterTypes(); if (types.length == 1 && types[0] == SolrCore.class) { return cast.cast(con.newInstance(core)); } } return resourceLoader.newInstance(className, cast);//use the empty constructor } catch (SolrException e) { throw e; } catch (Exception e) { // The JVM likes to wrap our helpful SolrExceptions in things like // "InvocationTargetException" that have no useful getMessage if (null != e.getCause() && e.getCause() instanceof SolrException) { SolrException inner = (SolrException) e.getCause(); throw inner; } throw new SolrException(ErrorCode.SERVER_ERROR, "Error Instantiating " + msg + ", " + className + " failed to instantiate " + cast.getName(), e); } } private UpdateHandler createReloadedUpdateHandler(String className, String msg, UpdateHandler updateHandler) { Class<? extends UpdateHandler> clazz = null; if (msg == null) msg = "SolrCore Object"; try { clazz = getResourceLoader().findClass(className, UpdateHandler.class); //most of the classes do not have constructors which takes SolrCore argument. It is recommended to obtain SolrCore by implementing SolrCoreAware. // So invariably always it will cause a NoSuchMethodException. So iterate though the list of available constructors Constructor<?>[] cons = clazz.getConstructors(); for (Constructor<?> con : cons) { Class<?>[] types = con.getParameterTypes(); if (types.length == 2 && types[0] == SolrCore.class && types[1] == UpdateHandler.class) { return UpdateHandler.class.cast(con.newInstance(this, updateHandler)); } } throw new SolrException(ErrorCode.SERVER_ERROR, "Error Instantiating " + msg + ", " + className + " could not find proper constructor for " + UpdateHandler.class.getName()); } catch (SolrException e) { throw e; } catch (Exception e) { // The JVM likes to wrap our helpful SolrExceptions in things like // "InvocationTargetException" that have no useful getMessage if (null != e.getCause() && e.getCause() instanceof SolrException) { SolrException inner = (SolrException) e.getCause(); throw inner; } throw new SolrException(ErrorCode.SERVER_ERROR, "Error Instantiating " + msg + ", " + className + " failed to instantiate " + UpdateHandler.class.getName(), e); } } public <T extends Object> T createInitInstance(PluginInfo info, Class<T> cast, String msg, String defClassName) { if (info == null) return null; T o = createInstance(info.className == null ? 
defClassName : info.className, cast, msg, this, getResourceLoader(info.pkgName)); return initPlugin(info, o); } public static <T extends Object> T initPlugin(PluginInfo info, T o) { if (o instanceof PluginInfoInitialized) { ((PluginInfoInitialized) o).init(info); } else if (o instanceof NamedListInitializedPlugin) { ((NamedListInitializedPlugin) o).init(info.initArgs); } if (o instanceof SearchComponent) { ((SearchComponent) o).setName(info.name); } return o; } private UpdateHandler createUpdateHandler(String className) { return createInstance(className, UpdateHandler.class, "Update Handler", this, getResourceLoader()); } private UpdateHandler createUpdateHandler(String className, UpdateHandler updateHandler) { return createReloadedUpdateHandler(className, "Update Handler", updateHandler); } public SolrCore(CoreContainer coreContainer, CoreDescriptor cd, ConfigSet configSet) { this(coreContainer, cd, configSet, null, null, null, null, false); } public CoreContainer getCoreContainer() { return coreContainer; } /** * Creates a new core and register it in the list of cores. If a core with the * same name already exists, it will be stopped and replaced by this one. */ private SolrCore(CoreContainer coreContainer, CoreDescriptor coreDescriptor, ConfigSet configSet, String dataDir, UpdateHandler updateHandler, IndexDeletionPolicyWrapper delPolicy, SolrCore prev, boolean reload) { assert ObjectReleaseTracker.track(searcherExecutor); // ensure that in unclean shutdown tests we still close this final CountDownLatch latch = new CountDownLatch(1); try { this.coreContainer = coreContainer; this.coreDescriptor = Objects.requireNonNull(coreDescriptor, "coreDescriptor cannot be null"); setName(coreDescriptor.getName()); this.solrConfig = configSet.getSolrConfig(); this.resourceLoader = configSet.getSolrConfig().getResourceLoader(); schemaPluginsLoader = new PackageListeningClassLoader(coreContainer, resourceLoader, solrConfig::maxPackageVersion, () -> setLatestSchema(configSet.getIndexSchema())); this.packageListeners.addListener(schemaPluginsLoader); IndexSchema schema = configSet.getIndexSchema(); this.configSetProperties = configSet.getProperties(); // Initialize the metrics manager this.coreMetricManager = initCoreMetricManager(solrConfig); this.circuitBreakerManager = initCircuitBreakerManager(); solrMetricsContext = coreMetricManager.getSolrMetricsContext(); this.coreMetricManager.loadReporters(); if (updateHandler == null) { directoryFactory = initDirectoryFactory(); recoveryStrategyBuilder = initRecoveryStrategyBuilder(); solrCoreState = new DefaultSolrCoreState(directoryFactory, recoveryStrategyBuilder); } else { solrCoreState = updateHandler.getSolrCoreState(); directoryFactory = solrCoreState.getDirectoryFactory(); recoveryStrategyBuilder = solrCoreState.getRecoveryStrategyBuilder(); isReloaded = true; } this.dataDir = initDataDir(dataDir, solrConfig, coreDescriptor); this.ulogDir = initUpdateLogDir(coreDescriptor); if (log.isInfoEnabled()) { log.info("[{}] Opening new SolrCore at [{}], dataDir=[{}]", logid, getInstancePath(), this.dataDir); } checkVersionFieldExistsInSchema(schema, coreDescriptor); setLatestSchema(schema); // initialize core metrics initializeMetrics(solrMetricsContext, null); SolrFieldCacheBean solrFieldCacheBean = new SolrFieldCacheBean(); // this is registered at the CONTAINER level because it's not core-specific - for now we // also register it here for back-compat solrFieldCacheBean.initializeMetrics(solrMetricsContext, "core"); infoRegistry.put("fieldCache", 
solrFieldCacheBean); this.maxWarmingSearchers = solrConfig.maxWarmingSearchers; this.slowQueryThresholdMillis = solrConfig.slowQueryThresholdMillis; initListeners(); this.snapshotMgr = initSnapshotMetaDataManager(); this.solrDelPolicy = initDeletionPolicy(delPolicy); this.codec = initCodec(solrConfig, this.schema); initIndex(prev != null, reload); initWriters(); qParserPlugins.init(QParserPlugin.standardPlugins, this); valueSourceParsers.init(ValueSourceParser.standardValueSourceParsers, this); transformerFactories.init(TransformerFactory.defaultFactories, this); loadSearchComponents(); updateProcessors.init(Collections.emptyMap(), this); // Processors initialized before the handlers updateProcessorChains = loadUpdateProcessorChains(); reqHandlers = new RequestHandlers(this); reqHandlers.initHandlersFromConfig(solrConfig); // cause the executor to stall so firstSearcher events won't fire // until after inform() has been called for all components. // searchExecutor must be single-threaded for this to work searcherExecutor.submit(() -> { latch.await(); return null; }); this.updateHandler = initUpdateHandler(updateHandler); initSearcher(prev); // Initialize the RestManager restManager = initRestManager(); // Finally tell anyone who wants to know resourceLoader.inform(resourceLoader); resourceLoader.inform(this); // last call before the latch is released. this.updateHandler.informEventListeners(this); infoRegistry.put("core", this); // register any SolrInfoMBeans SolrResourceLoader initialized // // this must happen after the latch is released, because a JMX server impl may // choose to block on registering until properties can be fetched from an MBean, // and a SolrCoreAware MBean may have properties that depend on getting a Searcher // from the core. resourceLoader.inform(infoRegistry); // Allow the directory factory to report metrics if (directoryFactory instanceof SolrMetricProducer) { ((SolrMetricProducer) directoryFactory).initializeMetrics(solrMetricsContext, "directoryFactory"); } // seed version buckets with max from index during core initialization ... requires a searcher! seedVersionBuckets(); bufferUpdatesIfConstructing(coreDescriptor); this.ruleExpiryLock = new ReentrantLock(); this.snapshotDelLock = new ReentrantLock(); registerConfListener(); } catch (Throwable e) { // release the latch, otherwise we block trying to do the close. This // should be fine, since counting down on a latch of 0 is still fine latch.countDown(); if (e instanceof OutOfMemoryError) { throw (OutOfMemoryError) e; } try { // close down the searcher and any other resources, if it exists, as this // is not recoverable close(); } catch (Throwable t) { if (t instanceof OutOfMemoryError) { throw (OutOfMemoryError) t; } log.error("Error while closing", t); } throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage(), e); } finally { // allow firstSearcher events to fire and make sure it is released latch.countDown(); } assert ObjectReleaseTracker.track(this); } public void seedVersionBuckets() { UpdateHandler uh = getUpdateHandler(); if (uh != null && uh.getUpdateLog() != null) { RefCounted<SolrIndexSearcher> newestSearcher = getRealtimeSearcher(); if (newestSearcher != null) { try { uh.getUpdateLog().seedBucketsWithHighestVersion(newestSearcher.get()); } finally { newestSearcher.decref(); } } else { log.warn("No searcher available! Cannot seed version buckets with max from index."); } } } /** * Set UpdateLog to buffer updates if the slice is in construction. 
*/ private void bufferUpdatesIfConstructing(CoreDescriptor coreDescriptor) { if (coreContainer != null && coreContainer.isZooKeeperAware()) { if (reqHandlers.get("/get") == null) { log.warn("WARNING: RealTimeGetHandler is not registered at /get. SolrCloud will always use full index replication instead of the more efficient PeerSync method."); } // ZK pre-register would have already happened so we read slice properties now final ClusterState clusterState = coreContainer.getZkController().getClusterState(); final DocCollection collection = clusterState.getCollection(coreDescriptor.getCloudDescriptor().getCollectionName()); final Slice slice = collection.getSlice(coreDescriptor.getCloudDescriptor().getShardId()); if (slice.getState() == Slice.State.CONSTRUCTION) { // set update log to buffer before publishing the core getUpdateHandler().getUpdateLog().bufferUpdates(); } } } private void initSearcher(SolrCore prev) throws IOException { // use the (old) writer to open the first searcher RefCounted<IndexWriter> iwRef = null; if (prev != null) { iwRef = prev.getUpdateHandler().getSolrCoreState().getIndexWriter(null); if (iwRef != null) { final IndexWriter iw = iwRef.get(); final SolrCore core = this; newReaderCreator = () -> indexReaderFactory.newReader(iw, core); } } try { getSearcher(false, false, null, true); } finally { newReaderCreator = null; if (iwRef != null) { iwRef.decref(); } } } private UpdateHandler initUpdateHandler(UpdateHandler updateHandler) { String updateHandlerClass = solrConfig.getUpdateHandlerInfo().className; if (updateHandlerClass == null) { updateHandlerClass = DirectUpdateHandler2.class.getName(); } final UpdateHandler newUpdateHandler; if (updateHandler == null) { newUpdateHandler = createUpdateHandler(updateHandlerClass); } else { newUpdateHandler = createUpdateHandler(updateHandlerClass, updateHandler); } if (newUpdateHandler instanceof SolrMetricProducer) { coreMetricManager.registerMetricProducer("updateHandler", (SolrMetricProducer) newUpdateHandler); } infoRegistry.put("updateHandler", newUpdateHandler); return newUpdateHandler; } /** * Initializes the core's {@link SolrCoreMetricManager} with a given configuration. * If metric reporters are configured, they are also initialized for this core. * * @param config the given configuration * @return an instance of {@link SolrCoreMetricManager} */ private SolrCoreMetricManager initCoreMetricManager(SolrConfig config) { SolrCoreMetricManager coreMetricManager = new SolrCoreMetricManager(this); return coreMetricManager; } private CircuitBreakerManager initCircuitBreakerManager() { CircuitBreakerManager circuitBreakerManager = CircuitBreakerManager.build(solrConfig); return circuitBreakerManager; } @Override public void initializeMetrics(SolrMetricsContext parentContext, String scope) { newSearcherCounter = parentContext.counter("new", Category.SEARCHER.toString()); newSearcherTimer = parentContext.timer("time", Category.SEARCHER.toString(), "new"); newSearcherWarmupTimer = parentContext.timer("warmup", Category.SEARCHER.toString(), "new"); newSearcherMaxReachedCounter = parentContext.counter("maxReached", Category.SEARCHER.toString(), "new"); newSearcherOtherErrorsCounter = parentContext.counter("errors", Category.SEARCHER.toString(), "new"); parentContext.gauge(() -> name == null ? 
"(null)" : name, true, "coreName", Category.CORE.toString()); parentContext.gauge(() -> startTime, true, "startTime", Category.CORE.toString()); parentContext.gauge(() -> getOpenCount(), true, "refCount", Category.CORE.toString()); parentContext.gauge(() -> getInstancePath().toString(), true, "instanceDir", Category.CORE.toString()); parentContext.gauge(() -> isClosed() ? "(closed)" : getIndexDir(), true, "indexDir", Category.CORE.toString()); parentContext.gauge(() -> isClosed() ? 0 : getIndexSize(), true, "sizeInBytes", Category.INDEX.toString()); parentContext.gauge(() -> isClosed() ? "(closed)" : NumberUtils.readableSize(getIndexSize()), true, "size", Category.INDEX.toString()); if (coreContainer != null) { final CloudDescriptor cd = getCoreDescriptor().getCloudDescriptor(); if (cd != null) { parentContext.gauge(() -> { if (cd.getCollectionName() != null) { return cd.getCollectionName(); } else { return "_notset_"; } }, true, "collection", Category.CORE.toString()); parentContext.gauge(() -> { if (cd.getShardId() != null) { return cd.getShardId(); } else { return "_auto_"; } }, true, "shard", Category.CORE.toString()); } } // initialize disk total / free metrics Path dataDirPath = Paths.get(dataDir); File dataDirFile = dataDirPath.toFile(); parentContext.gauge(() -> dataDirFile.getTotalSpace(), true, "totalSpace", Category.CORE.toString(), "fs"); parentContext.gauge(() -> dataDirFile.getUsableSpace(), true, "usableSpace", Category.CORE.toString(), "fs"); parentContext.gauge(() -> dataDirPath.toAbsolutePath().toString(), true, "path", Category.CORE.toString(), "fs"); parentContext.gauge(() -> { try { return org.apache.lucene.util.IOUtils.spins(dataDirPath.toAbsolutePath()); } catch (IOException e) { // default to spinning return true; } }, true, "spins", Category.CORE.toString(), "fs"); } public String getMetricTag() { return metricTag; } @Override public SolrMetricsContext getSolrMetricsContext() { return solrMetricsContext; } private void checkVersionFieldExistsInSchema(IndexSchema schema, CoreDescriptor coreDescriptor) { if (null != coreDescriptor.getCloudDescriptor()) { // we are evidently running in cloud mode. // // In cloud mode, version field is required for correct consistency // ideally this check would be more fine grained, and individual features // would assert it when they initialize, but DistributedUpdateProcessor // is currently a big ball of wax that does more then just distributing // updates (ie: partial document updates), so it needs to work in no cloud // mode as well, and can't assert version field support on init. try { VersionInfo.getAndCheckVersionField(schema); } catch (SolrException e) { throw new SolrException(ErrorCode.SERVER_ERROR, "Schema will not work with SolrCloud mode: " + e.getMessage(), e); } } } private String initDataDir(String dataDir, SolrConfig config, CoreDescriptor coreDescriptor) { return findDataDir(getDirectoryFactory(), dataDir, config, coreDescriptor); } /** * Locate the data directory for a given config and core descriptor. * * @param directoryFactory The directory factory to use if necessary to calculate an absolute path. Should be the same as what will * be used to open the data directory later. * @param dataDir An optional hint to the data directory location. Will be normalized and used if not null. * @param config A solr config to retrieve the default data directory location, if used. * @param coreDescriptor descriptor to load the actual data dir from, if not using the defualt. 
   * @return a normalized data directory name
   * @throws SolrException if the data directory cannot be loaded from the core descriptor
   */
  static String findDataDir(DirectoryFactory directoryFactory, String dataDir, SolrConfig config, CoreDescriptor coreDescriptor) {
    if (dataDir == null) {
      if (coreDescriptor.usingDefaultDataDir()) {
        dataDir = config.getDataDir();
      }
      if (dataDir == null) {
        try {
          dataDir = coreDescriptor.getDataDir();
          if (!directoryFactory.isAbsolute(dataDir)) {
            dataDir = directoryFactory.getDataHome(coreDescriptor);
          }
        } catch (IOException e) {
          throw new SolrException(ErrorCode.SERVER_ERROR, e);
        }
      }
    }
    return SolrPaths.normalizeDir(dataDir);
  }

  public boolean modifyIndexProps(String tmpIdxDirName) {
    return SolrCore.modifyIndexProps(getDirectoryFactory(), getDataDir(), getSolrConfig(), tmpIdxDirName);
  }

  /**
   * Update the index.properties file with the new index sub-directory name
   */
  // package private
  static boolean modifyIndexProps(DirectoryFactory directoryFactory, String dataDir, SolrConfig solrConfig, String tmpIdxDirName) {
    log.info("Updating index properties... index={}", tmpIdxDirName);
    Directory dir = null;
    try {
      dir = directoryFactory.get(dataDir, DirContext.META_DATA, solrConfig.indexConfig.lockType);
      String tmpIdxPropName = IndexFetcher.INDEX_PROPERTIES + "." + System.nanoTime();
      writeNewIndexProps(dir, tmpIdxPropName, tmpIdxDirName);
      directoryFactory.renameWithOverwrite(dir, tmpIdxPropName, IndexFetcher.INDEX_PROPERTIES);
      return true;
    } catch (IOException e1) {
      throw new RuntimeException(e1);
    } finally {
      if (dir != null) {
        try {
          directoryFactory.release(dir);
        } catch (IOException e) {
          SolrException.log(log, "", e);
        }
      }
    }
  }

  /**
   * Write the index.properties file with the new index sub-directory name
   *
   * @param dir           a data directory (containing an index.properties file)
   * @param tmpFileName   the file name to write the new index.properties to
   * @param tmpIdxDirName new index directory name
   */
  private static void writeNewIndexProps(Directory dir, String tmpFileName, String tmpIdxDirName) {
    if (tmpFileName == null) {
      tmpFileName = IndexFetcher.INDEX_PROPERTIES;
    }
    final Properties p = new Properties();

    // Read existing properties
    try {
      final IndexInput input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, DirectoryFactory.IOCONTEXT_NO_CACHE);
      final InputStream is = new PropertiesInputStream(input);
      try {
        p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
      } catch (Exception e) {
        log.error("Unable to load {}", IndexFetcher.INDEX_PROPERTIES, e);
      } finally {
        IOUtils.closeQuietly(is);
      }
    } catch (IOException e) {
      // ignore; file does not exist
    }

    p.put("index", tmpIdxDirName);

    // Write new properties
    Writer os = null;
    try {
      IndexOutput out = dir.createOutput(tmpFileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
      os = new OutputStreamWriter(new PropertiesOutputStream(out), StandardCharsets.UTF_8);
      p.store(os, IndexFetcher.INDEX_PROPERTIES);
      dir.sync(Collections.singleton(tmpFileName));
    } catch (Exception e) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to write " + IndexFetcher.INDEX_PROPERTIES, e);
    } finally {
      IOUtils.closeQuietly(os);
    }
  }

  private String initUpdateLogDir(CoreDescriptor coreDescriptor) {
    String updateLogDir = coreDescriptor.getUlogDir();
    if (updateLogDir == null) {
      updateLogDir = coreDescriptor.getInstanceDir().resolve(dataDir).toString();
    }
    return updateLogDir;
  }

  /**
   * Close the core. If it is still in use, this waits until it is no longer in use.
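   * <p>A minimal usage sketch (illustrative only; assumes the caller is releasing
   * the last reference it owns):
   * <pre class="prettyprint">
   * core.closeAndWait(); // returns only once every outstanding reference has been released
   * </pre>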
* * @see #close() * @see #isClosed() */ public void closeAndWait() { close(); while (!isClosed()) { final long milliSleep = 100; if (log.isInfoEnabled()) { log.info("Core {} is not yet closed, waiting {} ms before checking again.", getName(), milliSleep); } try { Thread.sleep(milliSleep); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new SolrException(ErrorCode.SERVER_ERROR, "Caught InterruptedException whilst waiting for core " + getName() + " to close: " + e.getMessage(), e); } } } private Codec initCodec(SolrConfig solrConfig, final IndexSchema schema) { final PluginInfo info = solrConfig.getPluginInfo(CodecFactory.class.getName()); final CodecFactory factory; if (info != null) { factory = resourceLoader.newInstance(info.className, CodecFactory.class); factory.init(info.initArgs); } else { factory = new CodecFactory() { @Override public Codec getCodec() { return Codec.getDefault(); } }; } if (factory instanceof SolrCoreAware) { // CodecFactory needs SolrCore before inform() is called on all registered // SolrCoreAware listeners, at the end of the SolrCore constructor ((SolrCoreAware) factory).inform(this); } else { for (FieldType ft : schema.getFieldTypes().values()) { if (null != ft.getPostingsFormat()) { String msg = "FieldType '" + ft.getTypeName() + "' is configured with a postings format, but the codec does not support it: " + factory.getClass(); log.error(msg); throw new SolrException(ErrorCode.SERVER_ERROR, msg); } if (null != ft.getDocValuesFormat()) { String msg = "FieldType '" + ft.getTypeName() + "' is configured with a docValues format, but the codec does not support it: " + factory.getClass(); log.error(msg); throw new SolrException(ErrorCode.SERVER_ERROR, msg); } } } return factory.getCodec(); } /** * Create an instance of {@link StatsCache} using configured parameters. 
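   * <p>A configuration sketch, assuming the standard solrconfig.xml plugin syntax
   * (the class shown is one of the stock implementations):
   * <pre>
   * &lt;statsCache class="org.apache.solr.search.stats.ExactStatsCache"/&gt;
   * </pre>
   * When no plugin is configured, the code below falls back to {@link LocalStatsCache}.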
*/ public StatsCache createStatsCache() { final StatsCache cache; PluginInfo pluginInfo = solrConfig.getPluginInfo(StatsCache.class.getName()); if (pluginInfo != null && pluginInfo.className != null && pluginInfo.className.length() > 0) { cache = createInitInstance(pluginInfo, StatsCache.class, null, LocalStatsCache.class.getName()); if (log.isDebugEnabled()) { log.debug("Using statsCache impl: {}", cache.getClass().getName()); } } else { if (log.isDebugEnabled()) { log.debug("Using default statsCache cache: {}", LocalStatsCache.class.getName()); } cache = new LocalStatsCache(); } return cache; } /** * Load the request processors */ private Map<String, UpdateRequestProcessorChain> loadUpdateProcessorChains() { Map<String, UpdateRequestProcessorChain> map = new HashMap<>(); UpdateRequestProcessorChain def = initPlugins(map, UpdateRequestProcessorChain.class, UpdateRequestProcessorChain.class.getName()); if (def == null) { def = map.get(null); } if (def == null) { log.debug("no updateRequestProcessorChain defined as default, creating implicit default"); // construct the default chain UpdateRequestProcessorFactory[] factories = new UpdateRequestProcessorFactory[]{ new LogUpdateProcessorFactory(), new DistributedUpdateProcessorFactory(), new RunUpdateProcessorFactory() }; def = new UpdateRequestProcessorChain(Arrays.asList(factories), this); } map.put(null, def); map.put("", def); map.computeIfAbsent(RunUpdateProcessorFactory.PRE_RUN_CHAIN_NAME, k -> new UpdateRequestProcessorChain(Collections.singletonList(new NestedUpdateProcessorFactory()), this)); return map; } public SolrCoreState getSolrCoreState() { return solrCoreState; } /** * @return an update processor registered to the given name. Throw an exception if this chain is undefined */ public UpdateRequestProcessorChain getUpdateProcessingChain(final String name) { UpdateRequestProcessorChain chain = updateProcessorChains.get(name); if (chain == null) { throw new SolrException(ErrorCode.BAD_REQUEST, "unknown UpdateRequestProcessorChain: " + name); } return chain; } public UpdateRequestProcessorChain getUpdateProcessorChain(SolrParams params) { String chainName = params.get(UpdateParams.UPDATE_CHAIN); UpdateRequestProcessorChain defaultUrp = getUpdateProcessingChain(chainName); ProcessorInfo processorInfo = new ProcessorInfo(params); if (processorInfo.isEmpty()) return defaultUrp; return UpdateRequestProcessorChain.constructChain(defaultUrp, processorInfo, this); } public PluginBag<UpdateRequestProcessorFactory> getUpdateProcessors() { return updateProcessors; } public CircuitBreakerManager getCircuitBreakerManager() { return circuitBreakerManager; } // this core current usage count private final AtomicInteger refCount = new AtomicInteger(1); /** * expert: increments the core reference count */ public void open() { refCount.incrementAndGet(); MDCLoggingContext.setCore(this); } /** * Close all resources allocated by the core if it is no longer in use... * <ul> * <li>searcher</li> * <li>updateHandler</li> * <li>all CloseHooks will be notified</li> * <li>All MBeans will be unregistered from MBeanServer if JMX was enabled * </li> * </ul> * <p> * The behavior of this method is determined by the result of decrementing * the core's reference count (A core is created with a reference count of 1)... * </p> * <ul> * <li>If reference count is &gt; 0, the usage count is decreased by 1 and no * resources are released. * </li> * <li>If reference count is == 0, the resources are released. 
   * <li>If reference count is &lt; 0, an error is logged and no further action
   * is taken.
   * </li>
   * </ul>
   *
   * @see #isClosed()
   */
  @Override
  public void close() {
    MDCLoggingContext.clear(); // balance out open with close
    int count = refCount.decrementAndGet();
    if (count > 0) return; // close is called often, and only actually closes if nothing is using it.
    if (count < 0) {
      log.error("Too many close [count:{}] on {}. Please report this exception to [email protected]", count, this);
      assert false : "Too many closes on SolrCore";
      return;
    }
    log.info("{} CLOSING SolrCore {}", logid, this);

    ExecutorUtil.shutdownAndAwaitTermination(coreAsyncTaskExecutor);

    // stop reporting metrics
    try {
      coreMetricManager.close();
    } catch (Throwable e) {
      SolrException.log(log, e);
      if (e instanceof Error) {
        throw (Error) e;
      }
    }

    if (closeHooks != null) {
      for (CloseHook hook : closeHooks) {
        try {
          hook.preClose(this);
        } catch (Throwable e) {
          SolrException.log(log, e);
          if (e instanceof Error) {
            throw (Error) e;
          }
        }
      }
    }

    if (reqHandlers != null) reqHandlers.close();
    responseWriters.close();
    searchComponents.close();
    qParserPlugins.close();
    valueSourceParsers.close();
    transformerFactories.close();

    try {
      if (null != updateHandler) {
        updateHandler.close();
      }
    } catch (Throwable e) {
      SolrException.log(log, e);
      if (e instanceof Error) {
        throw (Error) e;
      }
    }

    boolean coreStateClosed = false;
    try {
      if (solrCoreState != null) {
        if (updateHandler instanceof IndexWriterCloser) {
          coreStateClosed = solrCoreState.decrefSolrCoreState((IndexWriterCloser) updateHandler);
        } else {
          coreStateClosed = solrCoreState.decrefSolrCoreState(null);
        }
      }
    } catch (Throwable e) {
      SolrException.log(log, e);
      if (e instanceof Error) {
        throw (Error) e;
      }
    }

    try {
      ExecutorUtil.shutdownAndAwaitTermination(searcherExecutor);
    } catch (Throwable e) {
      SolrException.log(log, e);
      if (e instanceof Error) {
        throw (Error) e;
      }
    }
    assert ObjectReleaseTracker.release(searcherExecutor);

    try {
      // Since we waited for the searcherExecutor to shut down,
      // there should be no more searchers warming in the background
      // that we need to take care of.
      //
      // For the case that a searcher was registered *before* warming,
      // the searcherExecutor will throw an exception when getSearcher()
      // tries to use it, and the exception handling code should close it.
      closeSearcher();
    } catch (Throwable e) {
      SolrException.log(log, e);
      if (e instanceof Error) {
        throw (Error) e;
      }
    }

    if (coreStateClosed) {
      try {
        cleanupOldIndexDirectories(false);
      } catch (Exception e) {
        SolrException.log(log, e);
      }
    }

    try {
      infoRegistry.clear();
    } catch (Throwable e) {
      SolrException.log(log, e);
      if (e instanceof Error) {
        throw (Error) e;
      }
    }

    // Close the snapshots meta-data directory.
    Directory snapshotsDir = snapshotMgr.getSnapshotsDir();
    try {
      this.directoryFactory.release(snapshotsDir);
    } catch (Throwable e) {
      SolrException.log(log, e);
      if (e instanceof Error) {
        throw (Error) e;
      }
    }

    if (coreStateClosed) {
      try {
        directoryFactory.close();
      } catch (Throwable e) {
        SolrException.log(log, e);
        if (e instanceof Error) {
          throw (Error) e;
        }
      }
    }

    if (closeHooks != null) {
      for (CloseHook hook : closeHooks) {
        try {
          hook.postClose(this);
        } catch (Throwable e) {
          SolrException.log(log, e);
          if (e instanceof Error) {
            throw (Error) e;
          }
        }
      }
    }

    assert ObjectReleaseTracker.release(this);
  }

  /**
   * Current core usage count.
   */
  public int getOpenCount() {
    return refCount.get();
  }

  /**
   * Whether this core is closed.
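   * <p>A sketch of the expected open/close pairing (illustrative only):
   * <pre class="prettyprint">
   * core.open();               // refCount +1
   * try {
   *   assert !core.isClosed();
   * } finally {
   *   core.close();            // refCount -1; resources are freed only when it reaches 0
   * }
   * </pre>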
*/ public boolean isClosed() { return refCount.get() <= 0; } private Collection<CloseHook> closeHooks = null; /** * Add a close callback hook */ public void addCloseHook(CloseHook hook) { if (closeHooks == null) { closeHooks = new ArrayList<>(); } closeHooks.add(hook); } /** * @lucene.internal Debugging aid only. No non-test code should be released with uncommented verbose() calls. */ public static boolean VERBOSE = Boolean.parseBoolean(System.getProperty("tests.verbose", "false")); public static void verbose(Object... args) { if (!VERBOSE) return; StringBuilder sb = new StringBuilder("VERBOSE:"); // sb.append(Thread.currentThread().getName()); // sb.append(':'); for (Object o : args) { sb.append(' '); sb.append(o == null ? "(null)" : o.toString()); } // System.out.println(sb.toString()); log.info("{}", sb); } //////////////////////////////////////////////////////////////////////////////// // Request Handler //////////////////////////////////////////////////////////////////////////////// /** * Get the request handler registered to a given name. * <p> * This function is thread safe. */ public SolrRequestHandler getRequestHandler(String handlerName) { return RequestHandlerBase.getRequestHandler(RequestHandlers.normalize(handlerName), reqHandlers.handlers); } /** * Returns an unmodifiable Map containing the registered handlers */ public PluginBag<SolrRequestHandler> getRequestHandlers() { return reqHandlers.handlers; } /** * Registers a handler at the specified location. If one exists there, it will be replaced. * To remove a handler, register <code>null</code> at its path * <p> * Once registered the handler can be accessed through: * <pre> * http://${host}:${port}/${context}/${handlerName} * or: * http://${host}:${port}/${context}/select?qt=${handlerName} * </pre> * <p> * Handlers <em>must</em> be initialized before getting registered. Registered * handlers can immediately accept requests. * <p> * This call is thread safe. * * @return the previous <code>SolrRequestHandler</code> registered to this name <code>null</code> if none. */ public SolrRequestHandler registerRequestHandler(String handlerName, SolrRequestHandler handler) { return reqHandlers.register(handlerName, handler); } /** * Register the default search components */ private void loadSearchComponents() { Map<String, SearchComponent> instances = createInstances(SearchComponent.standard_components); for (Map.Entry<String, SearchComponent> e : instances.entrySet()) e.getValue().setName(e.getKey()); searchComponents.init(instances, this); for (String name : searchComponents.keySet()) { if (searchComponents.isLoaded(name) && searchComponents.get(name) instanceof HighlightComponent) { if (!HighlightComponent.COMPONENT_NAME.equals(name)) { searchComponents.put(HighlightComponent.COMPONENT_NAME, searchComponents.getRegistry().get(name)); } break; } } } /** * @return a Search Component registered to a given name. Throw an exception if the component is undefined */ public SearchComponent getSearchComponent(String name) { return searchComponents.get(name); } /** * Accessor for all the Search Components * * @return An unmodifiable Map of Search Components */ public PluginBag<SearchComponent> getSearchComponents() { return searchComponents; } //////////////////////////////////////////////////////////////////////////////// // Update Handler //////////////////////////////////////////////////////////////////////////////// /** * RequestHandlers need access to the updateHandler so they can all talk to the * same RAM indexer. 
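   * <p>Sketch: from inside a request handler, the shared instance is reachable via the
   * core (the <code>req</code> variable is illustrative):
   * <pre class="prettyprint">
   * UpdateHandler uh = req.getCore().getUpdateHandler();
   * </pre>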
*/ public UpdateHandler getUpdateHandler() { return updateHandler; } //////////////////////////////////////////////////////////////////////////////// // Searcher Control //////////////////////////////////////////////////////////////////////////////// // The current searcher used to service queries. // Don't access this directly!!!! use getSearcher() to // get it (and it will increment the ref count at the same time). // This reference is protected by searcherLock. private RefCounted<SolrIndexSearcher> _searcher; // All of the normal open searchers. Don't access this directly. // protected by synchronizing on searcherLock. private final LinkedList<RefCounted<SolrIndexSearcher>> _searchers = new LinkedList<>(); private final LinkedList<RefCounted<SolrIndexSearcher>> _realtimeSearchers = new LinkedList<>(); final ExecutorService searcherExecutor = ExecutorUtil.newMDCAwareSingleThreadExecutor( new SolrNamedThreadFactory("searcherExecutor")); private int onDeckSearchers; // number of searchers preparing // Lock ordering: one can acquire the openSearcherLock and then the searcherLock, but not vice-versa. private Object searcherLock = new Object(); // the sync object for the searcher private ReentrantLock openSearcherLock = new ReentrantLock(true); // used to serialize opens/reopens for absolute ordering private final int maxWarmingSearchers; // max number of on-deck searchers allowed private final int slowQueryThresholdMillis; // threshold above which a query is considered slow private RefCounted<SolrIndexSearcher> realtimeSearcher; private Callable<DirectoryReader> newReaderCreator; // For testing boolean areAllSearcherReferencesEmpty() { boolean isEmpty; synchronized (searcherLock) { isEmpty = _searchers.isEmpty(); isEmpty = isEmpty && _realtimeSearchers.isEmpty(); isEmpty = isEmpty && (_searcher == null); isEmpty = isEmpty && (realtimeSearcher == null); } return isEmpty; } /** * Return a registered {@link RefCounted}&lt;{@link SolrIndexSearcher}&gt; with * the reference count incremented. It <b>must</b> be decremented when no longer needed. * This method should not be called from SolrCoreAware.inform() since it can result * in a deadlock if useColdSearcher==false. * If handling a normal request, the searcher should be obtained from * {@link org.apache.solr.request.SolrQueryRequest#getSearcher()} instead. * If you still think you need to call this, consider {@link #withSearcher(IOFunction)} instead which is easier to * use. * * @see SolrQueryRequest#getSearcher() * @see #withSearcher(IOFunction) */ public RefCounted<SolrIndexSearcher> getSearcher() { if (searchEnabled) { return getSearcher(false, true, null); } throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Search is temporarily disabled"); } /** * Executes the lambda with the {@link SolrIndexSearcher}. This is more convenient than using * {@link #getSearcher()} since there is no ref-counting business to worry about. * Example: * <pre class="prettyprint"> * IndexReader reader = h.getCore().withSearcher(SolrIndexSearcher::getIndexReader); * </pre> * Warning: although a lambda is concise, it may be inappropriate to simply return the IndexReader because it might * be closed soon after this method returns; it really depends. 
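   * <p>A safer sketch that returns a plain value instead of the reader itself:
   * <pre class="prettyprint">
   * int numDocs = core.withSearcher(s -&gt; s.getIndexReader().numDocs());
   * </pre>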
   */
  @SuppressWarnings("unchecked")
  public <R> R withSearcher(IOFunction<SolrIndexSearcher, R> lambda) throws IOException {
    final RefCounted<SolrIndexSearcher> refCounted = getSearcher();
    try {
      return lambda.apply(refCounted.get());
    } finally {
      refCounted.decref();
    }
  }

  /**
   * Computes the fingerprint of a segment and caches it only if all the versions in the segment are included
   * in the fingerprint. We can't use computeIfAbsent as caching is conditional (as described above).
   * There is a chance that two threads may compute the fingerprint on the same segment. It might be OK to do so
   * rather than locking the entire map.
   *
   * @param searcher   searcher that includes the specified LeafReaderContext
   * @param ctx        LeafReaderContext of a segment to compute the fingerprint of
   * @param maxVersion maximum version number to consider for fingerprint computation
   * @return IndexFingerprint of the segment
   * @throws IOException Can throw IOException
   */
  public IndexFingerprint getIndexFingerprint(SolrIndexSearcher searcher, LeafReaderContext ctx, long maxVersion)
      throws IOException {
    IndexReader.CacheHelper cacheHelper = ctx.reader().getReaderCacheHelper();
    if (cacheHelper == null) {
      if (log.isDebugEnabled()) {
        log.debug("Cannot cache IndexFingerprint as reader does not support caching. searcher:{} reader:{} readerHash:{} maxVersion:{}", searcher, ctx.reader(), ctx.reader().hashCode(), maxVersion);
      }
      return IndexFingerprint.getFingerprint(searcher, ctx, maxVersion);
    }

    IndexFingerprint f = null;
    f = perSegmentFingerprintCache.get(cacheHelper.getKey());
    // fingerprint is either not cached, or
    // we want the fingerprint only up to a version less than maxVersionEncountered in the segment, or
    // documents were deleted from the segment for which the fingerprint was cached
    //
    if (f == null || (f.getMaxInHash() > maxVersion) || (f.getNumDocs() != ctx.reader().numDocs())) {
      if (log.isDebugEnabled()) {
        log.debug("IndexFingerprint cache miss for searcher:{} reader:{} readerHash:{} maxVersion:{}", searcher, ctx.reader(), ctx.reader().hashCode(), maxVersion);
      }
      f = IndexFingerprint.getFingerprint(searcher, ctx, maxVersion);
      // cache fingerprint for the segment only if all the versions in the segment are included in the fingerprint
      if (f.getMaxVersionEncountered() == f.getMaxInHash()) {
        log.debug("Caching fingerprint for searcher:{} leafReaderContext:{} maxVersion:{}", searcher, ctx, maxVersion);
        perSegmentFingerprintCache.put(cacheHelper.getKey(), f);
      }
    } else {
      if (log.isDebugEnabled()) {
        log.debug("IndexFingerprint cache hit for searcher:{} reader:{} readerHash:{} maxVersion:{}", searcher, ctx.reader(), ctx.reader().hashCode(), maxVersion);
      }
    }
    if (log.isDebugEnabled()) {
      log.debug("Cache Size: {}, Segments Size:{}", perSegmentFingerprintCache.size(), searcher.getTopReaderContext().leaves().size());
    }
    return f;
  }

  /**
   * Returns the current registered searcher with its reference count incremented, or null if none are registered.
   */
  public RefCounted<SolrIndexSearcher> getRegisteredSearcher() {
    synchronized (searcherLock) {
      if (_searcher != null) {
        _searcher.incref();
      }
      return _searcher;
    }
  }

  /**
   * Return the newest normal {@link RefCounted}&lt;{@link SolrIndexSearcher}&gt; with
   * the reference count incremented. It <b>must</b> be decremented when no longer needed.
   * If no searcher is currently open, then if openNew==true a new searcher will be opened,
   * or null is returned if openNew==false.
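   * <p>Typical decref discipline (sketch):
   * <pre class="prettyprint">
   * RefCounted&lt;SolrIndexSearcher&gt; holder = core.getNewestSearcher(false);
   * if (holder != null) {
   *   try {
   *     // ... use holder.get() ...
   *   } finally {
   *     holder.decref();
   *   }
   * }
   * </pre>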
*/ public RefCounted<SolrIndexSearcher> getNewestSearcher(boolean openNew) { synchronized (searcherLock) { if (!_searchers.isEmpty()) { RefCounted<SolrIndexSearcher> newest = _searchers.getLast(); newest.incref(); return newest; } } return openNew ? getRealtimeSearcher() : null; } /** * Gets the latest real-time searcher w/o forcing open a new searcher if one already exists. * The reference count will be incremented. */ public RefCounted<SolrIndexSearcher> getRealtimeSearcher() { synchronized (searcherLock) { if (realtimeSearcher != null) { realtimeSearcher.incref(); return realtimeSearcher; } } // use the searcher lock to prevent multiple people from trying to open at once openSearcherLock.lock(); try { // try again synchronized (searcherLock) { if (realtimeSearcher != null) { realtimeSearcher.incref(); return realtimeSearcher; } } // force a new searcher open return openNewSearcher(true, true); } finally { openSearcherLock.unlock(); } } public RefCounted<SolrIndexSearcher> getSearcher(boolean forceNew, boolean returnSearcher, @SuppressWarnings({"rawtypes"})final Future[] waitSearcher) { return getSearcher(forceNew, returnSearcher, waitSearcher, false); } /** * Opens a new searcher and returns a RefCounted&lt;SolrIndexSearcher&gt; with its reference incremented. * <p> * "realtime" means that we need to open quickly for a realtime view of the index, hence don't do any * autowarming and add to the _realtimeSearchers queue rather than the _searchers queue (so it won't * be used for autowarming by a future normal searcher). A "realtime" searcher will currently never * become "registered" (since it currently lacks caching). * <p> * realtimeSearcher is updated to the latest opened searcher, regardless of the value of "realtime". * <p> * This method acquires openSearcherLock - do not call with searchLock held! */ public RefCounted<SolrIndexSearcher> openNewSearcher(boolean updateHandlerReopens, boolean realtime) { if (isClosed()) { // catch some errors quicker throw new SolrCoreState.CoreIsClosedException(); } SolrIndexSearcher tmp; RefCounted<SolrIndexSearcher> newestSearcher = null; openSearcherLock.lock(); try { String newIndexDir = getNewIndexDir(); String indexDirFile = null; String newIndexDirFile = null; // if it's not a normal near-realtime update, check that paths haven't changed. 
if (!updateHandlerReopens) { indexDirFile = getDirectoryFactory().normalize(getIndexDir()); newIndexDirFile = getDirectoryFactory().normalize(newIndexDir); } synchronized (searcherLock) { newestSearcher = realtimeSearcher; if (newestSearcher != null) { newestSearcher.incref(); // the matching decref is in the finally block } } if (newestSearcher != null && (updateHandlerReopens || indexDirFile.equals(newIndexDirFile))) { DirectoryReader newReader; DirectoryReader currentReader = newestSearcher.get().getRawReader(); // SolrCore.verbose("start reopen from",previousSearcher,"writer=",writer); RefCounted<IndexWriter> writer = getSolrCoreState().getIndexWriter(null); try { if (writer != null) { // if in NRT mode, open from the writer newReader = DirectoryReader.openIfChanged(currentReader, writer.get(), true); } else { // verbose("start reopen without writer, reader=", currentReader); newReader = DirectoryReader.openIfChanged(currentReader); // verbose("reopen result", newReader); } } finally { if (writer != null) { writer.decref(); } } if (newReader == null) { // the underlying index has not changed at all if (realtime) { // if this is a request for a realtime searcher, just return the same searcher newestSearcher.incref(); return newestSearcher; } else if (newestSearcher.get().isCachingEnabled() && newestSearcher.get().getSchema() == getLatestSchema()) { // absolutely nothing has changed, can use the same searcher // but log a message about it to minimize confusion newestSearcher.incref(); if (log.isDebugEnabled()) { log.debug("SolrIndexSearcher has not changed - not re-opening: {}", newestSearcher.get().getName()); } return newestSearcher; } // ELSE: open a new searcher against the old reader... currentReader.incRef(); newReader = currentReader; } // for now, turn off caches if this is for a realtime reader // (caches take a little while to instantiate) final boolean useCaches = !realtime; final String newName = realtime ? "realtime" : "main"; tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), newName, newReader, true, useCaches, true, directoryFactory); } else { // newestSearcher == null at this point if (newReaderCreator != null) { // this is set in the constructor if there is a currently open index writer // so that we pick up any uncommitted changes and so we don't go backwards // in time on a core reload DirectoryReader newReader = newReaderCreator.call(); tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory); } else { RefCounted<IndexWriter> writer = getSolrCoreState().getIndexWriter(this); DirectoryReader newReader = null; try { newReader = indexReaderFactory.newReader(writer.get(), this); } finally { writer.decref(); } tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory); } } List<RefCounted<SolrIndexSearcher>> searcherList = realtime ? _realtimeSearchers : _searchers; RefCounted<SolrIndexSearcher> newSearcher = newHolder(tmp, searcherList); // refcount now at 1 // Increment reference again for "realtimeSearcher" variable. It should be at 2 after. // When it's decremented by both the caller of this method, and by realtimeSearcher being replaced, // it will be closed. newSearcher.incref(); synchronized (searcherLock) { // Check if the core is closed again inside the lock in case this method is racing with a close. If the core is // closed, clean up the new searcher and bail. 
if (isClosed()) { newSearcher.decref(); // once for caller since we're not returning it newSearcher.decref(); // once for ourselves since it won't be "replaced" throw new SolrException(ErrorCode.SERVER_ERROR, "openNewSearcher called on closed core"); } if (realtimeSearcher != null) { realtimeSearcher.decref(); } realtimeSearcher = newSearcher; searcherList.add(realtimeSearcher); } return newSearcher; } catch (Exception e) { throw new SolrException(ErrorCode.SERVER_ERROR, "Error opening new searcher", e); } finally { openSearcherLock.unlock(); if (newestSearcher != null) { newestSearcher.decref(); } } } /** * Get a {@link SolrIndexSearcher} or start the process of creating a new one. * <p> * The registered searcher is the default searcher used to service queries. * A searcher will normally be registered after all of the warming * and event handlers (newSearcher or firstSearcher events) have run. * In the case where there is no registered searcher, the newly created searcher will * be registered before running the event handlers (a slow searcher is better than no searcher). * * <p> * These searchers contain read-only IndexReaders. To access a non read-only IndexReader, * see newSearcher(String name, boolean readOnly). * * <p> * If <code>forceNew==true</code> then * A new searcher will be opened and registered regardless of whether there is already * a registered searcher or other searchers in the process of being created. * <p> * If <code>forceNew==false</code> then:<ul> * <li>If a searcher is already registered, that searcher will be returned</li> * <li>If no searcher is currently registered, but at least one is in the process of being created, then * this call will block until the first searcher is registered</li> * <li>If no searcher is currently registered, and no searchers in the process of being registered, a new * searcher will be created.</li> * </ul> * <p> * If <code>returnSearcher==true</code> then a {@link RefCounted}&lt;{@link SolrIndexSearcher}&gt; will be returned with * the reference count incremented. It <b>must</b> be decremented when no longer needed. * <p> * If <code>waitSearcher!=null</code> and a new {@link SolrIndexSearcher} was created, * then it is filled in with a Future that will return after the searcher is registered. The Future may be set to * <code>null</code> in which case the SolrIndexSearcher created has already been registered at the time * this method returned. * <p> * * @param forceNew if true, force the open of a new index searcher regardless if there is already one open. * @param returnSearcher if true, returns a {@link SolrIndexSearcher} holder with the refcount already incremented. * @param waitSearcher if non-null, will be filled in with a {@link Future} that will return after the new searcher is registered. * @param updateHandlerReopens if true, the UpdateHandler will be used when reopening a {@link SolrIndexSearcher}. */ public RefCounted<SolrIndexSearcher> getSearcher(boolean forceNew, boolean returnSearcher, @SuppressWarnings({"rawtypes"})final Future[] waitSearcher, boolean updateHandlerReopens) { // it may take some time to open an index.... we may need to make // sure that two threads aren't trying to open one at the same time // if it isn't necessary. 
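    // Note: the synchronized (searcherLock) section below completes before
    // openSearcherLock is acquired further down, which respects the documented
    // lock ordering for these fields (openSearcherLock may be taken before
    // searcherLock, never the reverse).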
    synchronized (searcherLock) {
      for (; ; ) { // this loop is so we can retry in the event that we exceed maxWarmingSearchers

        // see if we can return the current searcher
        if (_searcher != null && !forceNew) {
          if (returnSearcher) {
            _searcher.incref();
            return _searcher;
          } else {
            return null;
          }
        }

        // check to see if we can wait for someone else's searcher to be set
        if (onDeckSearchers > 0 && !forceNew && _searcher == null) {
          try {
            searcherLock.wait();
          } catch (InterruptedException e) {
            if (log.isInfoEnabled()) {
              log.info(SolrException.toStr(e));
            }
          }
        }

        // check again: see if we can return right now
        if (_searcher != null && !forceNew) {
          if (returnSearcher) {
            _searcher.incref();
            return _searcher;
          } else {
            return null;
          }
        }

        // At this point, we know we need to open a new searcher...
        // first: increment count to signal other threads that we are
        // opening a new searcher.
        onDeckSearchers++;
        newSearcherCounter.inc();
        if (onDeckSearchers < 1) {
          // should never happen... just a sanity check
          log.error("{}ERROR!!! onDeckSearchers is {}", logid, onDeckSearchers);
          onDeckSearchers = 1; // reset
        } else if (onDeckSearchers > maxWarmingSearchers) {
          onDeckSearchers--;
          newSearcherMaxReachedCounter.inc();
          try {
            searcherLock.wait();
          } catch (InterruptedException e) {
            if (log.isInfoEnabled()) {
              log.info(SolrException.toStr(e));
            }
          }
          continue; // go back to the top of the loop and retry
        } else if (onDeckSearchers > 1) {
          log.warn("{}PERFORMANCE WARNING: Overlapping onDeckSearchers={}", logid, onDeckSearchers);
        }

        break; // I can now exit the loop and proceed to open a searcher
      }
    }

    // a signal to decrement onDeckSearchers if something goes wrong.
    final boolean[] decrementOnDeckCount = new boolean[]{true};

    RefCounted<SolrIndexSearcher> currSearcherHolder = null; // searcher we are autowarming from
    RefCounted<SolrIndexSearcher> searchHolder = null;
    boolean success = false;

    openSearcherLock.lock();
    Timer.Context timerContext = newSearcherTimer.time();
    try {
      searchHolder = openNewSearcher(updateHandlerReopens, false);
      // the searchHolder will be incremented once already (and it will eventually be assigned to _searcher when registered)
      // increment it again if we are going to return it to the caller.
      if (returnSearcher) {
        searchHolder.incref();
      }

      final RefCounted<SolrIndexSearcher> newSearchHolder = searchHolder;
      final SolrIndexSearcher newSearcher = newSearchHolder.get();

      boolean alreadyRegistered = false;
      synchronized (searcherLock) {
        if (_searcher == null) {
          // if there isn't a current searcher then we may
          // want to register this one before warming is complete instead of waiting.
          if (solrConfig.useColdSearcher) {
            registerSearcher(newSearchHolder);
            decrementOnDeckCount[0] = false;
            alreadyRegistered = true;
          }
        } else {
          // get a reference to the current searcher for purposes of autowarming.
          currSearcherHolder = _searcher;
          currSearcherHolder.incref();
        }
      }

      final SolrIndexSearcher currSearcher = currSearcherHolder == null ? null : currSearcherHolder.get();

      @SuppressWarnings({"rawtypes"})
      Future future = null;

      // if the underlying searcher has not changed, no warming is needed
      if (newSearcher != currSearcher) {
        // warm the new searcher based on the current searcher.
        // should this go before the other event handlers or after?
if (currSearcher != null) { future = searcherExecutor.submit(() -> { Timer.Context warmupContext = newSearcherWarmupTimer.time(); try { newSearcher.warm(currSearcher); } catch (Throwable e) { SolrException.log(log, e); if (e instanceof Error) { throw (Error) e; } } finally { warmupContext.close(); } return null; }); } if (currSearcher == null) { future = searcherExecutor.submit(() -> { try { for (SolrEventListener listener : firstSearcherListeners) { listener.newSearcher(newSearcher, null); } } catch (Throwable e) { SolrException.log(log, null, e); if (e instanceof Error) { throw (Error) e; } } return null; }); } if (currSearcher != null) { future = searcherExecutor.submit(() -> { try { for (SolrEventListener listener : newSearcherListeners) { listener.newSearcher(newSearcher, currSearcher); } } catch (Throwable e) { SolrException.log(log, null, e); if (e instanceof Error) { throw (Error) e; } } return null; }); } } // WARNING: this code assumes a single threaded executor (that all tasks // queued will finish first). final RefCounted<SolrIndexSearcher> currSearcherHolderF = currSearcherHolder; if (!alreadyRegistered) { future = searcherExecutor.submit( () -> { try { // registerSearcher will decrement onDeckSearchers and // do a notify, even if it fails. registerSearcher(newSearchHolder); } catch (Throwable e) { SolrException.log(log, e); if (e instanceof Error) { throw (Error) e; } } finally { // we are all done with the old searcher we used // for warming... if (currSearcherHolderF != null) currSearcherHolderF.decref(); } return null; } ); } if (waitSearcher != null) { waitSearcher[0] = future; } success = true; // Return the searcher as the warming tasks run in parallel // callers may wait on the waitSearcher future returned. return returnSearcher ? newSearchHolder : null; } catch (Exception e) { if (e instanceof RuntimeException) throw (RuntimeException) e; throw new SolrException(ErrorCode.SERVER_ERROR, e); } finally { timerContext.close(); if (!success) { newSearcherOtherErrorsCounter.inc(); ; synchronized (searcherLock) { onDeckSearchers--; if (onDeckSearchers < 0) { // sanity check... should never happen log.error("{}ERROR!!! onDeckSearchers after decrement={}", logid, onDeckSearchers); onDeckSearchers = 0; // try and recover } // if we failed, we need to wake up at least one waiter to continue the process searcherLock.notify(); } if (currSearcherHolder != null) { currSearcherHolder.decref(); } if (searchHolder != null) { searchHolder.decref(); // decrement 1 for _searcher (searchHolder will never become _searcher now) if (returnSearcher) { searchHolder.decref(); // decrement 1 because we won't be returning the searcher to the user } } } // we want to do this after we decrement onDeckSearchers so another thread // doesn't increment first and throw a false warning. openSearcherLock.unlock(); } } private RefCounted<SolrIndexSearcher> newHolder(SolrIndexSearcher newSearcher, final List<RefCounted<SolrIndexSearcher>> searcherList) { RefCounted<SolrIndexSearcher> holder = new RefCounted<SolrIndexSearcher>(newSearcher) { @Override public void close() { try { synchronized (searcherLock) { // it's possible for someone to get a reference via the _searchers queue // and increment the refcount while RefCounted.close() is being called. // we check the refcount again to see if this has happened and abort the close. // This relies on the RefCounted class allowing close() to be called every // time the counter hits zero. 
if (refcount.get() > 0) return; searcherList.remove(this); } resource.close(); } catch (Exception e) { // do not allow decref() operations to fail since they are typically called in finally blocks // and throwing another exception would be very unexpected. SolrException.log(log, "Error closing searcher:" + this, e); } } }; holder.incref(); // set ref count to 1 to account for this._searcher return holder; } public boolean isReloaded() { return isReloaded; } // Take control of newSearcherHolder (which should have a reference count of at // least 1 already. If the caller wishes to use the newSearcherHolder directly // after registering it, then they should increment the reference count *before* // calling this method. // // onDeckSearchers will also be decremented (it should have been incremented // as a result of opening a new searcher). private void registerSearcher(RefCounted<SolrIndexSearcher> newSearcherHolder) { synchronized (searcherLock) { try { if (_searcher == newSearcherHolder) { // trying to re-register the same searcher... this can now happen when a commit has been done but // there were no changes to the index. newSearcherHolder.decref(); // decref since the caller should have still incref'd (since they didn't know the searcher was the same) return; // still execute the finally block to notify anyone waiting. } if (_searcher != null) { _searcher.decref(); // dec refcount for this._searcher _searcher = null; } _searcher = newSearcherHolder; SolrIndexSearcher newSearcher = newSearcherHolder.get(); /*** // a searcher may have been warming asynchronously while the core was being closed. // if this happens, just close the searcher. if (isClosed()) { // NOTE: this should not happen now - see close() for details. // *BUT* if we left it enabled, this could still happen before // close() stopped the executor - so disable this test for now. log.error("Ignoring searcher register on closed core:{}", newSearcher); _searcher.decref(); } ***/ newSearcher.register(); // register subitems (caches) if (log.isInfoEnabled()) { log.info("{} Registered new searcher autowarm time: {} ms", logid, newSearcher.getWarmupTime()); } } catch (Exception e) { // an exception in register() shouldn't be fatal. log(e); } finally { // wake up anyone waiting for a searcher // even in the face of errors. onDeckSearchers--; searcherLock.notifyAll(); assert TestInjection.injectSearcherHooks(getCoreDescriptor() != null && getCoreDescriptor().getCloudDescriptor() != null ? getCoreDescriptor().getCloudDescriptor().getCollectionName() : null); } } } public void closeSearcher() { log.debug("{}Closing main searcher on request.", logid); synchronized (searcherLock) { if (realtimeSearcher != null) { realtimeSearcher.decref(); realtimeSearcher = null; } if (_searcher != null) { _searcher.decref(); // dec refcount for this._searcher _searcher = null; // isClosed() does check this } } } public void execute(SolrRequestHandler handler, SolrQueryRequest req, SolrQueryResponse rsp) { if (handler == null) { String msg = "Null Request Handler '" + req.getParams().get(CommonParams.QT) + "'"; log.warn("{}{}:{}", logid, msg, req); throw new SolrException(ErrorCode.BAD_REQUEST, msg); } preDecorateResponse(req, rsp); /* * Keeping this usage of isDebugEnabled because the extraction of the log data as a string might be slow. TODO: * Determine how likely it is that something is going to go wrong that will prevent the logging at INFO further * down, and if possible, prevent that situation. 
The handleRequest and postDecorateResponse methods do not indicate * that they throw any checked exceptions, so it would have to be an unchecked exception that causes any problems. */ if (requestLog.isDebugEnabled() && rsp.getToLog().size() > 0) { // log request at debug in case something goes wrong and we aren't able to log later requestLog.debug(rsp.getToLogAsString(logid)); } // TODO: this doesn't seem to be working correctly and causes problems with the example server and distrib (for example /spell) // if (req.getParams().getBool(ShardParams.IS_SHARD,false) && !(handler instanceof SearchHandler)) // throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"isShard is only acceptable with search handlers"); handler.handleRequest(req, rsp); postDecorateResponse(handler, req, rsp); if (rsp.getToLog().size() > 0) { if (requestLog.isInfoEnabled()) { requestLog.info(rsp.getToLogAsString(logid)); } /* slowQueryThresholdMillis defaults to -1 in SolrConfig -- not enabled.*/ if (log.isWarnEnabled() && slowQueryThresholdMillis >= 0) { final long qtime = (long) (req.getRequestTimer().getTime()); if (qtime >= slowQueryThresholdMillis) { slowLog.warn("slow: {}", rsp.getToLogAsString(logid)); } } } } public static void preDecorateResponse(SolrQueryRequest req, SolrQueryResponse rsp) { // setup response header final NamedList<Object> responseHeader = new SimpleOrderedMap<>(); rsp.addResponseHeader(responseHeader); // toLog is a local ref to the same NamedList used by the response NamedList<Object> toLog = rsp.getToLog(); // for back compat, we set these now just in case other code // are expecting them during handleRequest toLog.add("webapp", req.getContext().get("webapp")); toLog.add(PATH, req.getContext().get(PATH)); final SolrParams params = req.getParams(); final String lpList = params.get(CommonParams.LOG_PARAMS_LIST); if (lpList == null) { toLog.add("params", "{" + req.getParamString() + "}"); } else if (lpList.length() > 0) { // Filter params by those in LOG_PARAMS_LIST so that we can then call toString HashSet<String> lpSet = new HashSet<>(Arrays.asList(lpList.split(","))); SolrParams filteredParams = new SolrParams() { private static final long serialVersionUID = -643991638344314066L; @Override public Iterator<String> getParameterNamesIterator() { return Iterators.filter(params.getParameterNamesIterator(), lpSet::contains); } @Override public String get(String param) { // assume param is in lpSet return params.get(param); } //assume in lpSet @Override public String[] getParams(String param) { // assume param is in lpSet return params.getParams(param); } // assume in lpSet }; toLog.add("params", "{" + filteredParams + "}"); } } /** * Put status, QTime, and possibly request handler and params, in the response header */ public static void postDecorateResponse (SolrRequestHandler handler, SolrQueryRequest req, SolrQueryResponse rsp) { // TODO should check that responseHeader has not been replaced by handler NamedList<Object> responseHeader = rsp.getResponseHeader(); final int qtime = (int) (req.getRequestTimer().getTime()); int status = 0; Exception exception = rsp.getException(); if (exception != null) { if (exception instanceof SolrException) status = ((SolrException) exception).code(); else status = 500; } responseHeader.add("status", status); responseHeader.add("QTime", qtime); if (rsp.getToLog().size() > 0) { rsp.getToLog().add("status", status); rsp.getToLog().add("QTime", qtime); } SolrParams params = req.getParams(); if (null != handler && 
params.getBool(CommonParams.HEADER_ECHO_HANDLER, false)) { responseHeader.add("handler", handler.getName()); } // Values for echoParams... false/true/all or false/explicit/all ??? String ep = params.get(CommonParams.HEADER_ECHO_PARAMS, null); if (ep != null) { EchoParamStyle echoParams = EchoParamStyle.get(ep); if (echoParams == null) { throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid value '" + ep + "' for " + CommonParams.HEADER_ECHO_PARAMS + " parameter, use '" + EchoParamStyle.EXPLICIT + "' or '" + EchoParamStyle.ALL + "'"); } if (echoParams == EchoParamStyle.EXPLICIT) { responseHeader.add("params", req.getOriginalParams().toNamedList()); } else if (echoParams == EchoParamStyle.ALL) { responseHeader.add("params", req.getParams().toNamedList()); } } } final public static void log(Throwable e) { SolrException.log(log, null, e); } public PluginBag<QueryResponseWriter> getResponseWriters() { return responseWriters; } private final PluginBag<QueryResponseWriter> responseWriters = new PluginBag<>(QueryResponseWriter.class, this); public static final Map<String, QueryResponseWriter> DEFAULT_RESPONSE_WRITERS; static { HashMap<String, QueryResponseWriter> m = new HashMap<>(15, 1); m.put("xml", new XMLResponseWriter()); m.put(CommonParams.JSON, new JSONResponseWriter()); m.put("standard", m.get(CommonParams.JSON)); m.put("geojson", new GeoJSONResponseWriter()); m.put("graphml", new GraphMLResponseWriter()); m.put("python", new PythonResponseWriter()); m.put("php", new PHPResponseWriter()); m.put("phps", new PHPSerializedResponseWriter()); m.put("ruby", new RubyResponseWriter()); m.put("raw", new RawResponseWriter()); m.put(CommonParams.JAVABIN, new BinaryResponseWriter()); m.put("csv", new CSVResponseWriter()); m.put("schema.xml", new SchemaXmlResponseWriter()); m.put("smile", new SmileResponseWriter()); m.put(ReplicationHandler.FILE_STREAM, getFileStreamWriter()); DEFAULT_RESPONSE_WRITERS = Collections.unmodifiableMap(m); try { m.put("xlsx", (QueryResponseWriter) Class.forName("org.apache.solr.handler.extraction.XLSXResponseWriter").getConstructor().newInstance()); } catch (Exception e) { //don't worry; solrcell contrib not in class path } } private static BinaryResponseWriter getFileStreamWriter() { return new BinaryResponseWriter() { @Override public void write(OutputStream out, SolrQueryRequest req, SolrQueryResponse response) throws IOException { RawWriter rawWriter = (RawWriter) response.getValues().get(ReplicationHandler.FILE_STREAM); if (rawWriter != null) { rawWriter.write(out); if (rawWriter instanceof Closeable) ((Closeable) rawWriter).close(); } } @Override public String getContentType(SolrQueryRequest request, SolrQueryResponse response) { RawWriter rawWriter = (RawWriter) response.getValues().get(ReplicationHandler.FILE_STREAM); if (rawWriter != null) { return rawWriter.getContentType(); } else { return BinaryResponseParser.BINARY_CONTENT_TYPE; } } }; } public interface RawWriter { default String getContentType() { return BinaryResponseParser.BINARY_CONTENT_TYPE; } void write(OutputStream os) throws IOException; } /** * Configure the query response writers. There will always be a default writer; additional * writers may also be configured. */ private void initWriters() { responseWriters.init(DEFAULT_RESPONSE_WRITERS, this); // configure the default response writer; this one should never be null if (responseWriters.getDefault() == null) responseWriters.setDefault("standard"); } /** * Finds a writer by name, or returns the default writer if not found. 
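   * <p>Sketch (writer names come from solrconfig.xml plus the defaults registered above):
   * <pre class="prettyprint">
   * QueryResponseWriter w = core.getQueryResponseWriter("json"); // unknown names fall back to the default writer
   * </pre>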
*/ public final QueryResponseWriter getQueryResponseWriter(String writerName) { return responseWriters.get(writerName, true); } /** * Returns the appropriate writer for a request. If the request specifies a writer via the * 'wt' parameter, attempts to find that one; otherwise return the default writer. */ public final QueryResponseWriter getQueryResponseWriter(SolrQueryRequest request) { return getQueryResponseWriter(request.getParams().get(CommonParams.WT)); } private final PluginBag<QParserPlugin> qParserPlugins = new PluginBag<>(QParserPlugin.class, this); public QParserPlugin getQueryPlugin(String parserName) { return qParserPlugins.get(parserName); } private final PluginBag<ValueSourceParser> valueSourceParsers = new PluginBag<>(ValueSourceParser.class, this); private final PluginBag<TransformerFactory> transformerFactories = new PluginBag<>(TransformerFactory.class, this); @SuppressWarnings({"unchecked"}) <T> Map<String, T> createInstances(Map<String, Class<? extends T>> map) { Map<String, T> result = new LinkedHashMap<>(map.size(), 1); for (Map.Entry<String, Class<? extends T>> e : map.entrySet()) { try { Object o = getResourceLoader().newInstance(e.getValue().getName(), e.getValue()); result.put(e.getKey(), (T) o); } catch (Exception exp) { //should never happen throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to instantiate class", exp); } } return result; } public TransformerFactory getTransformerFactory(String name) { return transformerFactories.get(name); } public void addTransformerFactory(String name, TransformerFactory factory) { transformerFactories.put(name, factory); } /** * @param registry The map to which the instance should be added to. The key is the name attribute * @param type the class or interface that the instance should extend or implement. * @param defClassName If PluginInfo does not have a classname, use this as the classname * @return The default instance . The one with (default=true) */ private <T> T initPlugins(Map<String, T> registry, Class<T> type, String defClassName) { return initPlugins(solrConfig.getPluginInfos(type.getName()), registry, type, defClassName); } public <T> T initPlugins(List<PluginInfo> pluginInfos, Map<String, T> registry, Class<T> type, String defClassName) { T def = null; for (PluginInfo info : pluginInfos) { T o = createInitInstance(info, type, type.getSimpleName(), defClassName); registry.put(info.name, o); if (o instanceof SolrMetricProducer) { coreMetricManager.registerMetricProducer(type.getSimpleName() + "." + info.name, (SolrMetricProducer) o); } if (info.isDefault()) { def = o; } } return def; } public void initDefaultPlugin(Object plugin, @SuppressWarnings({"rawtypes"})Class type) { if (plugin instanceof SolrMetricProducer) { coreMetricManager.registerMetricProducer(type.getSimpleName() + ".default", (SolrMetricProducer) plugin); } } /** * For a given List of PluginInfo return the instances as a List * * @param defClassName The default classname if PluginInfo#className == null * @return The instances initialized */ public <T> List<T> initPlugins(List<PluginInfo> pluginInfos, Class<T> type, String defClassName) { if (pluginInfos.isEmpty()) return Collections.emptyList(); List<T> result = new ArrayList<>(pluginInfos.size()); for (PluginInfo info : pluginInfos) result.add(createInitInstance(info, type, type.getSimpleName(), defClassName)); return result; } /** * @param registry The map to which the instance should be added to. The key is the name attribute * @param type The type of the Plugin. 
These should be standard ones registered by type.getName() in SolrConfig * @return The default if any */ public <T> T initPlugins(Map<String, T> registry, Class<T> type) { return initPlugins(registry, type, null); } public ValueSourceParser getValueSourceParser(String parserName) { return valueSourceParsers.get(parserName); } /** * Creates and initializes a RestManager based on configuration args in solrconfig.xml. * RestManager provides basic storage support for managed resource data, such as to * persist stopwords to ZooKeeper if running in SolrCloud mode. */ @SuppressWarnings("unchecked") protected RestManager initRestManager() throws SolrException { PluginInfo restManagerPluginInfo = getSolrConfig().getPluginInfo(RestManager.class.getName()); NamedList<String> initArgs = null; RestManager mgr = null; if (restManagerPluginInfo != null) { if (restManagerPluginInfo.className != null) { mgr = resourceLoader.newInstance(restManagerPluginInfo.className, RestManager.class); } if (restManagerPluginInfo.initArgs != null) { initArgs = (NamedList<String>) restManagerPluginInfo.initArgs; } } if (mgr == null) mgr = new RestManager(); if (initArgs == null) initArgs = new NamedList<>(); String collection = getCoreDescriptor().getCollectionName(); StorageIO storageIO = ManagedResourceStorage.newStorageIO(collection, resourceLoader, initArgs); mgr.init(resourceLoader, initArgs, storageIO); return mgr; } public CoreDescriptor getCoreDescriptor() { return coreDescriptor; } public IndexDeletionPolicyWrapper getDeletionPolicy() { return solrDelPolicy; } /** * @return A reference of {@linkplain SolrSnapshotMetaDataManager} * managing the persistent snapshots for this Solr core. */ public SolrSnapshotMetaDataManager getSnapshotMetaDataManager() { return snapshotMgr; } public ReentrantLock getRuleExpiryLock() { return ruleExpiryLock; } ///////////////////////////////////////////////////////////////////// // SolrInfoBean stuff: Statistics and Module Info ///////////////////////////////////////////////////////////////////// @Override public String getDescription() { return "SolrCore"; } @Override public Category getCategory() { return Category.CORE; } public Codec getCodec() { return codec; } public void unloadOnClose(final CoreDescriptor desc, boolean deleteIndexDir, boolean deleteDataDir, boolean deleteInstanceDir) { if (deleteIndexDir) { try { directoryFactory.remove(getIndexDir()); } catch (Exception e) { SolrException.log(log, "Failed to flag index dir for removal for core:" + name + " dir:" + getIndexDir()); } } if (deleteDataDir) { try { directoryFactory.remove(getDataDir(), true); } catch (Exception e) { SolrException.log(log, "Failed to flag data dir for removal for core:" + name + " dir:" + getDataDir()); } } if (deleteInstanceDir) { addCloseHook(new CloseHook() { @Override public void preClose(SolrCore core) { // empty block } @Override public void postClose(SolrCore core) { if (desc != null) { try { FileUtils.deleteDirectory(desc.getInstanceDir().toFile()); } catch (IOException e) { SolrException.log(log, "Failed to delete instance dir for core:" + core.getName() + " dir:" + desc.getInstanceDir()); } } } }); } } public static void deleteUnloadedCore(CoreDescriptor cd, boolean deleteDataDir, boolean deleteInstanceDir) { if (deleteDataDir) { File dataDir = cd.getInstanceDir().resolve(cd.getDataDir()).toFile(); try { FileUtils.deleteDirectory(dataDir); } catch (IOException e) { log.error("Failed to delete data dir for unloaded core: {} dir: {}", cd.getName(), dataDir.getAbsolutePath(), e); } } if 
(deleteInstanceDir) { try { FileUtils.deleteDirectory(cd.getInstanceDir().toFile()); } catch (IOException e) { log.error("Failed to delete instance dir for unloaded core: {} dir: {}", cd.getName(), cd.getInstanceDir(), e); } } } /** * Register to notify for any file change in the conf directory. * If the file change results in a core reload , then the listener * is not fired */ public void addConfListener(Runnable runnable) { confListeners.add(runnable); } /** * Remove a listener */ public boolean removeConfListener(Runnable runnable) { return confListeners.remove(runnable); } /** * This registers one listener for the entire conf directory. In zookeeper * there is no event fired when children are modified. So , we expect everyone * to 'touch' the /conf directory by setting some data so that events are triggered. */ private void registerConfListener() { if (!(resourceLoader instanceof ZkSolrResourceLoader)) return; final ZkSolrResourceLoader zkSolrResourceLoader = (ZkSolrResourceLoader) resourceLoader; if (zkSolrResourceLoader != null) zkSolrResourceLoader.getZkController().registerConfListenerForCore( zkSolrResourceLoader.getConfigSetZkPath(), this, getConfListener(this, zkSolrResourceLoader)); } public static Runnable getConfListener(SolrCore core, ZkSolrResourceLoader zkSolrResourceLoader) { final String coreName = core.getName(); final UUID coreId = core.uniqueId; final CoreContainer cc = core.getCoreContainer(); final String overlayPath = zkSolrResourceLoader.getConfigSetZkPath() + "/" + ConfigOverlay.RESOURCE_NAME; final String solrConfigPath = zkSolrResourceLoader.getConfigSetZkPath() + "/" + core.getSolrConfig().getName(); String schemaRes = null; if (core.getLatestSchema().isMutable() && core.getLatestSchema() instanceof ManagedIndexSchema) { ManagedIndexSchema mis = (ManagedIndexSchema) core.getLatestSchema(); schemaRes = mis.getResourceName(); } final String managedSchmaResourcePath = schemaRes == null ? null : zkSolrResourceLoader.getConfigSetZkPath() + "/" + schemaRes; return () -> { log.info("config update listener called for core {}", coreName); SolrZkClient zkClient = cc.getZkController().getZkClient(); int solrConfigversion, overlayVersion, managedSchemaVersion = 0; SolrConfig cfg = null; try (SolrCore solrCore = cc.solrCores.getCoreFromAnyList(coreName, true)) { if (solrCore == null || solrCore.isClosed() || solrCore.getCoreContainer().isShutDown()) return; cfg = solrCore.getSolrConfig(); solrConfigversion = solrCore.getSolrConfig().getOverlay().getZnodeVersion(); overlayVersion = solrCore.getSolrConfig().getZnodeVersion(); if (managedSchmaResourcePath != null) { managedSchemaVersion = ((ManagedIndexSchema) solrCore.getLatestSchema()).getSchemaZkVersion(); } } if (cfg != null) { cfg.refreshRequestParams(); } if (checkStale(zkClient, overlayPath, solrConfigversion) || checkStale(zkClient, solrConfigPath, overlayVersion) || checkStale(zkClient, managedSchmaResourcePath, managedSchemaVersion)) { log.info("core reload {}", coreName); SolrConfigHandler configHandler = ((SolrConfigHandler) core.getRequestHandler("/config")); if (configHandler.getReloadLock().tryLock()) { try { cc.reload(coreName, coreId); } catch (SolrCoreState.CoreIsClosedException e) { /*no problem this core is already closed*/ } finally { configHandler.getReloadLock().unlock(); } } else { log.info("Another reload is in progress. 
Not doing anything."); } return; } //some files in conf directory may have other than managedschema, overlay, params try (SolrCore solrCore = cc.solrCores.getCoreFromAnyList(coreName, true)) { if (solrCore == null || solrCore.isClosed() || cc.isShutDown()) return; for (Runnable listener : solrCore.confListeners) { try { listener.run(); } catch (Exception e) { log.error("Error in listener ", e); } } } }; } public void registerInfoBean(String name, SolrInfoBean solrInfoBean) { infoRegistry.put(name, solrInfoBean); if (solrInfoBean instanceof SolrMetricProducer) { SolrMetricProducer producer = (SolrMetricProducer) solrInfoBean; coreMetricManager.registerMetricProducer(name, producer); } } private static boolean checkStale(SolrZkClient zkClient, String zkPath, int currentVersion) { if (zkPath == null) return false; try { Stat stat = zkClient.exists(zkPath, null, true); if (stat == null) { if (currentVersion > -1) return true; return false; } if (stat.getVersion() > currentVersion) { if (log.isDebugEnabled()) { log.debug("{} is stale will need an update from {} to {}", zkPath, currentVersion, stat.getVersion()); } return true; } return false; } catch (KeeperException.NoNodeException nne) { //no problem } catch (KeeperException e) { log.error("error refreshing solrconfig ", e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } return false; } public void cleanupOldIndexDirectories(boolean reload) { final DirectoryFactory myDirFactory = getDirectoryFactory(); final String myDataDir = getDataDir(); final String myIndexDir = getNewIndexDir(); // ensure the latest replicated index is protected final String coreName = getName(); if (myDirFactory != null && myDataDir != null && myIndexDir != null) { Thread cleanupThread = new Thread(() -> { log.debug("Looking for old index directories to cleanup for core {} in {}", coreName, myDataDir); try { myDirFactory.cleanupOldIndexDirectories(myDataDir, myIndexDir, reload); } catch (Exception exc) { log.error("Failed to cleanup old index directories for core {}", coreName, exc); } }, "OldIndexDirectoryCleanupThreadForCore-" + coreName); cleanupThread.setDaemon(true); cleanupThread.start(); } } @SuppressWarnings({"rawtypes"}) private static final Map implicitPluginsInfo = (Map) Utils.fromJSONResource("ImplicitPlugins.json"); @SuppressWarnings({"unchecked", "rawtypes"}) public List<PluginInfo> getImplicitHandlers() { List<PluginInfo> implicits = new ArrayList<>(); Map requestHandlers = (Map) implicitPluginsInfo.get(SolrRequestHandler.TYPE); for (Object o : requestHandlers.entrySet()) { Map.Entry<String, Map> entry = (Map.Entry<String, Map>) o; Map info = Utils.getDeepCopy(entry.getValue(), 4); info.put(NAME, entry.getKey()); implicits.add(new PluginInfo(SolrRequestHandler.TYPE, info)); } return implicits; } /** * Convenience method to load a blob. This method minimizes the degree to which component and other code needs * to depend on the structure of solr's object graph and ensures that a proper close hook is registered. This method * should normally be called in {@link SolrCoreAware#inform(SolrCore)}, and should never be called during request * processing. The Decoder will only run on the first invocations, subsequent invocations will return the * cached object. 
* * @param key A key in the format of name/version for a blob stored in the * {@link CollectionAdminParams#SYSTEM_COLL} blob store via the Blob Store API * @param decoder a decoder with which to convert the blob into a Java Object representation (first time only) * @return a reference to the blob that has already cached the decoded version. */ @SuppressWarnings({"rawtypes"}) public BlobRepository.BlobContentRef loadDecodeAndCacheBlob(String key, BlobRepository.Decoder<Object> decoder) { // make sure component authors don't give us oddball keys with no version... if (!BlobRepository.BLOB_KEY_PATTERN_CHECKER.matcher(key).matches()) { throw new IllegalArgumentException("invalid key format, must end in /N where N is the version number"); } // define the blob @SuppressWarnings({"rawtypes"}) BlobRepository.BlobContentRef blobRef = coreContainer.getBlobRepository().getBlobIncRef(key, decoder); addCloseHook(new CloseHook() { @Override public void preClose(SolrCore core) { } @Override public void postClose(SolrCore core) { coreContainer.getBlobRepository().decrementBlobRefCount(blobRef); } }); return blobRef; } /** * Run an arbitrary task in its own thread. This is an expert option and is * a method you should use with great care. It would be bad to run something that never stopped * or run something that took a very long time. Typically this is intended for actions that take * a few seconds, and therefore would be bad to wait for within a request, but would not pose * a significant hindrance to server shut down times. It is not intended for long running tasks * and if you are using a Runnable with a loop in it, you are almost certainly doing it wrong. * <p> * WARNING: Solr will not be able to shut down gracefully until this task completes! * <p> * A significant upside of using this method vs creating your own ExecutorService is that your code * does not have to properly shut down executors which typically is risky from a unit testing * perspective since the test framework will complain if you don't carefully ensure the executor * shuts down before the end of the test. Also the threads running this task are sure to have * a proper MDC for logging. * * @param r the task to run */ public void runAsync(Runnable r) { coreAsyncTaskExecutor.submit(r); } }
1
36600
Make `CircuitBreakerManager` implement `PluginInfoInitialized`
apache-lucene-solr
java
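The review note above asks that CircuitBreakerManager implement PluginInfoInitialized, the Solr plugin interface (used by the plugin-loading code in the file above) whose single init(PluginInfo) method receives the plugin's configuration. Below is a minimal Java sketch of that pattern, assuming the real org.apache.solr interfaces; the "memThreshold" init argument and its parsing are hypothetical illustration, not taken from the Solr source.

// Sketch only: demonstrates the PluginInfoInitialized pattern the reviewer asks for.
// The "memThreshold" key is a hypothetical example, not the actual config key.
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.PluginInfo;
import org.apache.solr.util.plugin.PluginInfoInitialized;

public class CircuitBreakerManager implements PluginInfoInitialized {
  private int memThreshold;

  @Override
  public void init(PluginInfo info) {
    // PluginInfo.initArgs carries this plugin's configuration from solrconfig.xml
    NamedList<?> args = info.initArgs;
    Object threshold = args == null ? null : args.get("memThreshold");
    if (threshold != null) {
      memThreshold = Integer.parseInt(threshold.toString());
    }
  }
}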
@@ -679,7 +679,7 @@ class _ServerCapabilities: def _as_uint32(x: int) -> QVariant: """Convert the given int to an uint32 for DBus.""" variant = QVariant(x) - successful = variant.convert(QVariant.Type.UInt) + successful = variant.convert(QMetaType(QMetaType.Type.UInt.value)) assert successful return variant
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2020 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <https://www.gnu.org/licenses/>. """Different ways of showing notifications to the user. Our notification implementation consists of two different parts: - NotificationBridgePresenter, the object we set as notification presenter on QWebEngineProfiles on startup. - Adapters (subclassing from AbstractNotificationAdapter) which get called by the bridge and contain the code to show notifications using different means (e.g. a systray icon or DBus). Adapters are initialized lazily when the bridge gets the first notification. This makes sure we don't block while e.g. talking to DBus during startup, but only when needed. If an adapter raises Error during __init__, the bridge assumes that it's unavailable and tries the next one in a list of candidates. Useful test pages: - https://tests.peter.sh/notification-generator/ - https://www.bennish.net/web-notifications.html - https://web-push-book.gauntface.com/demos/notification-examples/ - tests/end2end/data/javascript/notifications.html """ import os import signal import html import dataclasses import itertools import functools import subprocess from typing import Any, List, Dict, Optional, Iterator, TYPE_CHECKING from PyQt6.QtCore import (Qt, QObject, QVariant, QMetaType, QByteArray, pyqtSlot, pyqtSignal, QTimer, QProcess, QUrl) from PyQt6.QtGui import QImage, QIcon, QPixmap from PyQt6.QtDBus import (QDBusConnection, QDBusInterface, QDBus, QDBusServiceWatcher, QDBusArgument, QDBusMessage, QDBusError) from PyQt6.QtWidgets import QSystemTrayIcon if TYPE_CHECKING: # putting these behind TYPE_CHECKING also means this module is importable # on installs that don't have these from PyQt6.QtWebEngineCore import QWebEngineNotification from PyQt6.QtWebEngineWidgets import QWebEngineProfile from qutebrowser.config import config from qutebrowser.misc import objects from qutebrowser.utils import qtutils, log, utils, debug, message, version from qutebrowser.qt import sip bridge: Optional['NotificationBridgePresenter'] = None def _notifications_supported() -> bool: """Check whether the current QtWebEngine version has notification support.""" versions = version.qtwebengine_versions(avoid_init=True) return versions.webengine >= utils.VersionNumber(5, 14) def init() -> None: """Initialize the DBus notification presenter, if applicable. If the user doesn't want a notification presenter or it's not supported, this method does nothing. Always succeeds, but might log an error. """ if config.val.content.notifications.presenter == "qt": # In theory, we could somehow postpone the install if the user switches to "qt" # at a later point in time. However, doing so is probably too complex compared # to its usefulness. 
return if not _notifications_supported(): return global bridge bridge = NotificationBridgePresenter() class Error(Exception): """Raised when something goes wrong with notifications.""" class AbstractNotificationAdapter(QObject): """An adapter taking notifications and displaying them. This can happen via different mechanisms, e.g. a system tray icon or DBus. """ # A short name for the adapter, shown in errors. Should be the same as the # associated content.notification.presenter setting. NAME: str # Emitted by the adapter when the notification with the given ID was closed or # clicked by the user. close_id = pyqtSignal(int) click_id = pyqtSignal(int) # Emitted by the adapter when an error occurred, which should result in the adapter # getting swapped out (potentially initializing the same adapter again, or using a # different one if that fails). error = pyqtSignal(str) clear_all = pyqtSignal() def present( self, qt_notification: "QWebEngineNotification", *, replaces_id: Optional[int], ) -> int: """Show the given notification. If replaces_id is given, replace the currently showing notification with the same ID. Returns an ID assigned to the new notifications. IDs must be positive (>= 1) and must not duplicate any active notification's ID. """ raise NotImplementedError def _should_include_origin(self, origin: QUrl) -> bool: """Check if the origin is useful to include. If we open the page via a file scheme, the origin is QUrl('file:///') which doesn't help much. """ return bool( origin.host() and config.instance.get('content.notifications.show_origin', url=origin), ) @pyqtSlot(int) def on_web_closed(self, notification_id: int) -> None: """Called when a notification was closed by the website.""" raise NotImplementedError class NotificationBridgePresenter(QObject): """Notification presenter which bridges notifications to an adapter. Takes care of: - Working around bugs in PyQt 5.14 - Storing currently shown notifications, using an ID returned by the adapter. - Initializing a suitable adapter when the first notification is shown. - Switching out adapters if the current one emitted its error signal. 
""" def __init__(self, parent: QObject = None) -> None: super().__init__(parent) assert _notifications_supported() self._active_notifications: Dict[int, 'QWebEngineNotification'] = {} self._adapter: Optional[AbstractNotificationAdapter] = None config.instance.changed.connect(self._init_adapter) @config.change_filter('content.notifications.presenter') def _init_adapter(self) -> None: """Initialize the adapter to use based on the config.""" setting = config.val.content.notifications.presenter log.misc.debug(f"Setting up notification adapter ({setting})...") if setting == "qt": message.error("Can't switch to qt notification presenter at runtime.") setting = "auto" if setting in ["auto", "libnotify"]: candidates = [ DBusNotificationAdapter, SystrayNotificationAdapter, MessagesNotificationAdapter, ] elif setting == "systray": candidates = [ SystrayNotificationAdapter, DBusNotificationAdapter, MessagesNotificationAdapter, ] elif setting == "herbe": candidates = [ HerbeNotificationAdapter, DBusNotificationAdapter, SystrayNotificationAdapter, MessagesNotificationAdapter, ] elif setting == "messages": candidates = [MessagesNotificationAdapter] # always succeeds else: raise utils.Unreachable(setting) for candidate in candidates: try: self._adapter = candidate() except Error as e: msg = f"Failed to initialize {candidate.NAME} notification adapter: {e}" if candidate.NAME == setting: # We picked this one explicitly message.error(msg) else: # automatic fallback log.misc.debug(msg) else: log.misc.debug(f"Initialized {self._adapter.NAME} notification adapter") break assert self._adapter is not None self._adapter.click_id.connect(self._on_adapter_clicked) self._adapter.close_id.connect(self._on_adapter_closed) self._adapter.error.connect(self._on_adapter_error) self._adapter.clear_all.connect(self._on_adapter_clear_all) def install(self, profile: "QWebEngineProfile") -> None: """Set the profile to use this bridge as the presenter.""" # WORKAROUND for # https://www.riverbankcomputing.com/pipermail/pyqt/2020-May/042916.html # Fixed in PyQtWebEngine 5.15.0 # PYQT_WEBENGINE_VERSION was added with PyQtWebEngine 5.13, but if we're here, # we already did a version check above. from PyQt6.QtWebEngineCore import PYQT_WEBENGINE_VERSION if PYQT_WEBENGINE_VERSION < 0x050F00: # PyQtWebEngine unrefs the callback after it's called, for some # reason. So we call setNotificationPresenter again to *increase* # its refcount to prevent it from getting GC'd. Otherwise, random # methods start getting called with the notification as `self`, or # segfaults happen, or other badness. def _present_and_reset(qt_notification: "QWebEngineNotification") -> None: profile.setNotificationPresenter(_present_and_reset) self.present(qt_notification) profile.setNotificationPresenter(_present_and_reset) else: profile.setNotificationPresenter(self.present) def present(self, qt_notification: "QWebEngineNotification") -> None: """Show a notification using the configured adapter. Lazily initializes a suitable adapter if none exists yet. This should *not* be directly passed to setNotificationPresenter on PyQtWebEngine < 5.15 because of a bug in the PyQtWebEngine bindings. 
""" if self._adapter is None: self._init_adapter() assert self._adapter is not None replaces_id = self._find_replaces_id(qt_notification) qtutils.ensure_valid(qt_notification.origin()) notification_id = self._adapter.present( qt_notification, replaces_id=replaces_id) log.misc.debug(f"New notification ID from adapter: {notification_id}") if self._adapter is None: # If a fatal error occurred, we replace the adapter via its "error" signal. log.misc.debug("Adapter vanished, bailing out") # type: ignore[unreachable] return if notification_id <= 0: raise Error(f"Got invalid notification id {notification_id}") if replaces_id is None: if notification_id in self._active_notifications: raise Error(f"Got duplicate id {notification_id}") qt_notification.show() self._active_notifications[notification_id] = qt_notification qt_notification.closed.connect( # type: ignore[attr-defined] functools.partial(self._adapter.on_web_closed, notification_id)) def _find_replaces_id( self, new_notification: "QWebEngineNotification", ) -> Optional[int]: """Find an existing notification to replace. If no notification should be replaced or the notification to be replaced was not found, this returns None. """ if not new_notification.tag(): return None log.misc.debug( f"Finding notification for tag {new_notification.tag()}, " f"origin {new_notification.origin()}") try: for notification_id, notification in sorted( self._active_notifications.items(), reverse=True): if notification.matches(new_notification): log.misc.debug(f"Found match: {notification_id}") return notification_id except RuntimeError: # WORKAROUND for # https://www.riverbankcomputing.com/pipermail/pyqt/2020-May/042918.html # (also affects .matches) log.misc.debug( f"Ignoring notification tag {new_notification.tag()!r} due to PyQt bug") log.misc.debug("Did not find match") return None @pyqtSlot(int) def _on_adapter_closed(self, notification_id: int) -> None: """A notification was closed by the adapter (usually due to the user). Accepts unknown notification IDs, as this can be called for notifications from other applications (with the DBus adapter). """ log.misc.debug(f"Notification {notification_id} closed by adapter") try: notification = self._active_notifications.pop(notification_id) except KeyError: log.misc.debug("Did not find matching notification, ignoring") # Notification from a different application return try: notification.close() except RuntimeError: # WORKAROUND for # https://www.riverbankcomputing.com/pipermail/pyqt/2020-May/042918.html log.misc.debug(f"Ignoring close request for notification {notification_id} " "due to PyQt bug") @pyqtSlot(int) def _on_adapter_clicked(self, notification_id: int) -> None: """A notification was clicked by the adapter (usually due to the user). Accepts unknown notification IDs, as this can be called for notifications from other applications (with the DBus adapter). """ log.misc.debug(f"Notification {notification_id} clicked by adapter") try: notification = self._active_notifications[notification_id] except KeyError: # Notification from a different application log.misc.debug("Did not find matching notification, ignoring") return try: notification.click() except RuntimeError: # WORKAROUND for # https://www.riverbankcomputing.com/pipermail/pyqt/2020-May/042918.html log.misc.debug(f"Ignoring click request for notification {notification_id} " "due to PyQt bug") def _drop_adapter(self) -> None: """Drop the currently active adapter (if any). 
This means we'll reinitialize a new one (including re-testing available options) on the next notification. """ if self._adapter: log.misc.debug(f"Dropping adapter {self._adapter.NAME}") self._adapter.deleteLater() self._adapter = None self._on_adapter_clear_all() @pyqtSlot() def _on_adapter_clear_all(self) -> None: """Called when the adapter requests clearing all notifications. This is currently only done if the DBus notification server was unregistered. It's probably safe to assume no notifications exist anymore. Also, this makes sure we don't have any duplicate IDs. Depending on the system, either the server will automatically be restarted on the next notification, or we'll get a (properly handled) NoReply error then. """ for notification_id in list(self._active_notifications): self._on_adapter_closed(notification_id) @pyqtSlot(str) def _on_adapter_error(self, error: str) -> None: """A fatal error happened in the adapter. This causes us to drop the current adapter and reinit it (or a different one) on the next notification. """ if self._adapter is None: # Error during setup return message.error(f"Notification error from {self._adapter.NAME} adapter: {error}") self._drop_adapter() class SystrayNotificationAdapter(AbstractNotificationAdapter): """Shows notifications using QSystemTrayIcon. This is essentially a reimplementation of QtWebEngine's default implementation: https://github.com/qt/qtwebengine/blob/v5.15.2/src/webenginewidgets/api/qwebenginenotificationpresenter.cpp It exists because QtWebEngine won't allow us to restore its default presenter, so if something goes wrong when trying to e.g. connect to the DBus one, we still want to be able to switch back after our presenter is already installed. Also, it's nice if users can switch presenters in the config live. 
""" NAME = "systray" NOTIFICATION_ID = 1 # only one concurrent notification supported def __init__(self, parent: QObject = None) -> None: super().__init__(parent) if not QSystemTrayIcon.isSystemTrayAvailable(): raise Error("No system tray available") if not QSystemTrayIcon.supportsMessages(): raise Error("System tray does not support messages") self._systray = QSystemTrayIcon(self) self._systray.setIcon(objects.qapp.windowIcon()) self._systray.messageClicked.connect(self._on_systray_clicked) def present( self, qt_notification: "QWebEngineNotification", *, replaces_id: Optional[int], ) -> int: utils.unused(replaces_id) # QSystemTray can only show one message self.close_id.emit(self.NOTIFICATION_ID) self._systray.show() icon = self._convert_icon(qt_notification.icon()) msg = self._format_message(qt_notification.message(), qt_notification.origin()) self._systray.showMessage(qt_notification.title(), msg, icon) return self.NOTIFICATION_ID def _convert_icon(self, image: QImage) -> QIcon: """Convert a QImage to a QIcon.""" if image.isNull(): return QIcon() pixmap = QPixmap.fromImage(image, Qt.ImageConversionFlag.NoFormatConversion) assert not pixmap.isNull() icon = QIcon(pixmap) assert not icon.isNull() return icon def _format_message(self, text: str, origin: QUrl) -> str: """Format the message to display.""" if not self._should_include_origin(origin): return text return origin.toDisplayString() + '\n\n' + text @pyqtSlot() def _on_systray_clicked(self) -> None: self.click_id.emit(self.NOTIFICATION_ID) @pyqtSlot(int) def on_web_closed(self, notification_id: int) -> None: assert notification_id == self.NOTIFICATION_ID, notification_id if not sip.isdeleted(self._systray): # This can get called during shutdown self._systray.hide() class MessagesNotificationAdapter(AbstractNotificationAdapter): """Shows notifications using qutebrowser messages. This is mostly used as a fallback if no other method is available. Most notification features are not supported. Note that it's expected for this adapter to never fail (i.e. not raise Error in __init__ and not emit the error signal), as it's used as a "last resort" fallback. """ NAME = "messages" def __init__(self, parent: QObject = None) -> None: super().__init__(parent) self._id_gen = itertools.count(1) def present( self, qt_notification: "QWebEngineNotification", *, replaces_id: Optional[int], ) -> int: markup = self._format_message(qt_notification) new_id = replaces_id if replaces_id is not None else next(self._id_gen) message.info(markup, replace=f'notifications-{new_id}') # Faking closing, timing might not be 100% accurate QTimer.singleShot( config.val.messages.timeout, lambda: self.close_id.emit(new_id)) return new_id @pyqtSlot(int) def on_web_closed(self, _notification_id: int) -> None: """We can't close messages.""" def _format_message(self, qt_notification: "QWebEngineNotification") -> str: title = html.escape(qt_notification.title()) body = html.escape(qt_notification.message()) hint = "" if qt_notification.icon().isNull() else " (image not shown)" if self._should_include_origin(qt_notification.origin()): url = html.escape(qt_notification.origin().toDisplayString()) origin_str = f" from {url}" else: origin_str = "" return ( f"<i>Notification{origin_str}:{hint}</i><br/><br/>" f"<b>{title}</b><br/>" f"{body}" ) class HerbeNotificationAdapter(AbstractNotificationAdapter): """Shows notifications using herbe. 
See https://github.com/dudik/herbe """ NAME = "herbe" def __init__(self, parent: QObject = None) -> None: super().__init__(parent) # Also cleans up potentially hanging semaphores from herbe. # https://github.com/dudik/herbe#notifications-dont-show-up try: subprocess.run(['herbe'], stderr=subprocess.DEVNULL, check=True) except OSError as e: raise Error(f'herbe error: {e}') except subprocess.CalledProcessError as e: if e.returncode != 1: raise Error(f'herbe exited with status {e.returncode}') def present( self, qt_notification: "QWebEngineNotification", *, replaces_id: Optional[int], ) -> int: if replaces_id is not None: self.on_web_closed(replaces_id) proc = QProcess(self) proc.errorOccurred.connect(self._on_error) lines = list(self._message_lines(qt_notification)) proc.start('herbe', lines) pid = proc.processId() assert pid > 1 proc.finished.connect(functools.partial(self._on_finished, pid)) return pid def _message_lines( self, qt_notification: "QWebEngineNotification", ) -> Iterator[str]: """Get the lines to display for this notification.""" yield qt_notification.title() origin = qt_notification.origin() if self._should_include_origin(origin): yield origin.toDisplayString() yield qt_notification.message() if not qt_notification.icon().isNull(): yield "(icon not shown)" def _on_finished(self, pid: int, code: int, status: QProcess.ExitStatus) -> None: """Handle a closing herbe process. From the GitHub page: - "An accepted notification always returns exit code 0." - "Dismissed notifications return exit code 2." Any other exit status should never happen. We ignore CrashExit as SIGUSR1/SIGUSR2 are expected "crashes", and for any other signals, we can't do much - emitting self.error would just go use herbe again, so there's no point. """ if status == QProcess.ExitStatus.CrashExit: return if code == 0: self.click_id.emit(pid) elif code == 2: self.close_id.emit(pid) else: proc = self.sender() stderr = proc.readAllStandardError() raise Error(f'herbe exited with status {code}: {stderr}') @pyqtSlot(QProcess.ProcessError) def _on_error(self, error: QProcess.ProcessError) -> None: if error == QProcess.ProcessError.Crashed: return name = debug.qenum_key(QProcess.ProcessError, error) raise Error(f'herbe process error: {name}') @pyqtSlot(int) def on_web_closed(self, notification_id: int) -> None: """Handle closing the notification from JS. From herbe's README: "A notification can be dismissed [...] 
[by] sending a SIGUSR1 signal to it" """ os.kill(notification_id, signal.SIGUSR1) # Make sure we immediately remove it from active notifications self.close_id.emit(notification_id) @dataclasses.dataclass class _ServerQuirks: """Quirks for certain DBus notification servers.""" spec_version: Optional[str] = None avoid_actions: bool = False avoid_body_hyperlinks: bool = False escape_title: bool = False icon_key: Optional[str] = None skip_capabilities: bool = False wrong_replaces_id: bool = False no_padded_images: bool = False @dataclasses.dataclass class _ServerCapabilities: """Notification capabilities supported by the server.""" actions: bool body_markup: bool body_hyperlinks: bool kde_origin_name: bool @classmethod def from_list(cls, capabilities: List[str]) -> "_ServerCapabilities": return cls( actions='actions' in capabilities, body_markup='body-markup' in capabilities, body_hyperlinks='body-hyperlinks' in capabilities, kde_origin_name='x-kde-origin-name' in capabilities, ) def _as_uint32(x: int) -> QVariant: """Convert the given int to an uint32 for DBus.""" variant = QVariant(x) successful = variant.convert(QVariant.Type.UInt) assert successful return variant class DBusNotificationAdapter(AbstractNotificationAdapter): """Send notifications over DBus. This is essentially what libnotify does, except using Qt's DBus implementation. Related specs: https://developer.gnome.org/notification-spec/ https://specifications.freedesktop.org/notification-spec/notification-spec-latest.html https://wiki.ubuntu.com/NotificationDevelopmentGuidelines """ SERVICE = "org.freedesktop.Notifications" TEST_SERVICE = "org.qutebrowser.TestNotifications" PATH = "/org/freedesktop/Notifications" INTERFACE = "org.freedesktop.Notifications" SPEC_VERSION = "1.2" # Released in January 2011, still current in March 2021. NAME = "libnotify" _NON_FATAL_ERRORS = { # notification daemon is gone "org.freedesktop.DBus.Error.NoReply", # https://gitlab.gnome.org/GNOME/gnome-flashback/-/blob/3.40.0/gnome-flashback/libnotifications/nd-daemon.c#L178-187 # Exceeded maximum number of notifications "org.freedesktop.Notifications.MaxNotificationsExceeded", # https://bugs.kde.org/show_bug.cgi?id=409157 # https://github.com/KDE/plasma-workspace/blob/v5.21.4/libnotificationmanager/server_p.cpp#L227-L237 # Created too many similar notifications in quick succession "org.freedesktop.Notifications.Error.ExcessNotificationGeneration", } def __init__(self, parent: QObject = None) -> None: super().__init__(bridge) assert _notifications_supported() if utils.is_windows: # The QDBusConnection destructor seems to cause error messages (and # potentially segfaults) on Windows, so we bail out early in that case. # We still try to get a connection on macOS, since it's theoretically # possible to run DBus there. 
raise Error("libnotify is not supported on Windows") bus = QDBusConnection.sessionBus() if not bus.isConnected(): raise Error( "Failed to connect to DBus session bus: " + self._dbus_error_str(bus.lastError())) self._watcher = QDBusServiceWatcher( self.SERVICE, bus, QDBusServiceWatcher.WatchModeFlag.WatchForUnregistration, self, ) self._watcher.serviceUnregistered.connect( # type: ignore[attr-defined] self._on_service_unregistered) test_service = 'test-notification-service' in objects.debug_flags service = self.TEST_SERVICE if test_service else self.SERVICE self.interface = QDBusInterface(service, self.PATH, self.INTERFACE, bus) if not self.interface.isValid(): raise Error( "Could not construct a DBus interface: " + self._dbus_error_str(self.interface.lastError())) connections = [ ("NotificationClosed", self._handle_close), ("ActionInvoked", self._handle_action), ] for name, func in connections: if not bus.connect(service, self.PATH, self.INTERFACE, name, func): raise Error( f"Could not connect to {name}: " + self._dbus_error_str(bus.lastError())) self._quirks = _ServerQuirks() if not test_service: # Can't figure out how to make this work with the test server... # https://www.riverbankcomputing.com/pipermail/pyqt/2021-March/043724.html self._get_server_info() if self._quirks.skip_capabilities: self._capabilities = _ServerCapabilities.from_list([]) else: self._fetch_capabilities() @pyqtSlot(str) def _on_service_unregistered(self) -> None: """Make sure we know when the notification daemon exits. If that's the case, we bail out, as otherwise notifications would fail or the next start of the server would lead to duplicate notification IDs. """ log.misc.debug("Notification daemon did quit!") self.clear_all.emit() def _find_quirks( # noqa: C901 ("too complex" self, name: str, vendor: str, ver: str, ) -> Optional[_ServerQuirks]: """Find quirks to use based on the server information.""" if (name, vendor) == ("notify-osd", "Canonical Ltd"): # Shows a dialog box instead of a notification bubble as soon as a # notification has an action (even if only a default one). Dialog boxes are # buggy and return a notification with ID 0. 
# https://wiki.ubuntu.com/NotificationDevelopmentGuidelines#Avoiding_actions return _ServerQuirks(avoid_actions=True, spec_version="1.1") elif (name, vendor) == ("Notification Daemon", "MATE"): # Still in active development but doesn't implement spec 1.2: # https://github.com/mate-desktop/mate-notification-daemon/issues/132 quirks = _ServerQuirks(spec_version="1.1") if utils.VersionNumber.parse(ver) <= utils.VersionNumber(1, 24): # https://github.com/mate-desktop/mate-notification-daemon/issues/118 quirks.avoid_body_hyperlinks = True return quirks elif (name, vendor) == ("naughty", "awesome") and ver != "devel": # Still in active development but spec 1.0/1.2 support isn't # released yet: # https://github.com/awesomeWM/awesome/commit/e076bc664e0764a3d3a0164dabd9b58d334355f4 parsed_version = utils.VersionNumber.parse(ver.lstrip('v')) if parsed_version <= utils.VersionNumber(4, 3): return _ServerQuirks(spec_version="1.0") elif (name, vendor) == ("twmnd", "twmnd"): # https://github.com/sboli/twmn/pull/96 return _ServerQuirks(spec_version="0") elif (name, vendor) == ("tiramisu", "Sweets"): # https://github.com/Sweets/tiramisu/issues/20 return _ServerQuirks(skip_capabilities=True) elif (name, vendor) == ("lxqt-notificationd", "lxqt.org"): quirks = _ServerQuirks() parsed_version = utils.VersionNumber.parse(ver) if parsed_version <= utils.VersionNumber(0, 16): # https://github.com/lxqt/lxqt-notificationd/issues/253 quirks.escape_title = True if parsed_version < utils.VersionNumber(0, 16): # https://github.com/lxqt/lxqt-notificationd/commit/c23e254a63c39837fb69d5c59c5e2bc91e83df8c quirks.icon_key = 'image_data' return quirks elif (name, vendor) == ("haskell-notification-daemon", "abc"): # aka "deadd" return _ServerQuirks( # https://github.com/phuhl/linux_notification_center/issues/160 spec_version="1.0", # https://github.com/phuhl/linux_notification_center/issues/161 wrong_replaces_id=True, ) elif (name, vendor) == ("ninomiya", "deifactor"): return _ServerQuirks( no_padded_images=True, wrong_replaces_id=True, ) elif (name, vendor) == ("Raven", "Budgie Desktop Developers"): return _ServerQuirks( # https://github.com/solus-project/budgie-desktop/issues/2114 escape_title=True, # https://github.com/solus-project/budgie-desktop/issues/2115 wrong_replaces_id=True, ) return None def _get_server_info(self) -> None: """Query notification server information and set quirks.""" reply = self.interface.call(QDBus.CallMode.BlockWithGui, "GetServerInformation") self._verify_message(reply, "ssss", QDBusMessage.MessageType.ReplyMessage) name, vendor, ver, spec_version = reply.arguments() log.misc.debug( f"Connected to notification server: {name} {ver} by {vendor}, " f"implementing spec {spec_version}") quirks = self._find_quirks(name, vendor, ver) if quirks is not None: log.misc.debug(f"Enabling quirks {quirks}") self._quirks = quirks expected_spec_version = self._quirks.spec_version or self.SPEC_VERSION if spec_version != expected_spec_version: log.misc.warning( f"Notification server ({name} {ver} by {vendor}) implements " f"spec {spec_version}, but {expected_spec_version} was expected. 
" f"If {name} is up to date, please report a qutebrowser bug.") # https://specifications.freedesktop.org/notification-spec/latest/ar01s08.html icon_key_overrides = { "1.0": "icon_data", "1.1": "image_data", } if spec_version in icon_key_overrides: self._quirks.icon_key = icon_key_overrides[spec_version] def _dbus_error_str(self, error: QDBusError) -> str: """Get a string for a DBus error.""" if not error.isValid(): return "Unknown error" return f"{error.name()} - {error.message()}" def _verify_message( self, msg: QDBusMessage, expected_signature: str, expected_type: QDBusMessage.MessageType, ) -> None: """Check the signature/type of a received message. Raises DBusError if the signature doesn't match. """ assert expected_type not in [ QDBusMessage.MessageType.ErrorMessage, QDBusMessage.MessageType.InvalidMessage, ], expected_type if msg.type() == QDBusMessage.MessageType.ErrorMessage: err = msg.errorName() if err in self._NON_FATAL_ERRORS: self.error.emit(msg.errorMessage()) return raise Error(f"Got DBus error: {err} - {msg.errorMessage()}") signature = msg.signature() if signature != expected_signature: raise Error( f"Got a message with signature {signature} but expected " f"{expected_signature} (args: {msg.arguments()})") typ = msg.type() if typ != expected_type: type_str = debug.qenum_key(QDBusMessage.MessageType, typ) expected_type_str = debug.qenum_key(QDBusMessage.MessageType, expected_type) raise Error( f"Got a message of type {type_str} but expected {expected_type_str}" f"(args: {msg.arguments()})") def present( self, qt_notification: "QWebEngineNotification", *, replaces_id: Optional[int], ) -> int: """Shows a notification over DBus.""" if replaces_id is None: replaces_id = 0 # 0 is never a valid ID according to the spec actions = [] if self._capabilities.actions: actions = ['default', 'Activate'] # key, name actions_arg = QDBusArgument(actions, QMetaType.Type.QStringList) origin_url_str = qt_notification.origin().toDisplayString() hints: Dict[str, Any] = { # Include the origin in case the user wants to do different things # with different origin's notifications. "x-qutebrowser-origin": origin_url_str, "desktop-entry": "org.qutebrowser.qutebrowser", } is_useful_origin = self._should_include_origin(qt_notification.origin()) if self._capabilities.kde_origin_name and is_useful_origin: hints["x-kde-origin-name"] = origin_url_str icon = qt_notification.icon() if icon.isNull(): filename = ':/icons/qutebrowser-64x64.png' icon = QImage(filename) key = self._quirks.icon_key or "image-data" data = self._convert_image(icon) if data is not None: hints[key] = data # Titles don't support markup (except with broken servers) title = qt_notification.title() if self._quirks.escape_title: title = html.escape(title, quote=False) reply = self.interface.call( QDBus.CallMode.BlockWithGui, "Notify", "qutebrowser", # application name _as_uint32(replaces_id), # replaces notification id "", # icon name/file URL, we use image-data and friends instead. title, self._format_body(qt_notification.message(), qt_notification.origin()), actions_arg, hints, -1, # timeout; -1 means 'use default' ) self._verify_message(reply, "u", QDBusMessage.MessageType.ReplyMessage) notification_id = reply.arguments()[0] if replaces_id not in [0, notification_id]: msg = ( f"Wanted to replace notification {replaces_id} but got new id " f"{notification_id}." 
) if self._quirks.wrong_replaces_id: log.misc.debug(msg) else: log.misc.error(msg) return notification_id def _convert_image(self, qimage: QImage) -> Optional[QDBusArgument]: """Convert a QImage to the structure DBus expects. https://specifications.freedesktop.org/notification-spec/latest/ar01s05.html#icons-and-images-formats """ bits_per_color = 8 has_alpha = qimage.hasAlphaChannel() if has_alpha: image_format = QImage.Format.Format_RGBA8888 channel_count = 4 else: image_format = QImage.Format.Format_RGB888 channel_count = 3 qimage.convertTo(image_format) bytes_per_line = qimage.bytesPerLine() width = qimage.width() height = qimage.height() image_data = QDBusArgument() image_data.beginStructure() image_data.add(width) image_data.add(height) image_data.add(bytes_per_line) image_data.add(has_alpha) image_data.add(bits_per_color) image_data.add(channel_count) try: size = qimage.sizeInBytes() except TypeError: # WORKAROUND for # https://www.riverbankcomputing.com/pipermail/pyqt/2020-May/042919.html # byteCount() is obsolete, but sizeInBytes() is only available with # SIP >= 5.3.0. size = qimage.byteCount() # Despite the spec not mandating this, many notification daemons mandate that # the last scanline does not have any padding bytes. # # Or in the words of dunst: # # The image is serialised rowwise pixel by pixel. The rows are aligned by a # spacer full of garbage. The overall data length of data + garbage is # called the rowstride. # # Mind the missing spacer at the last row. # # len: |<--------------rowstride---------------->| # len: |<-width*pixelstride->| # row 1: | data for row 1 | spacer of garbage | # row 2: | data for row 2 | spacer of garbage | # | . | spacer of garbage | # | . | spacer of garbage | # | . | spacer of garbage | # row n-1: | data for row n-1 | spacer of garbage | # row n: | data for row n | # # Source: # https://github.com/dunst-project/dunst/blob/v1.6.1/src/icon.c#L292-L309 padding = bytes_per_line - width * channel_count assert 0 <= padding <= 3, (padding, bytes_per_line, width, channel_count) size -= padding if padding and self._quirks.no_padded_images: return None bits = qimage.constBits().asstring(size) image_data.add(QByteArray(bits)) image_data.endStructure() return image_data @pyqtSlot(QDBusMessage) def _handle_close(self, msg: QDBusMessage) -> None: """Handle NotificationClosed from DBus.""" self._verify_message(msg, "uu", QDBusMessage.MessageType.SignalMessage) notification_id, _close_reason = msg.arguments() self.close_id.emit(notification_id) @pyqtSlot(QDBusMessage) def _handle_action(self, msg: QDBusMessage) -> None: """Handle ActionInvoked from DBus.""" self._verify_message(msg, "us", QDBusMessage.MessageType.SignalMessage) notification_id, action_key = msg.arguments() if action_key == "default": self.click_id.emit(notification_id) @pyqtSlot(int) def on_web_closed(self, notification_id: int) -> None: """Send CloseNotification if a notification was closed from JS.""" self.interface.call( QDBus.CallMode.NoBlock, "CloseNotification", _as_uint32(notification_id), ) def _fetch_capabilities(self) -> None: """Fetch capabilities from the notification server.""" reply = self.interface.call( QDBus.CallMode.BlockWithGui, "GetCapabilities", ) self._verify_message(reply, "as", QDBusMessage.MessageType.ReplyMessage) caplist = reply.arguments()[0] self._capabilities = _ServerCapabilities.from_list(caplist) if self._quirks.avoid_actions: self._capabilities.actions = False if self._quirks.avoid_body_hyperlinks: self._capabilities.body_hyperlinks = False 
log.misc.debug(f"Notification server capabilities: {self._capabilities}") def _format_body(self, body: str, origin_url: QUrl) -> str: """Format the body according to the server capabilities. If the server doesn't support x-kde-origin-name, we include the origin URL as a prefix. If possible, we hyperlink it. For both prefix and body, we'll need to HTML escape it if the server supports body markup. """ urlstr = origin_url.toDisplayString() is_useful_origin = self._should_include_origin(origin_url) if self._capabilities.kde_origin_name or not is_useful_origin: prefix = None elif self._capabilities.body_markup and self._capabilities.body_hyperlinks: href = html.escape( origin_url.toString(QUrl.ComponentFormattingOption.FullyEncoded) # type: ignore[arg-type] ) text = html.escape(urlstr, quote=False) prefix = f'<a href="{href}">{text}</a>' elif self._capabilities.body_markup: prefix = html.escape(urlstr, quote=False) else: prefix = urlstr if self._capabilities.body_markup: body = html.escape(body, quote=False) if prefix is None: return body return prefix + '\n\n' + body
1
26484
This is extra awkward: QVariant.convert() takes a QMetaType, which in turn is constructed from the value of a QMetaType.Type member.
qutebrowser-qutebrowser
py
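To make the review comment above concrete: in PyQt6, QVariant.convert() no longer accepts a QVariant.Type value; it takes a QMetaType, which itself is constructed from the integer value of a QMetaType.Type member. Here is a standalone repetition of the conversion from the patch, runnable on its own (the value 42 is just an arbitrary example):

# Standalone illustration of the fix shown in the patch above.
from PyQt6.QtCore import QMetaType, QVariant

variant = QVariant(42)  # arbitrary example value
# PyQt6: convert() takes a QMetaType built from QMetaType.Type's integer value.
successful = variant.convert(QMetaType(QMetaType.Type.UInt.value))
assert successful
print(variant.typeName())  # uint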
@@ -59,6 +59,12 @@ type Writer struct { // These fields exist only when w is not created in the first place when // NewWriter is called. + // + // A ctx is stored in the Writer since we need to pass it into NewTypedWriter + // when we finished detecting the content type of the object and create the + // underlying driver.Writer. This step happens inside Write or Close and + // neither of them take a context.Context as an argument. The ctx must be set + // to nil after we have done passing it. ctx context.Context bucket driver.Bucket key string
1
// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package blob provides an easy way to interact with Blob objects within // a bucket. It utilizes standard io packages to handle reads and writes. package blob import ( "bytes" "context" "errors" "mime" "net/http" "github.com/google/go-cloud/blob/driver" ) // Reader implements io.ReadCloser to read a blob. It must be closed after // reads are finished. type Reader struct { r driver.Reader } // Read implements io.ReadCloser to read from this reader. func (r *Reader) Read(p []byte) (int, error) { return r.r.Read(p) } // Close implements io.ReadCloser to close this reader. func (r *Reader) Close() error { return r.r.Close() } // ContentType returns the MIME type of the blob object. func (r *Reader) ContentType() string { return r.r.Attrs().ContentType } // Size returns the content size of the blob object. func (r *Reader) Size() int64 { return r.r.Attrs().Size } // Writer implements io.WriteCloser to write a blob. It must be closed after // all writes are done. type Writer struct { w driver.Writer // These fields exist only when w is not created in the first place when // NewWriter is called. ctx context.Context bucket driver.Bucket key string opt *driver.WriterOptions buf *bytes.Buffer } // sniffLen is the byte size of Writer.buf used to detect content-type. const sniffLen = 512 // Write implements the io.Writer interface. // // The writes happen asynchronously, which means the returned error can be nil // even if the actual write fails. Use the error returned from Close to // check and handle errors. func (w *Writer) Write(p []byte) (n int, err error) { if w.w != nil { return w.w.Write(p) } // If w is not yet created due to no content-type being passed in, try to sniff // the MIME type based on at most 512 bytes of the blob content of p. // Detect the content-type directly if the first chunk is at least 512 bytes. if w.buf.Len() == 0 && len(p) >= sniffLen { return w.open(p) } // Store p in w.buf and detect the content-type when the size of content in // w.buf is at least 512 bytes. w.buf.Write(p) if w.buf.Len() >= sniffLen { return w.open(w.buf.Bytes()) } return len(p), nil } // Close flushes any buffered data and completes the Write. It is the user's // responsibility to call it after finishing the write and handle the error if returned. func (w *Writer) Close() error { if w.w != nil { return w.w.Close() } if _, err := w.open(w.buf.Bytes()); err != nil { return err } return w.w.Close() } // open tries to detect the MIME type of p and write it to the blob. func (w *Writer) open(p []byte) (n int, err error) { ct := http.DetectContentType(p) if w.w, err = w.bucket.NewTypedWriter(w.ctx, w.key, ct, w.opt); err != nil { return 0, err } w.buf = nil w.ctx = nil w.key = "" w.opt = nil return w.w.Write(p) } // Bucket manages the underlying blob service and provides read, write and delete // operations on given object within it. 
type Bucket struct { b driver.Bucket } // NewBucket creates a new Bucket for a group of objects for a blob service. func NewBucket(b driver.Bucket) *Bucket { return &Bucket{b: b} } // NewReader returns a Reader to read from an object, or an error when the object // is not found by the given key, which can be checked by calling IsNotExist. // // The caller must call Close on the returned Reader when done reading. func (b *Bucket) NewReader(ctx context.Context, key string) (*Reader, error) { return b.NewRangeReader(ctx, key, 0, -1) } // NewRangeReader returns a Reader that reads part of an object, reading at // most length bytes starting at the given offset. If length is 0, it will read // only the metadata. If length is negative, it will read till the end of the // object. It returns an error if that object does not exist, which can be // checked by calling IsNotExist. // // The caller must call Close on the returned Reader when done reading. func (b *Bucket) NewRangeReader(ctx context.Context, key string, offset, length int64) (*Reader, error) { if offset < 0 { return nil, errors.New("new blob range reader: offset must be non-negative") } r, err := b.b.NewRangeReader(ctx, key, offset, length) return &Reader{r: r}, newBlobError(err) } // NewWriter returns a Writer that writes to an object associated with key. // // A new object will be created unless an object with this key already exists. // Otherwise any previous object with the same key will be replaced. The object // is not guaranteed to be available until Close has been called. // // The caller must call Close on the returned Writer when done writing. func (b *Bucket) NewWriter(ctx context.Context, key string, opt *WriterOptions) (*Writer, error) { var dopt *driver.WriterOptions var w driver.Writer if opt != nil { dopt = &driver.WriterOptions{ BufferSize: opt.BufferSize, } if opt.ContentType != "" { t, p, err := mime.ParseMediaType(opt.ContentType) if err != nil { return nil, err } ct := mime.FormatMediaType(t, p) w, err = b.b.NewTypedWriter(ctx, key, ct, dopt) return &Writer{w: w}, err } } return &Writer{ ctx: ctx, bucket: b.b, key: key, opt: dopt, buf: bytes.NewBuffer([]byte{}), }, nil } // Delete deletes the object associated with key. It returns an error if that // object does not exist, which can be checked by calling IsNotExist. func (b *Bucket) Delete(ctx context.Context, key string) error { return newBlobError(b.b.Delete(ctx, key)) } // WriterOptions controls Writer behaviors. type WriterOptions struct { // BufferSize changes the default size in bytes of the maximum part Writer can // write in a single request. Larger objects will be split into multiple requests. // // The support specification of this operation varies depending on the underlying // blob service. If zero value is given, it is set to a reasonable default value. // If negative value is given, it will be either disabled (if supported by the // service), which means Writer will write as a whole, or reset to default value. // It could be a no-op when not supported at all. // // If the Writer is used to write small objects concurrently, set the buffer size // to a smaller size to avoid high memory usage. BufferSize int // ContentType specifies the MIME type of the object being written. 
If not set, // then it will be inferred from the content using the algorithm described at // http://mimesniff.spec.whatwg.org/ ContentType string } type blobError struct { msg string kind driver.ErrorKind } func (e *blobError) Error() string { return e.msg } func newBlobError(err error) error { if err == nil { return nil } berr := &blobError{msg: err.Error()} if e, ok := err.(driver.Error); ok { berr.kind = e.BlobError() } return berr } // IsNotExist returns whether an error is a driver.Error with NotFound kind. func IsNotExist(err error) bool { if e, ok := err.(*blobError); ok { return e.kind == driver.NotFound } return false }
1
10779
Nit: finished -> finish, "done passing it" -> "passed it".
google-go-cloud
go
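The comment added by the patch above describes a context handoff: NewTypedWriter needs a ctx, but content-type sniffing only finishes inside Write or Close, which take no context, so the Writer carries ctx until then and nils it afterwards. Below is a reduced, self-contained sketch of that pattern; the lazyWriter type and its fields are simplified stand-ins, not the blob package's actual API.

package main

import (
	"context"
	"fmt"
)

// lazyWriter mimics blob.Writer's deferred creation: ctx is captured at
// construction, consumed when the underlying writer is finally opened
// (simulated here), and cleared so the context is not retained.
type lazyWriter struct {
	ctx    context.Context
	opened bool
}

func (w *lazyWriter) Write(p []byte) (int, error) {
	if !w.opened {
		// ...a real implementation would call NewTypedWriter(w.ctx, ...) here...
		w.opened = true
		w.ctx = nil // done with the context once the driver writer exists
	}
	return len(p), nil
}

func main() {
	w := &lazyWriter{ctx: context.Background()}
	n, _ := w.Write([]byte("hello"))
	fmt.Println(n, w.ctx == nil) // 5 true
}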
@@ -46,5 +46,18 @@ module C2 config.autoload_paths << Rails.root.join('lib') config.assets.precompile << 'common/communicarts.css' + + # Paperclip's attachment settings are determined by S3 env vars + if ENV['S3_BUCKET_NAME'] && ENV['S3_ACCESS_KEY_ID'] && ENV['S3_SECRET_ACCESS_KEY'] + Paperclip::Attachment.default_options.merge!( + bucket: ENV['S3_BUCKET_NAME'], + s3_credentials: { + access_key_id: ENV['S3_ACCESS_KEY_ID'], + secret_access_key: ENV['S3_SECRET_ACCESS_KEY'] + }, + s3_permissions: :private, + storage: :s3, + ) + end end end
1
require File.expand_path('../boot', __FILE__) require 'rails/all' # Require the gems listed in Gemfile, including any gems # you've limited to :test, :development, or :production. Bundler.require(:default, Rails.env) module C2 class Application < Rails::Application # Settings in config/environments/* take precedence over those specified here. # Application configuration should go into files in config/initializers # -- all .rb files in that directory are automatically loaded. # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone. # Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC. # config.time_zone = 'Central Time (US & Canada)' # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded. # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s] # config.i18n.default_locale = :de # http://git.io/ETVYsQ config.middleware.insert_before 0, Rack::Cors, logger: Rails.logger do allow do origins '*' resource '*', headers: :any, methods: [:get, :post, :delete, :put, :options, :head], max_age: 1728000 end end config.middleware.use(Rack::SslEnforcer) if ENV['FORCE_HTTPS'] == 'true' config.action_mailer.raise_delivery_errors = true config.action_mailer.default_url_options = { scheme: ENV['DEFAULT_URL_SCHEME'] || 'http', host: ENV['HOST_URL'] || ENV['DEFAULT_URL_HOST'] || 'localhost', port: ENV['DEFAULT_URL_PORT'] || 3000 } config.roadie.url_options = config.action_mailer.default_url_options config.autoload_paths << Rails.root.join('lib') config.assets.precompile << 'common/communicarts.css' end end
1
12901
Maybe mention that it will be saved to the filesystem otherwise?
18F-C2
rb
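The review comment above asks the patch to document the fallback behavior. Since Paperclip keeps its default :filesystem storage whenever storage: :s3 is never set, the suggestion could be addressed with a comment along these lines (the wording is a suggestion, not from the repository):

# Paperclip's attachment settings are determined by S3 env vars.
# When they are absent, Paperclip keeps its default :filesystem
# storage, so attachments are saved to the local filesystem instead.
if ENV['S3_BUCKET_NAME'] && ENV['S3_ACCESS_KEY_ID'] && ENV['S3_SECRET_ACCESS_KEY']
  Paperclip::Attachment.default_options.merge!(
    bucket: ENV['S3_BUCKET_NAME'],
    s3_credentials: {
      access_key_id: ENV['S3_ACCESS_KEY_ID'],
      secret_access_key: ENV['S3_SECRET_ACCESS_KEY']
    },
    s3_permissions: :private,
    storage: :s3,
  )
end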
@@ -108,7 +108,11 @@ namespace pwiz.Skyline.FileUI { for (var i = 0; i < numColumns; i++) { - dataGrid.Columns[i].ToolTipText = + // When we don't have user-provided headers, we still want localized headers that can be translated; + // this replaces the autogenerated strings with a localized version + dataGrid.Columns[i].HeaderText = string.Format(Resources.ImportTransitionListColumnSelectDlg_DisplayData_Column_) + (i+1).ToString(); + + dataGrid.Columns[i].ToolTipText = string.Format(Resources.ImportTransitionListColumnSelectDlg_DisplayData_The_input_text_did_not_appear_to_contain_column_headers__Use_the_dropdown_control_to_assign_column_meanings_for_import_); } }
1
/* * Original author: Alex MacLean <alex.maclean2000 .at. gmail.com>, * MacCoss Lab, Department of Genome Sciences, UW * * Copyright 2018 University of Washington - Seattle, WA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using pwiz.Skyline.Alerts; using pwiz.Skyline.Model; using pwiz.Skyline.Model.DocSettings; using pwiz.Skyline.Model.Lib; using pwiz.Skyline.Properties; using pwiz.Skyline.Util; using pwiz.Skyline.Util.Extensions; using System; using System.Collections.Generic; using System.Data; using System.Drawing; using System.Linq; using System.Windows.Forms; using pwiz.Skyline.Controls; namespace pwiz.Skyline.FileUI { public partial class ImportTransitionListColumnSelectDlg : ModeUIInvariantFormEx { public MassListImporter Importer { get; set; } public List<ComboBox> ComboBoxes { get; private set; } public bool WindowShown { get; private set; } // These are only for error checking private readonly SrmDocument _docCurrent; private readonly MassListInputs _inputs; private readonly IdentityPath _insertPath; public ImportTransitionListColumnSelectDlg(MassListImporter importer, SrmDocument docCurrent, MassListInputs inputs, IdentityPath insertPath) { Importer = importer; _docCurrent = docCurrent; _inputs = inputs; _insertPath = insertPath; InitializeComponent(); fileLabel.Text = Importer.Inputs.InputFilename; InitializeComboBoxes(); DisplayData(); PopulateComboBoxes(); //dataGrid.Update(); ResizeComboBoxes(); } private void DisplayData() { // The pasted data will be stored as a data table var table = new DataTable("TransitionList"); // Create the first row of columns var numColumns = Importer.RowReader.Lines[0].ParseDsvFields(Importer.Separator).Length; for (var i = 0; i < numColumns; i++) table.Columns.Add().DataType = typeof(string); // These dots are a placeholder for where the combo boxes will be var dots = Enumerable.Repeat(@"...", numColumns).ToArray(); // The first row will actually be combo boxes, but we use dots as a placeholder because we can't put combo boxes in a data table table.Rows.Add(dots); // Add the data for (var index = 0; index < Math.Min(100, Importer.RowReader.Lines.Count); index++) { var line = Importer.RowReader.Lines[index]; table.Rows.Add(line.ParseDsvFields(Importer.Separator)); } // Don't bother displaying more than 100 lines of data if (Importer.RowReader.Lines.Count > 100) table.Rows.Add(dots); // Set the table as the source for the DataGridView that the user sees. 
dataGrid.DataSource = table; var headers = Importer.RowReader.Indices.Headers; if (headers != null && headers.Length > 0) { for (var i = 0; i < numColumns; i++) { dataGrid.Columns[i].HeaderText = headers[i]; dataGrid.Columns[i].ToolTipText = string.Format(Resources.ImportTransitionListColumnSelectDlg_DisplayData_This_column_is_labeled_with_the_header___0___in_the_input_text__Use_the_dropdown_control_to_assign_its_meaning_for_import_, headers[i]); } dataGrid.ColumnHeadersVisible = true; } else { for (var i = 0; i < numColumns; i++) { dataGrid.Columns[i].ToolTipText = string.Format(Resources.ImportTransitionListColumnSelectDlg_DisplayData_The_input_text_did_not_appear_to_contain_column_headers__Use_the_dropdown_control_to_assign_column_meanings_for_import_); } } dataGrid.ScrollBars = dataGrid.Rows.Count * dataGrid.Rows[0].Height + dataGrid.ColumnHeadersHeight + SystemInformation.HorizontalScrollBarHeight > dataGrid.Height ? ScrollBars.Both : ScrollBars.Horizontal; } private void InitializeComboBoxes() { ComboBoxes = new List<ComboBox>(); for (var i = 0; i < Importer.RowReader.Lines[0].ParseDsvFields(Importer.Separator).Length; i++) { var combo = new ComboBox { DropDownStyle = ComboBoxStyle.DropDownList }; ComboBoxes.Add(combo); comboPanelInner.Controls.Add(combo); combo.BringToFront(); } } private void PopulateComboBoxes() { foreach (var comboBox in ComboBoxes) { comboBox.Text = string.Empty; comboBox.Items.AddRange(new object[] { Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Ignore_Column, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Decoy, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_iRT, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Label_Type, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Library_Intensity, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Peptide_Modified_Sequence, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Precursor_m_z, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Product_m_z, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Protein_Name, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Fragment_Name, // Commented out for consistency because there is no product charge column // Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Precursor_Charge }); comboBox.SelectedIndex = 0; comboBox.SelectedIndexChanged += ComboChanged; ComboHelper.AutoSizeDropDown(comboBox); } var columns = Importer.RowReader.Indices; // It's not unusual to see lines like "744.8 858.39 10 APR.AGLCQTFVYGGCR.y7.light 105 40" where protein, peptide, and label are all stuck together, // so that all three lay claim to a single column. In such cases, prioritize peptide. 
columns.PrioritizePeptideColumn(); var headers = Importer.RowReader.Indices.Headers; // Checks if the headers of the current list are the same as the headers of the previous list, // because if they are then we want to prioritize user headers bool sameHeaders = false; if (headers != null) { sameHeaders = (headers.ToList().SequenceEqual(Settings.Default.CustomImportTransitionListHeaders)); } // If there are items on our saved column list and the file does not contain headers (or the headers are the same as the previous file), // and the number of columns matches the saved column count then the combo box text is set using that list if ((Settings.Default.CustomImportTransitionListColumnsList.Count != 0) && ((headers == null) || (sameHeaders)) && Importer.RowReader.Lines[0].ParseDsvFields(Importer.Separator).Length == Settings.Default.CustomImportTransitionListColumnCount) { for (int i = 0; i < Settings.Default.CustomImportTransitionListColumnsList.Count; i++) { // The method is called for every tuplet on the list. Item 1 is the index position and item 2 is the name SetComboBoxText(Settings.Default.CustomImportTransitionListColumnsList[i].Item1, Settings.Default.CustomImportTransitionListColumnsList[i].Item2); ComboChanged(ComboBoxes[i], new EventArgs()); } } else { SetComboBoxText(columns.DecoyColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Decoy); SetComboBoxText(columns.IrtColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_iRT); SetComboBoxText(columns.LabelTypeColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Label_Type); SetComboBoxText(columns.LibraryColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Library_Intensity); SetComboBoxText(columns.PeptideColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Peptide_Modified_Sequence); SetComboBoxText(columns.PrecursorColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Precursor_m_z); SetComboBoxText(columns.ProductColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Product_m_z); SetComboBoxText(columns.ProteinColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Protein_Name); SetComboBoxText(columns.FragmentNameColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Fragment_Name); // Commented out for consistency because there is no product charge column // SetComboBoxText(columns.PrecursorChargeColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Precursor_Charge); } } public void ResizeComboBoxes() { const int gridBorderWidth = 1; comboPanelOuter.Location = new Point(dataGrid.Location.X + gridBorderWidth, dataGrid.Location.Y + (dataGrid.ColumnHeadersVisible ? dataGrid.ColumnHeadersHeight : 1)); var xOffset = 0; var height = 0; for (var i = 0; i < dataGrid.Columns.Count; i++) { var column = dataGrid.Columns[i]; var comboBox = ComboBoxes[i]; comboBox.Location = new Point(xOffset, 0); comboBox.Width = column.Width; // + ((i == dataGrid.Columns.Count - 1) ? 1 : 1); Playing with missing line on last combo box height = Math.Max(height, comboBox.Height); xOffset += column.Width; } var scrollBars = dataGrid.ScrollBars == ScrollBars.Both; var scrollWidth = SystemInformation.VerticalScrollBarWidth; var gridWidth = dataGrid.Size.Width - (scrollBars ? 
scrollWidth : 0) - (2 * gridBorderWidth); comboPanelOuter.Size = new Size(gridWidth, height); comboPanelInner.Size = new Size(xOffset, height); comboPanelInner.Location = new Point(-dataGrid.HorizontalScrollingOffset, 0); } // Sets the text of a combo box, with error checking private void SetComboBoxText(int comboBoxIndex, string text) { if (comboBoxIndex < 0 || comboBoxIndex >= ComboBoxes.Count) return; ComboBoxes[comboBoxIndex].Text = text; SetColumnColor(ComboBoxes[comboBoxIndex]); } // Ensures two combo boxes do not have the same value. Usually newSelectedIndex will be zero, because that is IgnoreColumn. private void CheckForComboBoxOverlap(int indexOfPreviousComboBox, int newSelectedIndex, int indexOfNewComboBox) { if (indexOfPreviousComboBox == indexOfNewComboBox || indexOfPreviousComboBox < 0 || indexOfPreviousComboBox >= ComboBoxes.Count) return; ComboBoxes[indexOfPreviousComboBox].SelectedIndex = newSelectedIndex; } private void SetColumnColor(ComboBox comboBox) { var comboBoxIndex = ComboBoxes.IndexOf(comboBox); // Grey out any ignored column var foreColor = Equals(comboBox.Text, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Ignore_Column) ? SystemColors.GrayText : dataGrid.ForeColor; dataGrid.Columns[comboBoxIndex].DefaultCellStyle.ForeColor = foreColor; } private void OnColumnsShown(object sender, EventArgs e) { foreach (var comboBox in ComboBoxes) { SetColumnColor(comboBox); } WindowShown = true; } private bool comboBoxChanged; // Callback for when a combo box is changed. We use it to update the index of the PeptideColumnIndices and preventing combo boxes from overlapping. private void ComboChanged(object sender, EventArgs e) // CONSIDER(bspratt) no charge state columns? (Seems to be because Skyline infers these and is confused when given explicit values) { var comboBox = (ComboBox) sender; var comboBoxIndex = ComboBoxes.IndexOf(comboBox); var columns = Importer.RowReader.Indices; comboBoxChanged = true; // Grey out any ignored column SetColumnColor(comboBox); if (comboBox.Text == Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Decoy) { CheckForComboBoxOverlap(columns.DecoyColumn, 0, comboBoxIndex); columns.ResetDuplicateColumns(comboBoxIndex); columns.DecoyColumn = comboBoxIndex; } else if (comboBox.Text == Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_iRT) { CheckForComboBoxOverlap(columns.IrtColumn, 0, comboBoxIndex); columns.ResetDuplicateColumns(comboBoxIndex); columns.IrtColumn = comboBoxIndex; } else if (comboBox.Text == Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Label_Type) { CheckForComboBoxOverlap(columns.LabelTypeColumn, 0, comboBoxIndex); columns.ResetDuplicateColumns(comboBoxIndex); columns.LabelTypeColumn = comboBoxIndex; } else if (comboBox.Text == Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Library_Intensity) { CheckForComboBoxOverlap(columns.LibraryColumn, 0, comboBoxIndex); columns.ResetDuplicateColumns(comboBoxIndex); columns.LibraryColumn = comboBoxIndex; } else if (comboBox.Text == Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Peptide_Modified_Sequence) { CheckForComboBoxOverlap(columns.PeptideColumn, 0, comboBoxIndex); columns.ResetDuplicateColumns(comboBoxIndex); columns.PeptideColumn = comboBoxIndex; } else if (comboBox.Text == Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Precursor_m_z) { CheckForComboBoxOverlap(columns.PrecursorColumn, 0, comboBoxIndex); columns.ResetDuplicateColumns(comboBoxIndex); 
columns.PrecursorColumn = comboBoxIndex; } else if (comboBox.Text == Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Product_m_z) { CheckForComboBoxOverlap(columns.ProductColumn, 0, comboBoxIndex); columns.ResetDuplicateColumns(comboBoxIndex); columns.ProductColumn = comboBoxIndex; } else if (comboBox.Text == Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Protein_Name) { CheckForComboBoxOverlap(columns.ProteinColumn, 0, comboBoxIndex); columns.ResetDuplicateColumns(comboBoxIndex); columns.ProteinColumn = comboBoxIndex; } else if (comboBox.Text == Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Fragment_Name) { CheckForComboBoxOverlap(columns.FragmentNameColumn, 0, comboBoxIndex); columns.ResetDuplicateColumns(comboBoxIndex); columns.FragmentNameColumn = comboBoxIndex; } // Commented out for consistency because there is no product charge column /*else if (comboBox.Text == Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Precursor_Charge) { CheckForComboBoxOverlap(columns.PrecursorChargeColumn, 0, comboBoxIndex); columns.ResetDuplicateColumns(comboBoxIndex); columns.PrecursorChargeColumn = comboBoxIndex; }*/ else { if (columns.DecoyColumn == comboBoxIndex) columns.DecoyColumn = -1; if (columns.IrtColumn == comboBoxIndex) columns.IrtColumn = -1; if (columns.LabelTypeColumn == comboBoxIndex) columns.LabelTypeColumn = -1; if (columns.LibraryColumn == comboBoxIndex) columns.LibraryColumn = -1; if (columns.PeptideColumn == comboBoxIndex) columns.PeptideColumn = -1; if (columns.PrecursorColumn == comboBoxIndex) columns.PrecursorColumn = -1; if (columns.ProductColumn == comboBoxIndex) columns.ProductColumn = -1; if (columns.ProteinColumn == comboBoxIndex) columns.ProteinColumn = -1; if (columns.FragmentNameColumn == comboBoxIndex) columns.FragmentNameColumn = -1; // Commented out for consistency because there is no product charge column // if (columns.PrecursorChargeColumn == comboBoxIndex) columns.PrecursorChargeColumn = -1; } } // Saves column positions between transition lists private void UpdateColumnsList() { var ColumnList = new List<Tuple<int, string>>(); var numColumns = Importer.RowReader.Lines[0].ParseDsvFields(Importer.Separator).Length; // Adds every column to the list as pairs: the index position and the name for (int i = 0; i < numColumns; i++) { ColumnList.Add(new Tuple<int, string>(i, ComboBoxes[i].Text)); } Settings.Default.CustomImportTransitionListColumnsList = ColumnList; Settings.Default.CustomImportTransitionListColumnCount = numColumns; } // Saves a list of the current document's headers, if any exist, so that they can be compared to those of the next document private void UpdateHeadersList() { var headers = Importer.RowReader.Indices.Headers; if (headers != null && headers.Length > 0) { Settings.Default.CustomImportTransitionListHeaders = headers.ToList(); } } private void DataGrid_ColumnWidthChanged(object sender, DataGridViewColumnEventArgs e) { ResizeComboBoxes(); } private void DataGrid_ColumnHeadersHeightChanged(object sender, EventArgs e) { ResizeComboBoxes(); } private void DataGrid_Scroll(object sender, ScrollEventArgs e) { comboPanelInner.Location = new Point(-dataGrid.HorizontalScrollingOffset, 0); } // If a combo box was changed, save the column indices and column count when the OK button is clicked private void ButtonOk_Click(object sender, EventArgs e) { OkDialog(); } public void OkDialog() { if (comboBoxChanged) { UpdateColumnsList(); UpdateHeadersList(); } if (CheckForErrors(true)) // Look for 
errors, be silent on success return; Assume.IsNotNull(InsertionParams); DialogResult = DialogResult.OK; } private void ButtonCheckForErrors_Click(object sender, EventArgs e) { CheckForErrors(); } public void CheckForErrors() { CheckForErrors(false); } private static List<string> MissingEssentialColumns { get; set; } // If an essential column is missing, add it to a list to display later private void CheckEssentialColumn(Tuple<int, string> column) { if (column.Item1 == -1) { MissingEssentialColumns.Add(column.Item2); } } public class DocumentChecked { public SrmDocument Document; public IdentityPath SelectPath; public List<MeasuredRetentionTime> IrtPeptides; public List<SpectrumMzInfo> LibrarySpectra; public List<PeptideGroupDocNode> PeptideGroups; } public DocumentChecked InsertionParams { get; private set; } /// <summary> /// Parse the mass list text, then show a status dialog if: /// errors are found, or /// errors are not found and "silentSuccess" arg is false /// Shows a special error message and forces the user to alter their entry if the list is missing Precursor m/z, Product m/z or Peptide Sequence. /// Return false if no errors found. /// </summary> /// <param name="silentSuccess">If true, don't show the confirmation dialog when there are no errors</param> /// <returns>True if list contains any errors and user does not elect to ignore them</returns> private bool CheckForErrors(bool silentSuccess) { var insertionParams = new DocumentChecked(); List<TransitionImportErrorInfo> testErrorList = null; var errorCheckCanceled = true; using (var longWaitDlg = new LongWaitDlg { Text = Resources.ImportTransitionListColumnSelectDlg_CheckForErrors_Checking_for_errors___ }) { longWaitDlg.PerformWork(this, 1000, progressMonitor => { var columns = Importer.RowReader.Indices; MissingEssentialColumns = new List<string>(); CheckEssentialColumn(new Tuple<int, string>(columns.PeptideColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Peptide_Modified_Sequence)); CheckEssentialColumn(new Tuple<int, string>(columns.PrecursorColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Precursor_m_z)); CheckEssentialColumn(new Tuple<int, string>(columns.ProductColumn, Resources.ImportTransitionListColumnSelectDlg_PopulateComboBoxes_Product_m_z)); insertionParams.Document = _docCurrent.ImportMassList(_inputs, Importer, progressMonitor, _insertPath, out insertionParams.SelectPath, out insertionParams.IrtPeptides, out insertionParams.LibrarySpectra, out testErrorList, out insertionParams.PeptideGroups); errorCheckCanceled = progressMonitor.IsCanceled; }); } if (errorCheckCanceled) { return true; // User cancelled, we can't say that there are no errors } if (testErrorList != null && testErrorList.Any()) { // There are errors, show them to user var isErrorAll = ReferenceEquals(insertionParams.Document, _docCurrent); if (MissingEssentialColumns.Count != 0) { // If the transition list is missing essential columns, tell the user in a // readable way MessageDlg.Show(this, TextUtil.SpaceSeparate(Resources.ImportTransitionListErrorDlg_ImportTransitionListErrorDlg_This_transition_list_cannot_be_imported_as_it_does_not_provide_values_for_, TextUtil.SpaceSeparate(MissingEssentialColumns))); return true; // There are errors } else { using (var dlg = new ImportTransitionListErrorDlg(testErrorList, isErrorAll, silentSuccess)) { if (dlg.ShowDialog(this) != DialogResult.OK) return true; // There are errors, and user does not want to ignore them } } } else if (!silentSuccess) { // No errors, 
confirm this to user MessageDlg.Show(this, Resources.PasteDlg_ShowNoErrors_No_errors); } InsertionParams = insertionParams; return false; // No errors } private void dataGrid_ColumnAdded(object sender, DataGridViewColumnEventArgs e) { ResizeComboBoxes(); } private void form_Resize(object sender, EventArgs e) { ResizeComboBoxes(); } } }
1
14,323
Probably cleaner and more general to do this: string.Format("Column {0}", i + 1). That way, if the local language has a convention like "3 column, this is", it still works. (A hedged sketch follows this record.)
ProteoWizard-pwiz
.cs
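A minimal, self-contained sketch of the reviewer's suggestion: put the column index inside the format template so translators can reorder the words. The template literal below stands in for the localized resource string from the diff (Resources.ImportTransitionListColumnSelectDlg_DisplayData_Column_); its exact text is an assumption.

using System;
using System.Globalization;

class ColumnHeaderDemo
{
    static void Main()
    {
        // Stand-in for the localized resource; a translation could legitimately
        // be something like "{0} column, this is" and still render correctly.
        const string template = "Column {0}";
        for (var i = 0; i < 3; i++)
        {
            // string.Format substitutes the 1-based index wherever {0} appears.
            Console.WriteLine(string.Format(CultureInfo.CurrentCulture, template, i + 1));
        }
    }
}

With the index as a format argument rather than a string concatenated onto the end, the word order is entirely in the translator's hands.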
@@ -74,11 +74,14 @@ func (v *validator) verifyActions(blk *Block, containCoinbase bool) error { // Verify transfers, votes, executions, witness, and secrets (balance is checked in RunActions) confirmedNonceMap := make(map[string]uint64) accountNonceMap := make(map[string][]uint64) + + transfers, votes, executions := action.ClassifyActions(blk.Actions) var wg sync.WaitGroup - wg.Add(len(blk.Transfers) + len(blk.Votes) + len(blk.Executions)) + wg.Add(len(transfers) + len(votes) + len(executions)) + var correctAction uint64 var coinbaseCount uint64 - for _, tsf := range blk.Transfers { + for _, tsf := range transfers { // Verify Address // Verify Gas // Verify Nonce
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package blockchain import ( "bytes" "sort" "sync" "sync/atomic" "github.com/pkg/errors" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/address" "github.com/iotexproject/iotex-core/crypto" "github.com/iotexproject/iotex-core/iotxaddress" "github.com/iotexproject/iotex-core/pkg/hash" "github.com/iotexproject/iotex-core/pkg/keypair" "github.com/iotexproject/iotex-core/state" ) // Validator is the interface of validator type Validator interface { // Validate validates the given block's content Validate(block *Block, tipHeight uint64, tipHash hash.Hash32B, containCoinbase bool) error } type validator struct { sf state.Factory validatorAddr string } var ( // ErrInvalidTipHeight is the error returned when the block height is not valid ErrInvalidTipHeight = errors.New("invalid tip height") // ErrInvalidBlock is the error returned when the block is not valid ErrInvalidBlock = errors.New("failed to validate the block") // ErrActionNonce is the error when the nonce of the action is wrong ErrActionNonce = errors.New("invalid action nonce") // ErrGasHigherThanLimit indicates the error of gas value ErrGasHigherThanLimit = errors.New("invalid gas for action") // ErrInsufficientGas indicates the error of insufficient gas value for data storage ErrInsufficientGas = errors.New("insufficient intrinsic gas value") // ErrBalance indicates the error of balance ErrBalance = errors.New("invalid balance") // ErrDKGSecretProposal indicates the error of DKG secret proposal ErrDKGSecretProposal = errors.New("invalid DKG secret proposal") ) // Validate validates the given block's content func (v *validator) Validate(blk *Block, tipHeight uint64, tipHash hash.Hash32B, containCoinbase bool) error { if err := verifyHeightAndHash(blk, tipHeight, tipHash); err != nil { return errors.Wrap(err, "failed to verify block's height and hash") } if blk.IsDummyBlock() { return nil } if err := verifySigAndRoot(blk); err != nil { return errors.Wrap(err, "failed to verify block's signature and merkle root") } if v.sf != nil { return v.verifyActions(blk, containCoinbase) } return nil } func (v *validator) verifyActions(blk *Block, containCoinbase bool) error { // Verify transfers, votes, executions, witness, and secrets (balance is checked in RunActions) confirmedNonceMap := make(map[string]uint64) accountNonceMap := make(map[string][]uint64) var wg sync.WaitGroup wg.Add(len(blk.Transfers) + len(blk.Votes) + len(blk.Executions)) var correctAction uint64 var coinbaseCount uint64 for _, tsf := range blk.Transfers { // Verify Address // Verify Gas // Verify Nonce // Verify Signature // Verify Coinbase transfer if !tsf.IsCoinbase() { if _, err := iotxaddress.GetPubkeyHash(tsf.Sender()); err != nil { return errors.Wrapf(err, "failed to validate transfer sender's address %s", tsf.Sender()) } if _, err := iotxaddress.GetPubkeyHash(tsf.Recipient()); err != nil { return errors.Wrapf(err, "failed to validate transfer recipient's address %s", tsf.Recipient()) } } if blk.Header.height > 0 && !tsf.IsCoinbase() { // Reject over-gassed transfer if tsf.GasLimit() > GasLimit { return 
errors.Wrapf(ErrGasHigherThanLimit, "gas is higher than gas limit") } intrinsicGas, err := tsf.IntrinsicGas() if intrinsicGas > tsf.GasLimit() || err != nil { return errors.Wrapf(ErrInsufficientGas, "insufficient gas for transfer") } // Store the nonce of the sender and verify later if _, ok := confirmedNonceMap[tsf.Sender()]; !ok { accountNonce, err := v.sf.Nonce(tsf.Sender()) if err != nil { return errors.Wrap(err, "failed to get the nonce of transfer sender") } confirmedNonceMap[tsf.Sender()] = accountNonce accountNonceMap[tsf.Sender()] = make([]uint64, 0) } accountNonceMap[tsf.Sender()] = append(accountNonceMap[tsf.Sender()], tsf.Nonce()) } go func(tsf *action.Transfer, correctTsf *uint64, correctCoinbase *uint64) { defer wg.Done() // Verify coinbase transfer if tsf.IsCoinbase() { pkHash := keypair.HashPubKey(blk.Header.Pubkey) addr := address.New(blk.Header.chainID, pkHash[:]) if addr.IotxAddress() != tsf.Recipient() { return } atomic.AddUint64(correctCoinbase, uint64(1)) return } if err := action.Verify(tsf); err != nil { return } atomic.AddUint64(correctTsf, uint64(1)) }(tsf, &correctAction, &coinbaseCount) } for _, vote := range blk.Votes { // Verify Address // Verify Gas // Verify Nonce // Verify Signature if _, err := iotxaddress.GetPubkeyHash(vote.Voter()); err != nil { return errors.Wrapf(err, "failed to validate voter's address %s", vote.Voter()) } if vote.Votee() != action.EmptyAddress { if _, err := iotxaddress.GetPubkeyHash(vote.Votee()); err != nil { return errors.Wrapf(err, "failed to validate votee's address %s", vote.Votee()) } } if blk.Header.height > 0 { // Reject over-gassed vote if vote.GasLimit() > GasLimit { return errors.Wrapf(ErrGasHigherThanLimit, "gas is higher than gas limit") } intrinsicGas, err := vote.IntrinsicGas() if intrinsicGas > vote.GasLimit() || err != nil { return errors.Wrapf(ErrInsufficientGas, "insufficient gas for vote") } // Store the nonce of the voter and verify later voterAddress := vote.Voter() if _, ok := confirmedNonceMap[voterAddress]; !ok { accountNonce, err := v.sf.Nonce(voterAddress) if err != nil { return errors.Wrap(err, "failed to get the nonce of the voter") } confirmedNonceMap[voterAddress] = accountNonce accountNonceMap[voterAddress] = make([]uint64, 0) } accountNonceMap[voterAddress] = append(accountNonceMap[voterAddress], vote.Nonce()) } // Verify signature go func(vote *action.Vote, correctVote *uint64) { defer wg.Done() if err := action.Verify(vote); err != nil { return } atomic.AddUint64(correctVote, uint64(1)) }(vote, &correctAction) } for _, execution := range blk.Executions { // Verify Address // Verify Nonce // Verify Signature // Verify Gas // Verify Amount if _, err := iotxaddress.GetPubkeyHash(execution.Executor()); err != nil { return errors.Wrapf(err, "failed to validate executor's address %s", execution.Executor()) } if execution.Contract() != action.EmptyAddress { if _, err := iotxaddress.GetPubkeyHash(execution.Contract()); err != nil { return errors.Wrapf(err, "failed to validate contract's address %s", execution.Contract()) } } if blk.Header.height > 0 { // Store the nonce of the executor and verify later executor := execution.Executor() if _, ok := confirmedNonceMap[executor]; !ok { accountNonce, err := v.sf.Nonce(executor) if err != nil { return errors.Wrap(err, "failed to get the nonce of the executor") } confirmedNonceMap[executor] = accountNonce accountNonceMap[executor] = make([]uint64, 0) } accountNonceMap[executor] = append(accountNonceMap[executor], execution.Nonce()) } // Verify signature go 
func(execution *action.Execution, correctVote *uint64) { defer wg.Done() if err := action.Verify(execution); err != nil { return } atomic.AddUint64(correctVote, uint64(1)) }(execution, &correctAction) // Reject over-gassed execution if execution.GasLimit() > GasLimit { return errors.Wrapf(ErrGasHigherThanLimit, "gas is higher than gas limit") } intrinsicGas, err := execution.IntrinsicGas() if intrinsicGas > execution.GasLimit() || err != nil { return errors.Wrapf(ErrInsufficientGas, "insufficient gas for execution") } // Reject execution of negative amount if execution.Amount().Sign() < 0 { return errors.Wrapf(ErrBalance, "negative value") } } wg.Wait() // Verify coinbase transfer count if (containCoinbase && coinbaseCount != 1) || (!containCoinbase && coinbaseCount != 0) { return errors.Wrapf( ErrInvalidBlock, "wrong number of coinbase transfers") } if correctAction+coinbaseCount != uint64(len(blk.Transfers)+len(blk.Votes)+len(blk.Executions)) { return errors.Wrapf( ErrInvalidBlock, "failed to verify actions signature") } // Verify Witness if blk.SecretWitness != nil { // Verify witness sender address if _, err := iotxaddress.GetPubkeyHash(blk.SecretWitness.SrcAddr()); err != nil { return errors.Wrapf(err, "failed to validate witness sender's address %s", blk.SecretWitness.SrcAddr()) } // Store the nonce of the witness sender and verify later if _, ok := confirmedNonceMap[blk.SecretWitness.SrcAddr()]; !ok { accountNonce, err := v.sf.Nonce(blk.SecretWitness.SrcAddr()) if err != nil { return errors.Wrap(err, "failed to get the nonce of secret sender") } confirmedNonceMap[blk.SecretWitness.SrcAddr()] = accountNonce accountNonceMap[blk.SecretWitness.SrcAddr()] = make([]uint64, 0) } accountNonceMap[blk.SecretWitness.SrcAddr()] = append(accountNonceMap[blk.SecretWitness.SrcAddr()], blk.SecretWitness.Nonce()) } // Verify Secrets for _, sp := range blk.SecretProposals { // Verify address if _, err := iotxaddress.GetPubkeyHash(sp.SrcAddr()); err != nil { return errors.Wrapf(err, "failed to validate secret sender's address %s", sp.SrcAddr()) } if _, err := iotxaddress.GetPubkeyHash(sp.DstAddr()); err != nil { return errors.Wrapf(err, "failed to validate secret recipient's address %s", sp.DstAddr()) } // Store the nonce of the sender and verify later if _, ok := confirmedNonceMap[sp.SrcAddr()]; !ok { accountNonce, err := v.sf.Nonce(sp.SrcAddr()) if err != nil { return errors.Wrap(err, "failed to get the nonce of secret sender") } confirmedNonceMap[sp.SrcAddr()] = accountNonce accountNonceMap[sp.SrcAddr()] = make([]uint64, 0) } accountNonceMap[sp.SrcAddr()] = append(accountNonceMap[sp.SrcAddr()], sp.Nonce()) // verify secret if the validator is recipient if v.validatorAddr == sp.DstAddr() { validatorID := iotxaddress.CreateID(v.validatorAddr) result, err := crypto.DKG.ShareVerify(validatorID, sp.Secret(), blk.SecretWitness.Witness()) if err == nil { err = ErrDKGSecretProposal } if !result { return errors.Wrap(err, "failed to verify the DKG secret share") } } } if blk.Header.height > 0 { //Verify each account's Nonce for address := range confirmedNonceMap { // The nonce of each action should be increasing, unique and consecutive confirmedNonce := confirmedNonceMap[address] receivedNonce := accountNonceMap[address] sort.Slice(receivedNonce, func(i, j int) bool { return receivedNonce[i] < receivedNonce[j] }) for i, nonce := range receivedNonce { if nonce != confirmedNonce+uint64(i+1) { return errors.Wrap(ErrActionNonce, "the nonce of the action is invalid") } } } } return nil } func 
verifyHeightAndHash(blk *Block, tipHeight uint64, tipHash hash.Hash32B) error { if blk == nil { return ErrInvalidBlock } // verify new block has height incremented by 1 if blk.Header.height != 0 && blk.Header.height != tipHeight+1 { return errors.Wrapf( ErrInvalidTipHeight, "wrong block height %d, expecting %d", blk.Header.height, tipHeight+1) } // verify new block has correctly linked to current tip if blk.Header.prevBlockHash != tipHash { return errors.Wrapf( ErrInvalidBlock, "wrong prev hash %x, expecting %x", blk.Header.prevBlockHash, tipHash) } return nil } func verifySigAndRoot(blk *Block) error { if blk.Header.height > 0 { // verify new block's signature is correct blkHash := blk.HashBlock() if !crypto.EC283.Verify(blk.Header.Pubkey, blkHash[:], blk.Header.blockSig) { return errors.Wrapf( ErrInvalidBlock, "failed to verify block's signature with public key: %x", blk.Header.Pubkey) } } hashExpect := blk.Header.txRoot hashActual := blk.TxRoot() if !bytes.Equal(hashExpect[:], hashActual[:]) { return errors.Wrapf( ErrInvalidBlock, "wrong tx hash %x, expecting %x", hashActual, hashActual) } return nil }
1
12,879
This can be made uniform too (see the sketch after this record).
iotexproject-iotex-core
go
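A minimal sketch of the "make it uniform" point: once action.ClassifyActions has split the block's actions, every verification loop can range over the classified slices (transfers, votes, executions) instead of the blk.Votes / blk.Executions fields. The toy types and classify function below are stand-ins for the real action package, not its API.

package main

import "fmt"

// Stand-in action types (assumption; the real ones live in the action package).
type transfer struct{ id int }
type vote struct{ id int }
type execution struct{ id int }

// classify mimics the shape of action.ClassifyActions: one pass over the
// block's actions, three typed slices out.
func classify(actions []interface{}) (ts []transfer, vs []vote, es []execution) {
	for _, a := range actions {
		switch v := a.(type) {
		case transfer:
			ts = append(ts, v)
		case vote:
			vs = append(vs, v)
		case execution:
			es = append(es, v)
		}
	}
	return
}

func main() {
	actions := []interface{}{transfer{1}, vote{2}, execution{3}}
	transfers, votes, executions := classify(actions)
	// All three loops now draw from the same classification, so none of them
	// needs to reach back into per-type block fields.
	for _, t := range transfers {
		fmt.Println("verify transfer", t.id)
	}
	for _, v := range votes {
		fmt.Println("verify vote", v.id)
	}
	for _, e := range executions {
		fmt.Println("verify execution", e.id)
	}
}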
@@ -16,14 +16,14 @@ type mock struct { peers []swarm.Address closestPeer swarm.Address closestPeerErr error - addPeerErr error + AddPeersErr error marshalJSONFunc func() ([]byte, error) mtx sync.Mutex } -func WithAddPeerErr(err error) Option { +func WithAddPeersErr(err error) Option { return optionFunc(func(d *mock) { - d.addPeerErr = err + d.AddPeersErr = err }) }
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package mock import ( "context" "sync" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/topology" ) type mock struct { peers []swarm.Address closestPeer swarm.Address closestPeerErr error addPeerErr error marshalJSONFunc func() ([]byte, error) mtx sync.Mutex } func WithAddPeerErr(err error) Option { return optionFunc(func(d *mock) { d.addPeerErr = err }) } func WithClosestPeer(addr swarm.Address) Option { return optionFunc(func(d *mock) { d.closestPeer = addr }) } func WithClosestPeerErr(err error) Option { return optionFunc(func(d *mock) { d.closestPeerErr = err }) } func WithMarshalJSONFunc(f func() ([]byte, error)) Option { return optionFunc(func(d *mock) { d.marshalJSONFunc = f }) } func NewTopologyDriver(opts ...Option) topology.Driver { d := new(mock) for _, o := range opts { o.apply(d) } return d } func (d *mock) AddPeer(_ context.Context, addr swarm.Address) error { if d.addPeerErr != nil { return d.addPeerErr } d.mtx.Lock() d.peers = append(d.peers, addr) d.mtx.Unlock() return nil } func (d *mock) Connected(ctx context.Context, addr swarm.Address) error { return d.AddPeer(ctx, addr) } func (d *mock) Disconnected(swarm.Address) { panic("todo") } func (d *mock) Peers() []swarm.Address { return d.peers } func (d *mock) ClosestPeer(addr swarm.Address) (peerAddr swarm.Address, err error) { return d.closestPeer, d.closestPeerErr } func (d *mock) SubscribePeersChange() (c <-chan struct{}, unsubscribe func()) { return c, unsubscribe } func (_ *mock) NeighborhoodDepth() uint8 { return 0 } // EachPeer iterates from closest bin to farthest func (_ *mock) EachPeer(_ topology.EachPeerFunc) error { panic("not implemented") // TODO: Implement } // EachPeerRev iterates from farthest bin to closest func (_ *mock) EachPeerRev(_ topology.EachPeerFunc) error { panic("not implemented") // TODO: Implement } func (d *mock) MarshalJSON() ([]byte, error) { return d.marshalJSONFunc() } func (d *mock) Close() error { return nil } type Option interface { apply(*mock) } type optionFunc func(*mock) func (f optionFunc) apply(r *mock) { f(r) }
1
11,867
It looks to me like this does not need to be exported (see the sketch after this record).
ethersphere-bee
go
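A small runnable sketch of the reviewer's point: the option constructor stays exported, but the field it sets can remain unexported, because callers only ever reach it through the Option. The names mirror the mock under review; the demo main is illustrative only.

package main

import (
	"errors"
	"fmt"
)

type mock struct {
	addPeersErr error // unexported: only settable via WithAddPeersErr
}

type Option interface{ apply(*mock) }

type optionFunc func(*mock)

func (f optionFunc) apply(m *mock) { f(m) }

// WithAddPeersErr is the public surface; the backing field never leaks.
func WithAddPeersErr(err error) Option {
	return optionFunc(func(m *mock) { m.addPeersErr = err })
}

func main() {
	m := new(mock)
	WithAddPeersErr(errors.New("boom")).apply(m)
	fmt.Println(m.addPeersErr) // prints: boom
}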
@@ -61,6 +61,14 @@ public abstract class ApiConfig { } } + /** + * Creates an ApiConfig with no content. Exposed for testing. + */ + public static ApiConfig createDummyApiConfig() { + return new AutoValue_ApiConfig(ImmutableMap.<String, InterfaceConfig> builder().build(), null, + false); + } + private static String getPackageName(ConfigProto configProto) { Map<String, LanguageSettingsProto> settingsMap = configProto.getLanguageSettings(); String language = configProto.getLanguage();
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen; import com.google.api.tools.framework.model.Diag; import com.google.api.tools.framework.model.DiagCollector; import com.google.api.tools.framework.model.Interface; import com.google.api.tools.framework.model.Model; import com.google.api.tools.framework.model.SimpleLocation; import com.google.api.tools.framework.model.SymbolTable; import com.google.common.collect.ImmutableMap; import java.util.Map; import javax.annotation.Nullable; /** * ApiConfig represents the code-gen config for an API library. */ @com.google.auto.value.AutoValue public abstract class ApiConfig { abstract ImmutableMap<String, InterfaceConfig> getInterfaceConfigMap(); /** * Returns the package name. */ @Nullable public abstract String getPackageName(); /** * Whether or not we should generate code samples. */ public abstract boolean generateSamples(); /** * Creates an instance of ApiConfig based on ConfigProto, linking up API interface configurations * with specified interfaces in interfaceConfigMap. On errors, null will be returned, and * diagnostics are reported to the model. */ @Nullable public static ApiConfig createApiConfig(Model model, ConfigProto configProto) { ImmutableMap<String, InterfaceConfig> interfaceConfigMap = createInterfaceConfigMap(model, configProto, model.getSymbolTable()); if (interfaceConfigMap == null) { return null; } else { return new AutoValue_ApiConfig( interfaceConfigMap, getPackageName(configProto), configProto.getGenerateSamples()); } } private static String getPackageName(ConfigProto configProto) { Map<String, LanguageSettingsProto> settingsMap = configProto.getLanguageSettings(); String language = configProto.getLanguage(); if (settingsMap.containsKey(language)) { return settingsMap.get(language).getPackageName(); } else { return null; } } private static ImmutableMap<String, InterfaceConfig> createInterfaceConfigMap( DiagCollector diagCollector, ConfigProto configProto, SymbolTable symbolTable) { ImmutableMap.Builder<String, InterfaceConfig> interfaceConfigMap = ImmutableMap.<String, InterfaceConfig>builder(); for (InterfaceConfigProto interfaceConfigProto : configProto.getInterfacesList()) { Interface iface = symbolTable.lookupInterface(interfaceConfigProto.getName()); if (iface == null || !iface.isReachable()) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "interface not found: %s", interfaceConfigProto.getName())); continue; } InterfaceConfig interfaceConfig = InterfaceConfig.createInterfaceConfig(diagCollector, interfaceConfigProto, iface); if (interfaceConfig == null) { continue; } interfaceConfigMap.put(interfaceConfigProto.getName(), interfaceConfig); } if (diagCollector.getErrorCount() > 0) { return null; } else { return interfaceConfigMap.build(); } } /** * Returns the InterfaceConfig for the given API interface. 
*/ public InterfaceConfig getInterfaceConfig(Interface iface) { InterfaceConfig interfaceConfig = getInterfaceConfigMap().get(iface.getFullName()); if (interfaceConfig == null) { throw new IllegalArgumentException( "no interface config for interface '" + iface.getFullName() + "'"); } return interfaceConfig; } }
1
15,347
It looks like this is only used to extract the snippet file names from GapicProviderFactory. I wonder if it would be better to expose the snippet file names directly, without exposing a test-only function in non-test code. (A sketch of that alternative follows this record.)
googleapis-gapic-generator
java
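A sketch of the alternative the reviewer floats: let the provider factory surface the snippet file names itself, so tests never need a dummy ApiConfig. The class name, method name, and file names below are hypothetical, not the gapic-generator API.

import java.util.Arrays;
import java.util.List;

public class SnippetFileNamesDemo {

    // Hypothetical stand-in for GapicProviderFactory exposing its snippet list
    // as plain data instead of requiring a constructed ApiConfig.
    static List<String> getSnippetFileNames() {
        return Arrays.asList("main.snip", "settings.snip");
    }

    public static void main(String[] args) {
        // Callers (including tests) read the names directly.
        for (String name : getSnippetFileNames()) {
            System.out.println(name);
        }
    }
}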
@@ -511,18 +511,6 @@ public class DataversePage implements java.io.Serializable { } setEditInputLevel(false); } - - public void toggleInputLevel( Long mdbId, long dsftId){ - for (MetadataBlock mdb : allMetadataBlocks) { - if (mdb.getId().equals(mdbId)) { - for (DatasetFieldType dsftTest : mdb.getDatasetFieldTypes()) { - if (dsftTest.getId().equals(dsftId)) { - dsftTest.setRequiredDV(!dsftTest.isRequiredDV()); - } - } - } - } - } public void updateInclude(Long mdbId, long dsftId) { List<DatasetFieldType> childDSFT = new ArrayList<>();
1
package edu.harvard.iq.dataverse; import edu.harvard.iq.dataverse.UserNotification.Type; import edu.harvard.iq.dataverse.authorization.Permission; import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser; import edu.harvard.iq.dataverse.authorization.users.User; import edu.harvard.iq.dataverse.dataaccess.DataAccess; import edu.harvard.iq.dataverse.dataverse.DataverseUtil; import edu.harvard.iq.dataverse.engine.command.Command; import edu.harvard.iq.dataverse.engine.command.DataverseRequest; import edu.harvard.iq.dataverse.engine.command.exception.CommandException; import edu.harvard.iq.dataverse.engine.command.impl.CreateDataverseCommand; import edu.harvard.iq.dataverse.engine.command.impl.CreateSavedSearchCommand; import edu.harvard.iq.dataverse.engine.command.impl.DeleteDataverseCommand; import edu.harvard.iq.dataverse.engine.command.impl.LinkDataverseCommand; import edu.harvard.iq.dataverse.engine.command.impl.PublishDataverseCommand; import edu.harvard.iq.dataverse.engine.command.impl.UpdateDataverseCommand; import edu.harvard.iq.dataverse.search.FacetCategory; import edu.harvard.iq.dataverse.search.IndexServiceBean; import edu.harvard.iq.dataverse.search.SearchException; import edu.harvard.iq.dataverse.search.SearchFields; import edu.harvard.iq.dataverse.search.SearchIncludeFragment; import edu.harvard.iq.dataverse.search.SearchServiceBean; import edu.harvard.iq.dataverse.search.savedsearch.SavedSearch; import edu.harvard.iq.dataverse.search.savedsearch.SavedSearchFilterQuery; import edu.harvard.iq.dataverse.search.savedsearch.SavedSearchServiceBean; import edu.harvard.iq.dataverse.util.BundleUtil; import edu.harvard.iq.dataverse.util.JsfHelper; import static edu.harvard.iq.dataverse.util.JsfHelper.JH; import edu.harvard.iq.dataverse.util.SystemConfig; import java.util.List; import javax.ejb.EJB; import javax.faces.application.FacesMessage; import javax.faces.context.FacesContext; import javax.faces.event.ActionEvent; import javax.faces.view.ViewScoped; import javax.inject.Inject; import javax.inject.Named; import java.util.AbstractMap; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.logging.Level; import java.util.logging.Logger; import javax.faces.component.UIComponent; import javax.faces.component.UIInput; import org.primefaces.model.DualListModel; import javax.ejb.EJBException; import javax.faces.event.ValueChangeEvent; import javax.faces.model.SelectItem; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang.StringUtils; import org.primefaces.PrimeFaces; import org.primefaces.event.TransferEvent; /** * * @author gdurand */ @ViewScoped @Named("DataversePage") public class DataversePage implements java.io.Serializable { private static final Logger logger = Logger.getLogger(DataversePage.class.getCanonicalName()); public enum EditMode { CREATE, INFO, FEATURED } public enum LinkMode { SAVEDSEARCH, LINKDATAVERSE } @EJB DataverseServiceBean dataverseService; @EJB DatasetServiceBean datasetService; @Inject DataverseSession session; @EJB EjbDataverseEngine commandEngine; @EJB SearchServiceBean searchService; @EJB DatasetFieldServiceBean datasetFieldService; @EJB DataverseFacetServiceBean dataverseFacetService; @EJB UserNotificationServiceBean userNotificationService; @EJB FeaturedDataverseServiceBean featuredDataverseService; @EJB DataverseFieldTypeInputLevelServiceBean dataverseFieldTypeInputLevelService; @EJB 
PermissionServiceBean permissionService; @EJB ControlledVocabularyValueServiceBean controlledVocabularyValueServiceBean; @EJB SavedSearchServiceBean savedSearchService; @EJB SystemConfig systemConfig; @EJB DataverseRoleServiceBean dataverseRoleServiceBean; @Inject SearchIncludeFragment searchIncludeFragment; @Inject DataverseRequestServiceBean dvRequestService; @Inject SettingsWrapper settingsWrapper; @EJB DataverseLinkingServiceBean linkingService; @Inject PermissionsWrapper permissionsWrapper; @Inject DataverseHeaderFragment dataverseHeaderFragment; private Dataverse dataverse = new Dataverse(); /** * View parameters */ private Long id = null; private String alias = null; private Long ownerId = null; private EditMode editMode; private LinkMode linkMode; private DualListModel<DatasetFieldType> facets = new DualListModel<>(new ArrayList<>(), new ArrayList<>()); private DualListModel<Dataverse> featuredDataverses = new DualListModel<>(new ArrayList<>(), new ArrayList<>()); private List<Dataverse> dataversesForLinking; private Long linkingDataverseId; private List<SelectItem> linkingDVSelectItems; private Dataverse linkingDataverse; private List<ControlledVocabularyValue> selectedSubjects; public List<ControlledVocabularyValue> getSelectedSubjects() { return selectedSubjects; } public void setSelectedSubjects(List<ControlledVocabularyValue> selectedSubjects) { this.selectedSubjects = selectedSubjects; } public Dataverse getLinkingDataverse() { return linkingDataverse; } public void setLinkingDataverse(Dataverse linkingDataverse) { this.linkingDataverse = linkingDataverse; } public List<SelectItem> getLinkingDVSelectItems() { return linkingDVSelectItems; } public void setLinkingDVSelectItems(List<SelectItem> linkingDVSelectItems) { this.linkingDVSelectItems = linkingDVSelectItems; } public Long getLinkingDataverseId() { return linkingDataverseId; } public void setLinkingDataverseId(Long linkingDataverseId) { this.linkingDataverseId = linkingDataverseId; } public List<Dataverse> getDataversesForLinking() { return dataversesForLinking; } public void setDataversesForLinking(List<Dataverse> dataversesForLinking) { this.dataversesForLinking = dataversesForLinking; } private List<ControlledVocabularyValue> dataverseSubjectControlledVocabularyValues; public List<ControlledVocabularyValue> getDataverseSubjectControlledVocabularyValues() { return dataverseSubjectControlledVocabularyValues; } public void setDataverseSubjectControlledVocabularyValues(List<ControlledVocabularyValue> dataverseSubjectControlledVocabularyValues) { this.dataverseSubjectControlledVocabularyValues = dataverseSubjectControlledVocabularyValues; } private void updateDataverseSubjectSelectItems() { DatasetFieldType subjectDatasetField = datasetFieldService.findByName(DatasetFieldConstant.subject); setDataverseSubjectControlledVocabularyValues(controlledVocabularyValueServiceBean.findByDatasetFieldTypeId(subjectDatasetField.getId())); } public LinkMode getLinkMode() { return linkMode; } public void setLinkMode(LinkMode linkMode) { this.linkMode = linkMode; } public boolean showLinkingPopup() { String testquery = ""; if (session.getUser() == null) { return false; } if (dataverse == null) { return false; } if (query != null) { testquery = query; } return (session.getUser().isSuperuser() && (dataverse.getOwner() != null || !testquery.isEmpty())); } public void setupLinkingPopup (String popupSetting){ if (popupSetting.equals("link")){ setLinkMode(LinkMode.LINKDATAVERSE); } else { setLinkMode(LinkMode.SAVEDSEARCH); } 
updateLinkableDataverses(); } public void updateLinkableDataverses() { dataversesForLinking = new ArrayList<>(); linkingDVSelectItems = new ArrayList<>(); //Since only a super user function add all dvs dataversesForLinking = dataverseService.findAll();// permissionService.getDataversesUserHasPermissionOn(session.getUser(), Permission.PublishDataverse); /* List<DataverseRole> roles = dataverseRoleServiceBean.getDataverseRolesByPermission(Permission.PublishDataverse, dataverse.getId()); List<String> types = new ArrayList(); types.add("Dataverse"); for (Long dvIdAsInt : permissionService.getDvObjectIdsUserHasRoleOn(session.getUser(), roles, types, false)) { dataversesForLinking.add(dataverseService.find(dvIdAsInt)); }*/ //for linking - make sure the link hasn't occurred and its not int the tree if (this.linkMode.equals(LinkMode.LINKDATAVERSE)) { // remove this and it's parent tree dataversesForLinking.remove(dataverse); Dataverse testDV = dataverse; while(testDV.getOwner() != null){ dataversesForLinking.remove(testDV.getOwner()); testDV = testDV.getOwner(); } for (Dataverse removeLinked : linkingService.findLinkingDataverses(dataverse.getId())) { dataversesForLinking.remove(removeLinked); } } else{ //for saved search add all } for (Dataverse selectDV : dataversesForLinking) { linkingDVSelectItems.add(new SelectItem(selectDV.getId(), selectDV.getDisplayName())); } if (!dataversesForLinking.isEmpty() && dataversesForLinking.size() == 1 && dataversesForLinking.get(0) != null) { linkingDataverse = dataversesForLinking.get(0); linkingDataverseId = linkingDataverse.getId(); } } public void updateSelectedLinkingDV(ValueChangeEvent event) { linkingDataverseId = (Long) event.getNewValue(); } public Dataverse getDataverse() { return dataverse; } public void setDataverse(Dataverse dataverse) { this.dataverse = dataverse; } public Long getId() { return this.id; } public void setId(Long id) { this.id = id; } public String getAlias() { return this.alias; } public void setAlias(String alias) { this.alias = alias; } public EditMode getEditMode() { return editMode; } public void setEditMode(EditMode editMode) { this.editMode = editMode; } public Long getOwnerId() { return ownerId; } public void setOwnerId(Long ownerId) { this.ownerId = ownerId; } public void updateOwnerDataverse() { if (dataverse.getOwner() != null && dataverse.getOwner().getId() != null) { ownerId = dataverse.getOwner().getId(); logger.info("New host dataverse id: " + ownerId); // discard the dataverse already created: dataverse = new Dataverse(); // initialize a new new dataverse: init(); dataverseHeaderFragment.initBreadcrumbs(dataverse); } } public String init() { //System.out.println("_YE_OLDE_QUERY_COUNTER_"); // for debug purposes if (this.getAlias() != null || this.getId() != null || this.getOwnerId() == null) {// view mode for a dataverse if (this.getAlias() != null) { dataverse = dataverseService.findByAlias(this.getAlias()); } else if (this.getId() != null) { dataverse = dataverseService.find(this.getId()); } else { try { dataverse = dataverseService.findRootDataverse(); } catch (EJBException e) { // @todo handle case with no root dataverse (a fresh installation) with message about using API to create the root dataverse = null; } } // check if dv exists and user has permission if (dataverse == null) { return permissionsWrapper.notFound(); } if (!dataverse.isReleased() && !permissionService.on(dataverse).has(Permission.ViewUnpublishedDataverse)) { return permissionsWrapper.notAuthorized(); } ownerId = dataverse.getOwner() != null ? 
dataverse.getOwner().getId() : null; } else { // ownerId != null; create mode for a new child dataverse editMode = EditMode.CREATE; dataverse.setOwner(dataverseService.find( this.getOwnerId())); if (dataverse.getOwner() == null) { return permissionsWrapper.notFound(); } else if (!permissionService.on(dataverse.getOwner()).has(Permission.AddDataverse)) { return permissionsWrapper.notAuthorized(); } // set defaults - contact e-mail and affiliation from user dataverse.getDataverseContacts().add(new DataverseContact(dataverse, session.getUser().getDisplayInfo().getEmailAddress())); dataverse.setAffiliation(session.getUser().getDisplayInfo().getAffiliation()); setupForGeneralInfoEdit(); // FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Create New Dataverse", " - Create a new dataverse that will be a child dataverse of the parent you clicked from. Asterisks indicate required fields.")); if (dataverse.getName() == null) { dataverse.setName(DataverseUtil.getSuggestedDataverseNameOnCreate(session.getUser())); } } return null; } public void initFeaturedDataverses() { List<Dataverse> featuredSource = new ArrayList<>(); List<Dataverse> featuredTarget = new ArrayList<>(); featuredSource.addAll(dataverseService.findAllPublishedByOwnerId(dataverse.getId())); featuredSource.addAll(linkingService.findLinkingDataverses(dataverse.getId())); List<DataverseFeaturedDataverse> featuredList = featuredDataverseService.findByDataverseId(dataverse.getId()); for (DataverseFeaturedDataverse dfd : featuredList) { Dataverse fd = dfd.getFeaturedDataverse(); featuredTarget.add(fd); featuredSource.remove(fd); } featuredDataverses = new DualListModel<>(featuredSource, featuredTarget); } public void initFacets() { List<DatasetFieldType> facetsSource = new ArrayList<>(); List<DatasetFieldType> facetsTarget = new ArrayList<>(); facetsSource.addAll(datasetFieldService.findAllFacetableFieldTypes()); List<DataverseFacet> facetsList = dataverseFacetService.findByDataverseId(dataverse.getFacetRootId()); for (DataverseFacet dvFacet : facetsList) { DatasetFieldType dsfType = dvFacet.getDatasetFieldType(); facetsTarget.add(dsfType); facetsSource.remove(dsfType); } facets = new DualListModel<>(facetsSource, facetsTarget); facetMetadataBlockId = null; } private void setupForGeneralInfoEdit() { updateDataverseSubjectSelectItems(); initFacets(); refreshAllMetadataBlocks(); } private Long facetMetadataBlockId; public Long getFacetMetadataBlockId() { return facetMetadataBlockId; } public void setFacetMetadataBlockId(Long facetMetadataBlockId) { this.facetMetadataBlockId = facetMetadataBlockId; } public void changeFacetsMetadataBlock() { if (facetMetadataBlockId == null) { facets.setSource(datasetFieldService.findAllFacetableFieldTypes()); } else { facets.setSource(datasetFieldService.findFacetableFieldTypesByMetadataBlock(facetMetadataBlockId)); } facets.getSource().removeAll(facets.getTarget()); } public void toggleFacetRoot() { if (!dataverse.isFacetRoot()) { initFacets(); } } public void onFacetTransfer(TransferEvent event) { for (Object item : event.getItems()) { DatasetFieldType facet = (DatasetFieldType) item; if (facetMetadataBlockId != null && !facetMetadataBlockId.equals(facet.getMetadataBlock().getId())) { facets.getSource().remove(facet); } } } private List<Dataverse> carouselFeaturedDataverses = null; public List<Dataverse> getCarouselFeaturedDataverses() { if (carouselFeaturedDataverses != null) { return carouselFeaturedDataverses; } carouselFeaturedDataverses = 
featuredDataverseService.findByDataverseIdQuick(dataverse.getId());/*new ArrayList(); List<DataverseFeaturedDataverse> featuredList = featuredDataverseService.findByDataverseId(dataverse.getId()); for (DataverseFeaturedDataverse dfd : featuredList) { Dataverse fd = dfd.getFeaturedDataverse(); retList.add(fd); }*/ return carouselFeaturedDataverses; } public List getContents() { List contentsList = dataverseService.findByOwnerId(dataverse.getId()); contentsList.addAll(datasetService.findByOwnerId(dataverse.getId())); return contentsList; } public void edit(EditMode editMode) { this.editMode = editMode; if (editMode == EditMode.INFO) { setupForGeneralInfoEdit(); JH.addMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataverse.edit.msg"), BundleUtil.getStringFromBundle("dataverse.edit.detailmsg")); } else if (editMode == EditMode.FEATURED) { initFeaturedDataverses(); } } public void refresh() { } private boolean openMetadataBlock; public boolean isOpenMetadataBlock() { return openMetadataBlock; } public void setOpenMetadataBlock(boolean openMetadataBlock) { this.openMetadataBlock = openMetadataBlock; } private boolean editInputLevel; public boolean isEditInputLevel() { return editInputLevel; } public void setEditInputLevel(boolean editInputLevel) { this.editInputLevel = editInputLevel; } public void showDatasetFieldTypes(Long mdbId) { showDatasetFieldTypes(mdbId, true); } public void showDatasetFieldTypes(Long mdbId, boolean allowEdit) { for (MetadataBlock mdb : allMetadataBlocks) { if (mdb.getId().equals(mdbId)) { mdb.setShowDatasetFieldTypes(true); openMetadataBlock = true; } } setEditInputLevel(allowEdit); } public void hideDatasetFieldTypes(Long mdbId) { for (MetadataBlock mdb : allMetadataBlocks) { if (mdb.getId().equals(mdbId)) { mdb.setShowDatasetFieldTypes(false); openMetadataBlock = false; } } setEditInputLevel(false); } public void toggleInputLevel( Long mdbId, long dsftId){ for (MetadataBlock mdb : allMetadataBlocks) { if (mdb.getId().equals(mdbId)) { for (DatasetFieldType dsftTest : mdb.getDatasetFieldTypes()) { if (dsftTest.getId().equals(dsftId)) { dsftTest.setRequiredDV(!dsftTest.isRequiredDV()); } } } } } public void updateInclude(Long mdbId, long dsftId) { List<DatasetFieldType> childDSFT = new ArrayList<>(); for (MetadataBlock mdb : allMetadataBlocks) { if (mdb.getId().equals(mdbId)) { for (DatasetFieldType dsftTest : mdb.getDatasetFieldTypes()) { if (dsftTest.getId().equals(dsftId)) { dsftTest.setOptionSelectItems(resetSelectItems(dsftTest)); if ((dsftTest.isHasParent() && !dsftTest.getParentDatasetFieldType().isInclude()) || (!dsftTest.isHasParent() && !dsftTest.isInclude())) { dsftTest.setRequiredDV(false); } if (dsftTest.isHasChildren()) { childDSFT.addAll(dsftTest.getChildDatasetFieldTypes()); } } } } } if (!childDSFT.isEmpty()) { for (DatasetFieldType dsftUpdate : childDSFT) { for (MetadataBlock mdb : allMetadataBlocks) { if (mdb.getId().equals(mdbId)) { for (DatasetFieldType dsftTest : mdb.getDatasetFieldTypes()) { if (dsftTest.getId().equals(dsftUpdate.getId())) { dsftTest.setOptionSelectItems(resetSelectItems(dsftTest)); } } } } } } PrimeFaces.current().executeScript("scrollAfterUpdate();"); } public List<SelectItem> resetSelectItems(DatasetFieldType typeIn) { List<SelectItem> retList = new ArrayList<>(); if ((typeIn.isHasParent() && typeIn.getParentDatasetFieldType().isInclude()) || (!typeIn.isHasParent() && typeIn.isInclude())) { SelectItem requiredItem = new SelectItem(); 
requiredItem.setLabel(BundleUtil.getStringFromBundle("dataverse.item.required")); requiredItem.setValue(true); retList.add(requiredItem); SelectItem optional = new SelectItem(); optional.setLabel(BundleUtil.getStringFromBundle("dataverse.item.optional")); optional.setValue(false); retList.add(optional); } else { SelectItem hidden = new SelectItem(); hidden.setLabel(BundleUtil.getStringFromBundle("dataverse.item.hidden")); hidden.setValue(false); hidden.setDisabled(true); retList.add(hidden); } return retList; } public void updateRequiredDatasetFieldTypes(Long mdbId, Long dsftId, boolean inVal) { for (MetadataBlock mdb : allMetadataBlocks) { if (mdb.getId().equals(mdbId)) { for (DatasetFieldType dsft : mdb.getDatasetFieldTypes()) { if (dsft.getId().equals(dsftId)) { dsft.setRequiredDV(!inVal); } } } } } public void updateOptionsRadio(Long mdbId, Long dsftId) { for (MetadataBlock mdb : allMetadataBlocks) { if (mdb.getId().equals(mdbId)) { for (DatasetFieldType dsft : mdb.getDatasetFieldTypes()) { if (dsft.getId().equals(dsftId)) { dsft.setOptionSelectItems(resetSelectItems(dsft)); } } } } } public String save() { List<DataverseFieldTypeInputLevel> listDFTIL = new ArrayList<>(); if (editMode != null && ( editMode.equals(EditMode.INFO) || editMode.equals(EditMode.CREATE))) { List<MetadataBlock> selectedBlocks = new ArrayList<>(); if (dataverse.isMetadataBlockRoot()) { dataverse.getMetadataBlocks().clear(); } for (MetadataBlock mdb : this.allMetadataBlocks) { if (dataverse.isMetadataBlockRoot() && (mdb.isSelected() || mdb.isRequired())) { selectedBlocks.add(mdb); for (DatasetFieldType dsft : mdb.getDatasetFieldTypes()) { if (dsft.isRequiredDV() && !dsft.isRequired() && ((!dsft.isHasParent() && dsft.isInclude()) || (dsft.isHasParent() && dsft.getParentDatasetFieldType().isInclude()))) { DataverseFieldTypeInputLevel dftil = new DataverseFieldTypeInputLevel(); dftil.setDatasetFieldType(dsft); dftil.setDataverse(dataverse); dftil.setRequired(true); dftil.setInclude(true); listDFTIL.add(dftil); } if ((!dsft.isHasParent() && !dsft.isInclude()) || (dsft.isHasParent() && !dsft.getParentDatasetFieldType().isInclude())) { DataverseFieldTypeInputLevel dftil = new DataverseFieldTypeInputLevel(); dftil.setDatasetFieldType(dsft); dftil.setDataverse(dataverse); dftil.setRequired(false); dftil.setInclude(false); listDFTIL.add(dftil); } } } } if (!selectedBlocks.isEmpty()) { dataverse.setMetadataBlocks(selectedBlocks); } if (!dataverse.isFacetRoot()) { facets.getTarget().clear(); } } Command<Dataverse> cmd = null; //TODO change to Create - for now the page is expecting INFO instead. Boolean create; if (dataverse.getId() == null) { if (session.getUser().isAuthenticated()) { if (dataverse.getOwner() == null || dataverse.getOwner().getId() == null) { dataverse.setOwner(ownerId != null ? 
dataverseService.find(ownerId) : null); } create = Boolean.TRUE; cmd = new CreateDataverseCommand(dataverse, dvRequestService.getDataverseRequest(), facets.getTarget(), listDFTIL); } else { JH.addMessage(FacesMessage.SEVERITY_FATAL, BundleUtil.getStringFromBundle("dataverse.create.authenticatedUsersOnly")); return null; } } else { create = Boolean.FALSE; if (editMode != null && editMode.equals(EditMode.FEATURED)) { cmd = new UpdateDataverseCommand(dataverse, null, featuredDataverses.getTarget(), dvRequestService.getDataverseRequest(), null); } else { cmd = new UpdateDataverseCommand(dataverse, facets.getTarget(), null, dvRequestService.getDataverseRequest(), listDFTIL); } } try { dataverse = commandEngine.submit(cmd); if (session.getUser() instanceof AuthenticatedUser) { if (create) { userNotificationService.sendNotification((AuthenticatedUser) session.getUser(), dataverse.getCreateDate(), Type.CREATEDV, dataverse.getId()); } } String message; if (editMode != null && editMode.equals(EditMode.FEATURED)) { message = BundleUtil.getStringFromBundle("dataverse.feature.update"); } else { message = (create) ? BundleUtil.getStringFromBundle("dataverse.create.success", Arrays.asList(settingsWrapper.getGuidesBaseUrl(), systemConfig.getGuidesVersion())) : BundleUtil.getStringFromBundle("dataverse.update.success"); } JsfHelper.addSuccessMessage(message); editMode = null; return returnRedirect(); } catch (CommandException ex) { logger.log(Level.SEVERE, "Unexpected Exception calling dataverse command", ex); String errMsg = create ? BundleUtil.getStringFromBundle("dataverse.create.failure") : BundleUtil.getStringFromBundle("dataverse.update.failure"); JH.addMessage(FacesMessage.SEVERITY_FATAL, errMsg); return null; } catch (Exception e) { logger.log(Level.SEVERE, "Unexpected Exception calling dataverse command", e); String errMsg = create ? BundleUtil.getStringFromBundle("dataverse.create.failure") : BundleUtil.getStringFromBundle("dataverse.update.failure"); JH.addMessage(FacesMessage.SEVERITY_FATAL, errMsg); return null; } } public String cancel() { // reset values dataverse = dataverseService.find(dataverse.getId()); ownerId = dataverse.getOwner() != null ? dataverse.getOwner().getId() : null; editMode = null; return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; } public boolean isRootDataverse() { return dataverse.getOwner() == null; } public Dataverse getOwner() { return (ownerId != null) ? 
dataverseService.find(ownerId) : null; } // METHODS for Dataverse Setup public boolean isInheritMetadataBlockFromParent() { return !dataverse.isMetadataBlockRoot(); } public void setInheritMetadataBlockFromParent(boolean inheritMetadataBlockFromParent) { dataverse.setMetadataBlockRoot(!inheritMetadataBlockFromParent); } public void editMetadataBlocks() { if (!dataverse.isMetadataBlockRoot()) { refreshAllMetadataBlocks(); } } public void editMetadataBlocks(boolean checkVal) { setInheritMetadataBlockFromParent(checkVal); if (!dataverse.isMetadataBlockRoot()) { refreshAllMetadataBlocks(); } } public String resetToInherit() { setInheritMetadataBlockFromParent(true); refreshAllMetadataBlocks(); return null; } public void cancelMetadataBlocks() { setInheritMetadataBlockFromParent(false); } public boolean isInheritFacetFromParent() { return !dataverse.isFacetRoot(); } public void setInheritFacetFromParent(boolean inheritFacetFromParent) { dataverse.setFacetRoot(!inheritFacetFromParent); } public DualListModel<DatasetFieldType> getFacets() { return facets; } public void setFacets(DualListModel<DatasetFieldType> facets) { this.facets = facets; } public DualListModel<Dataverse> getFeaturedDataverses() { return featuredDataverses; } public void setFeaturedDataverses(DualListModel<Dataverse> featuredDataverses) { this.featuredDataverses = featuredDataverses; } public String saveLinkedDataverse() { if (linkingDataverseId == null) { JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataverse.link.select")); return ""; } linkingDataverse = dataverseService.find(linkingDataverseId); LinkDataverseCommand cmd = new LinkDataverseCommand(dvRequestService.getDataverseRequest(), linkingDataverse, dataverse); try { commandEngine.submit(cmd); } catch (CommandException ex) { List<String> args = Arrays.asList(dataverse.getDisplayName(),linkingDataverse.getDisplayName()); String msg = BundleUtil.getStringFromBundle("dataverse.link.error", args); logger.log(Level.SEVERE, "{0} {1}", new Object[]{msg, ex}); JsfHelper.addErrorMessage(msg); return returnRedirect(); } JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataverse.linked.success.wait", getSuccessMessageArguments())); return returnRedirect(); } private List<String> getSuccessMessageArguments() { List<String> arguments = new ArrayList<>(); arguments.add(StringEscapeUtils.escapeHtml(dataverse.getDisplayName())); String linkString = "<a href=\"/dataverse/" + linkingDataverse.getAlias() + "\">" + StringEscapeUtils.escapeHtml(linkingDataverse.getDisplayName()) + "</a>"; arguments.add(linkString); return arguments; } @Deprecated private SavedSearch createSavedOfCurrentDataverse(AuthenticatedUser savedSearchCreator) { /** * Please note that we are relying on the fact that the Solr ID of a * dataverse never changes, unlike datasets and files, which will change * from "dataset_10_draft" to "dataset_10" when published, for example. 
*/ String queryForCurrentDataverse = SearchFields.ID + ":" + IndexServiceBean.solrDocIdentifierDataverse + dataverse.getId(); SavedSearch savedSearchToPersist = new SavedSearch(queryForCurrentDataverse, linkingDataverse, savedSearchCreator); SavedSearch savedSearchCreated = savedSearchService.add(savedSearchToPersist); return savedSearchCreated; } private SavedSearch createSavedSearchForChildren(AuthenticatedUser savedSearchCreator) { String wildcardQuery = "*"; SavedSearch savedSearchToPersist = new SavedSearch(wildcardQuery, linkingDataverse, savedSearchCreator); String dataversePath = dataverseService.determineDataversePath(dataverse); String filterDownToSubtree = SearchFields.SUBTREE + ":\"" + dataversePath + "\""; SavedSearchFilterQuery filterDownToSubtreeFilterQuery = new SavedSearchFilterQuery(filterDownToSubtree, savedSearchToPersist); savedSearchToPersist.setSavedSearchFilterQueries(Arrays.asList(filterDownToSubtreeFilterQuery)); SavedSearch savedSearchCreated = savedSearchService.add(savedSearchToPersist); return savedSearchCreated; } public String saveSavedSearch() { if (linkingDataverseId == null) { JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataverse.link.select")); return ""; } linkingDataverse = dataverseService.find(linkingDataverseId); AuthenticatedUser savedSearchCreator = getAuthenticatedUser(); if (savedSearchCreator == null) { String msg = BundleUtil.getStringFromBundle("dataverse.search.user"); logger.severe(msg); JsfHelper.addErrorMessage(msg); return returnRedirect(); } SavedSearch savedSearch = new SavedSearch(query, linkingDataverse, savedSearchCreator); savedSearch.setSavedSearchFilterQueries(new ArrayList<>()); for (String filterQuery : filterQueries) { /** * @todo Why are there null's here anyway? Turn on debug and figure * this out. 
*/ if (filterQuery != null && !filterQuery.isEmpty()) { SavedSearchFilterQuery ssfq = new SavedSearchFilterQuery(filterQuery,savedSearch); savedSearch.getSavedSearchFilterQueries().add(ssfq); } } CreateSavedSearchCommand cmd = new CreateSavedSearchCommand(dvRequestService.getDataverseRequest(), linkingDataverse, savedSearch); try { commandEngine.submit(cmd); List<String> arguments = new ArrayList<>(); String linkString = "<a href=\"/dataverse/" + linkingDataverse.getAlias() + "\">" + StringEscapeUtils.escapeHtml(linkingDataverse.getDisplayName()) + "</a>"; arguments.add(linkString); String successMessageString = BundleUtil.getStringFromBundle("dataverse.saved.search.success", arguments); JsfHelper.addSuccessMessage(successMessageString); return returnRedirect(); } catch (CommandException ex) { String msg = "There was a problem linking this search to yours: " + ex; logger.severe(msg); JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataverse.saved.search.failure") + " " + ex); return returnRedirect(); } } private AuthenticatedUser getAuthenticatedUser() { User user = session.getUser(); if (user.isAuthenticated()) { return (AuthenticatedUser) user; } else { return null; } } public String releaseDataverse() { if (session.getUser() instanceof AuthenticatedUser) { PublishDataverseCommand cmd = new PublishDataverseCommand(dvRequestService.getDataverseRequest(), dataverse); try { commandEngine.submit(cmd); JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataverse.publish.success")); } catch (Exception ex) { logger.log(Level.SEVERE, "Unexpected Exception calling publish dataverse command", ex); JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataverse.publish.failure")); } } else { JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataverse.publish.not.authorized")); } return returnRedirect(); } public String deleteDataverse() { DeleteDataverseCommand cmd = new DeleteDataverseCommand(dvRequestService.getDataverseRequest(), dataverse); try { commandEngine.submit(cmd); JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataverse.delete.success")); } catch (Exception ex) { logger.log(Level.SEVERE, "Unexpected Exception calling delete dataverse command", ex); JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataverse.delete.failure")); } return "/dataverse.xhtml?alias=" + dataverse.getOwner().getAlias() + "&faces-redirect=true"; } public String getMetadataBlockPreview(MetadataBlock mdb, int numberOfItems) { /// for beta, we will just preview the first n fields StringBuilder mdbPreview = new StringBuilder(); int count = 0; for (DatasetFieldType dsfType : mdb.getDatasetFieldTypes()) { if (!dsfType.isChild()) { if (count != 0) { mdbPreview.append(", "); if (count == numberOfItems) { mdbPreview.append("etc."); break; } } mdbPreview.append(dsfType.getDisplayName()); count++; } } return mdbPreview.toString(); } public Boolean isEmptyDataverse() { return !dataverseService.hasData(dataverse); } private List<MetadataBlock> allMetadataBlocks; public List<MetadataBlock> getAllMetadataBlocks() { return this.allMetadataBlocks; } public void setAllMetadataBlocks(List<MetadataBlock> inBlocks) { this.allMetadataBlocks = inBlocks; } private void refreshAllMetadataBlocks() { Long dataverseIdForInputLevel = dataverse.getId(); List<MetadataBlock> retList = new ArrayList<>(); List<MetadataBlock> availableBlocks = new ArrayList<>(); //Add System level blocks availableBlocks.addAll(dataverseService.findSystemMetadataBlocks()); Dataverse testDV = dataverse; //Add 
blocks associated with DV availableBlocks.addAll(dataverseService.findMetadataBlocksByDataverseId(dataverse.getId())); //Add blocks associated with dv going up inheritance tree while (testDV.getOwner() != null) { availableBlocks.addAll(dataverseService.findMetadataBlocksByDataverseId(testDV.getOwner().getId())); testDV = testDV.getOwner(); } for (MetadataBlock mdb : availableBlocks) { mdb.setSelected(false); mdb.setShowDatasetFieldTypes(false); if (!dataverse.isMetadataBlockRoot() && dataverse.getOwner() != null) { dataverseIdForInputLevel = dataverse.getMetadataRootId(); for (MetadataBlock mdbTest : dataverse.getOwner().getMetadataBlocks()) { if (mdb.equals(mdbTest)) { mdb.setSelected(true); } } } else { for (MetadataBlock mdbTest : dataverse.getMetadataBlocks(true)) { if (mdb.equals(mdbTest)) { mdb.setSelected(true); } } } for (DatasetFieldType dsft : mdb.getDatasetFieldTypes()) { if (!dsft.isChild()) { DataverseFieldTypeInputLevel dsfIl = dataverseFieldTypeInputLevelService.findByDataverseIdDatasetFieldTypeId(dataverseIdForInputLevel, dsft.getId()); if (dsfIl != null) { dsft.setRequiredDV(dsfIl.isRequired()); dsft.setInclude(dsfIl.isInclude()); } else { dsft.setRequiredDV(dsft.isRequired()); dsft.setInclude(true); } dsft.setOptionSelectItems(resetSelectItems(dsft)); if (dsft.isHasChildren()) { for (DatasetFieldType child : dsft.getChildDatasetFieldTypes()) { DataverseFieldTypeInputLevel dsfIlChild = dataverseFieldTypeInputLevelService.findByDataverseIdDatasetFieldTypeId(dataverseIdForInputLevel, child.getId()); if (dsfIlChild != null) { child.setRequiredDV(dsfIlChild.isRequired()); child.setInclude(dsfIlChild.isInclude()); } else { child.setRequiredDV(child.isRequired()); child.setInclude(true); } child.setOptionSelectItems(resetSelectItems(child)); } } } } retList.add(mdb); } setAllMetadataBlocks(retList); } public void validateAlias(FacesContext context, UIComponent toValidate, Object value) { if (!StringUtils.isEmpty((String) value)) { String alias = (String) value; boolean aliasFound = false; Dataverse dv = dataverseService.findByAlias(alias); if (editMode == DataversePage.EditMode.CREATE) { if (dv != null) { aliasFound = true; } } else { if (dv != null && !dv.getId().equals(dataverse.getId())) { aliasFound = true; } } if (aliasFound) { ((UIInput) toValidate).setValid(false); FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_ERROR, BundleUtil.getStringFromBundle("dataverse.alias"), BundleUtil.getStringFromBundle("dataverse.alias.taken")); context.addMessage(toValidate.getClientId(context), message); } } } private String returnRedirect(){ return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; } private Map<String, Integer> numberOfFacets = new HashMap<>(); public int getNumberOfFacets(String name, int defaultValue) { Integer numFacets = numberOfFacets.get(name); if (numFacets == null) { numberOfFacets.put(name, defaultValue); numFacets = defaultValue; } return numFacets; } public void incrementFacets(String name, int incrementNum) { Integer numFacets = numberOfFacets.get(name); if (numFacets == null) { numFacets = incrementNum; } numberOfFacets.put(name, numFacets + incrementNum); } private String query; private List<String> filterQueries = new ArrayList<>(); private List<FacetCategory> facetCategoryList = new ArrayList<>(); private String selectedTypesString; private String sortField; private SearchIncludeFragment.SortOrder sortOrder; private String searchFieldType = SearchFields.TYPE; private String searchFieldSubtree = SearchFields.SUBTREE; 
public String getQuery() { return query; } public void setQuery(String query) { this.query = query; } public List<String> getFilterQueries() { return filterQueries; } public void setFilterQueries(List<String> filterQueries) { this.filterQueries = filterQueries; } public List<FacetCategory> getFacetCategoryList() { return facetCategoryList; } public void setFacetCategoryList(List<FacetCategory> facetCategoryList) { this.facetCategoryList = facetCategoryList; } private int searchResultsCount = 0; public int getSearchResultsCount() { return searchResultsCount; } public void setSearchResultsCount(int searchResultsCount) { this.searchResultsCount = searchResultsCount; } public String getSelectedTypesString() { return selectedTypesString; } public void setSelectedTypesString(String selectedTypesString) { this.selectedTypesString = selectedTypesString; } public String getSortField() { return sortField; } public void setSortField(String sortField) { this.sortField = sortField; } public String getSortOrder() { if (sortOrder != null) { return sortOrder.toString(); } else { return null; } } /** * Allow only valid values to be set. * * Rather than passing in a String and converting it to an enum in this * method we could write a converter: * http://stackoverflow.com/questions/8609378/jsf-2-0-view-parameters-to-pass-objects */ public void setSortOrder(String sortOrderSupplied) { if (sortOrderSupplied != null) { if (sortOrderSupplied.equals(SearchIncludeFragment.SortOrder.asc.toString())) { this.sortOrder = SearchIncludeFragment.SortOrder.asc; } if (sortOrderSupplied.equals(SearchIncludeFragment.SortOrder.desc.toString())) { this.sortOrder = SearchIncludeFragment.SortOrder.desc; } } } public String getSearchFieldType() { return searchFieldType; } public void setSearchFieldType(String searchFieldType) { this.searchFieldType = searchFieldType; } public String getSearchFieldSubtree() { return searchFieldSubtree; } public void setSearchFieldSubtree(String searchFieldSubtree) { this.searchFieldSubtree = searchFieldSubtree; } public List<Dataverse> completeHostDataverseMenuList(String query) { if (session.getUser().isAuthenticated()) { return dataverseService.filterDataversesForHosting(query, dvRequestService.getDataverseRequest()); } else { return null; } } public Set<Entry<String, String>> getStorageDriverOptions() { HashMap<String, String> drivers =new HashMap<String, String>(); drivers.putAll(DataAccess.getStorageDriverLabels()); //Add an entry for the default (inherited from an ancestor or the system default) drivers.put(getDefaultStorageDriverLabel(), DataAccess.UNDEFINED_STORAGE_DRIVER_IDENTIFIER); return drivers.entrySet(); } public String getDefaultStorageDriverLabel() { String storageDriverId = DataAccess.DEFAULT_STORAGE_DRIVER_IDENTIFIER; Dataverse parent = dataverse.getOwner(); boolean fromAncestor=false; if(parent != null) { storageDriverId = parent.getEffectiveStorageDriverId(); //recurse dataverse chain to root and if any have a storagedriver set, fromAncestor is true while(parent!=null) { if(!parent.getStorageDriverId().equals(DataAccess.UNDEFINED_STORAGE_DRIVER_IDENTIFIER)) { fromAncestor=true; break; } parent=parent.getOwner(); } } String label = DataAccess.getStorageDriverLabelFor(storageDriverId); if(fromAncestor) { label = label + " " + BundleUtil.getStringFromBundle("dataverse.storage.inherited"); } else { label = label + " " + BundleUtil.getStringFromBundle("dataverse.storage.default"); } return label; } }
1
43,872
Another question: this is being removed, but I don't see a corresponding removal of where it's being called. Was it never used (or am I just missing it)? (Is it related to what was in the onclick that got removed, i.e. was that supposed to call this and not something in the backing bean?)
IQSS-dataverse
java
@@ -186,6 +186,10 @@ func (h *Helper) ValidateIssuedCertificateRequest(cr *cmapi.CertificateRequest, } } + if !apiutil.CertificateRequestHasApproved(cr) { + return nil, fmt.Errorf("CertificateRequest does not have an Approved condition: %+v", cr.Status.Conditions) + } + return cert, nil }
1
/* Copyright 2020 The cert-manager Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package helper import ( "context" "crypto" "crypto/ecdsa" "crypto/rsa" "crypto/x509" "fmt" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" apiutil "github.com/jetstack/cert-manager/pkg/api/util" cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1" cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" "github.com/jetstack/cert-manager/pkg/util" "github.com/jetstack/cert-manager/pkg/util/pki" "github.com/jetstack/cert-manager/test/e2e/framework/log" ) // WaitForCertificateRequestReady waits for the CertificateRequest resource to // enter a Ready state. func (h *Helper) WaitForCertificateRequestReady(ns, name string, timeout time.Duration) (*cmapi.CertificateRequest, error) { var cr *cmapi.CertificateRequest err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { var err error log.Logf("Waiting for CertificateRequest %s to be ready", name) cr, err = h.CMClient.CertmanagerV1().CertificateRequests(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error getting CertificateRequest %s: %v", name, err) } isReady := apiutil.CertificateRequestHasCondition(cr, cmapi.CertificateRequestCondition{ Type: cmapi.CertificateRequestConditionReady, Status: cmmeta.ConditionTrue, }) if !isReady { log.Logf("Expected CertificateRequest to have Ready condition 'true' but it has: %v", cr.Status.Conditions) return false, nil } return true, nil }, ) if err != nil { return nil, err } return cr, nil } // ValidateIssuedCertificateRequest will ensure that the given // CertificateRequest has a certificate issued for it, and that the details on // the x509 certificate are correct as defined by the CertificateRequest's // spec. 
func (h *Helper) ValidateIssuedCertificateRequest(cr *cmapi.CertificateRequest, key crypto.Signer, rootCAPEM []byte) (*x509.Certificate, error) { csr, err := pki.DecodeX509CertificateRequestBytes(cr.Spec.Request) if err != nil { return nil, fmt.Errorf("failed to decode CertificateRequest's Spec.Request: %s", err) } // validate private key is of the correct type (rsa or ecdsa) switch csr.PublicKeyAlgorithm { case x509.RSA: _, ok := key.(*rsa.PrivateKey) if !ok { return nil, fmt.Errorf("Expected private key of type RSA, but it was: %T", key) } case x509.ECDSA: _, ok := key.(*ecdsa.PrivateKey) if !ok { return nil, fmt.Errorf("Expected private key of type ECDSA, but it was: %T", key) } default: return nil, fmt.Errorf("unrecognised requested private key algorithm %q", csr.PublicKeyAlgorithm) } // TODO: validate private key KeySize // check the provided certificate is valid expectedOrganization := csr.Subject.Organization expectedDNSNames := csr.DNSNames expectedIPAddresses := csr.IPAddresses expectedURIs := csr.URIs cert, err := pki.DecodeX509CertificateBytes(cr.Status.Certificate) if err != nil { return nil, err } commonNameCorrect := true expectedCN := csr.Subject.CommonName if len(expectedCN) == 0 && len(cert.Subject.CommonName) > 0 { if !util.Contains(cert.DNSNames, cert.Subject.CommonName) { commonNameCorrect = false } } else if expectedCN != cert.Subject.CommonName { commonNameCorrect = false } if !commonNameCorrect || !util.EqualUnsorted(cert.DNSNames, expectedDNSNames) || !util.EqualUnsorted(cert.Subject.Organization, expectedOrganization) || !util.EqualIPsUnsorted(cert.IPAddresses, expectedIPAddresses) || !util.EqualURLsUnsorted(cert.URIs, expectedURIs) { return nil, fmt.Errorf("Expected certificate valid for CN %q, O %v, dnsNames %v, IPs %v, URIs %v but got a certificate valid for CN %q, O %v, dnsNames %v, IPs %v URIs %v", expectedCN, expectedOrganization, expectedDNSNames, expectedIPAddresses, expectedURIs, cert.Subject.CommonName, cert.Subject.Organization, cert.DNSNames, cert.IPAddresses, cert.URIs) } var expectedDNSName string if len(expectedDNSNames) > 0 { expectedDNSName = expectedDNSNames[0] } certificateKeyUsages, certificateExtKeyUsages, err := pki.BuildKeyUsages(cr.Spec.Usages, cr.Spec.IsCA) if err != nil { return nil, fmt.Errorf("failed to build key usages from certificate: %s", err) } var keyAlg cmapi.PrivateKeyAlgorithm switch csr.PublicKeyAlgorithm { case x509.RSA: keyAlg = cmapi.RSAKeyAlgorithm case x509.ECDSA: keyAlg = cmapi.ECDSAKeyAlgorithm default: return nil, fmt.Errorf("unsupported key algorithm type: %s", csr.PublicKeyAlgorithm) } defaultCertKeyUsages, defaultCertExtKeyUsages, err := h.defaultKeyUsagesToAdd(cr.Namespace, &cr.Spec.IssuerRef) if err != nil { return nil, err } certificateKeyUsages |= defaultCertKeyUsages certificateExtKeyUsages = append(certificateExtKeyUsages, defaultCertExtKeyUsages...) 
certificateExtKeyUsages = h.deduplicateExtKeyUsages(certificateExtKeyUsages) // If using ECDSA then ignore key encipherment if keyAlg == cmapi.ECDSAKeyAlgorithm { certificateKeyUsages &^= x509.KeyUsageKeyEncipherment cert.KeyUsage &^= x509.KeyUsageKeyEncipherment } if !h.keyUsagesMatch(cert.KeyUsage, cert.ExtKeyUsage, certificateKeyUsages, certificateExtKeyUsages) { return nil, fmt.Errorf("key usages and extended key usages do not match: exp=%s got=%s exp=%s got=%s", apiutil.KeyUsageStrings(certificateKeyUsages), apiutil.KeyUsageStrings(cert.KeyUsage), apiutil.ExtKeyUsageStrings(certificateExtKeyUsages), apiutil.ExtKeyUsageStrings(cert.ExtKeyUsage)) } // TODO: move this verification step out of this function if rootCAPEM != nil { rootCertPool := x509.NewCertPool() rootCertPool.AppendCertsFromPEM(rootCAPEM) intermediateCertPool := x509.NewCertPool() intermediateCertPool.AppendCertsFromPEM(cr.Status.CA) opts := x509.VerifyOptions{ DNSName: expectedDNSName, Intermediates: intermediateCertPool, Roots: rootCertPool, } if _, err := cert.Verify(opts); err != nil { return nil, err } } return cert, nil } func (h *Helper) WaitCertificateRequestIssuedValid(ns, name string, timeout time.Duration, key crypto.Signer) error { return h.WaitCertificateRequestIssuedValidTLS(ns, name, timeout, key, nil) } func (h *Helper) WaitCertificateRequestIssuedValidTLS(ns, name string, timeout time.Duration, key crypto.Signer, rootCAPEM []byte) error { cr, err := h.WaitForCertificateRequestReady(ns, name, timeout) if err != nil { log.Logf("Error waiting for CertificateRequest to become Ready: %v", err) h.Kubectl(ns).DescribeResource("certificaterequest", name) h.Kubectl(ns).Describe("order", "challenge") return err } _, err = h.ValidateIssuedCertificateRequest(cr, key, rootCAPEM) if err != nil { log.Logf("Error validating issued certificate: %v", err) h.Kubectl(ns).DescribeResource("certificaterequest", name) h.Kubectl(ns).Describe("order", "challenge") return err } return nil }
1
25,469
Suggestion: `.. does not have an Approved condition set to true`
jetstack-cert-manager
go
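A minimal sketch of the check above with the reviewer's suggested wording applied. This is a hypothetical revision, not the merged code: `requireApproved` is an illustrative name introduced here, while `apiutil.CertificateRequestHasApproved` is the helper already used in the patch.

package helper

import (
	"fmt"

	apiutil "github.com/jetstack/cert-manager/pkg/api/util"
	cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
)

// requireApproved (hypothetical helper) isolates the check added in the patch,
// with the error text revised per the review suggestion.
func requireApproved(cr *cmapi.CertificateRequest) error {
	if !apiutil.CertificateRequestHasApproved(cr) {
		return fmt.Errorf("CertificateRequest does not have an Approved condition set to true: %+v", cr.Status.Conditions)
	}
	return nil
}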
@@ -77,6 +77,7 @@ class BlazeMeterUploader(Reporter, AggregatorListener): self.client.user_id = self.parameters.get("user-id", None) self.client.data_signature = self.parameters.get("signature", None) self.client.kpi_target = self.parameters.get("kpi-target", self.client.kpi_target) + self.client.delete_files_before_test = False if not self.client.test_id: try:
1
""" Module for reporting into http://www.blazemeter.com/ service Copyright 2015 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import copy import json import logging import math import os import sys import time import traceback import zipfile import yaml from urwid import Pile, Text from bzt import ManualShutdown from bzt.engine import Reporter, Provisioning, ScenarioExecutor, Configuration, Service from bzt.modules.aggregator import DataPoint, KPISet, ConsolidatingAggregator, ResultsProvider, AggregatorListener from bzt.modules.console import WidgetProvider, PrioritizedWidget from bzt.modules.jmeter import JMeterExecutor from bzt.modules.services import Unpacker from bzt.six import BytesIO, text_type, iteritems, HTTPError, urlencode, Request, urlopen, r_input, URLError from bzt.utils import open_browser, get_full_path, get_files_recursive, replace_in_config from bzt.utils import to_json, dehumanize_time, MultiPartForm, BetterDict class BlazeMeterUploader(Reporter, AggregatorListener): """ Reporter class :type client: BlazeMeterClient """ def __init__(self): super(BlazeMeterUploader, self).__init__() self.browser_open = 'start' self.client = BlazeMeterClient(self.log) self.test_id = "" self.kpi_buffer = [] self.send_interval = 30 self.sess_name = None self._last_status_check = time.time() def prepare(self): """ Read options for uploading, check that they're sane """ super(BlazeMeterUploader, self).prepare() self.client.logger_limit = self.settings.get("request-logging-limit", self.client.logger_limit) self.client.address = self.settings.get("address", self.client.address) self.client.data_address = self.settings.get("data-address", self.client.data_address) self.client.timeout = dehumanize_time(self.settings.get("timeout", self.client.timeout)) self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval)) self.browser_open = self.settings.get("browser-open", self.browser_open) token = self.settings.get("token", "") if not token: self.log.warning("No BlazeMeter API key provided, will upload anonymously") self.client.token = token self.client.active_session_id = self.parameters.get("session-id", None) self.client.test_id = self.parameters.get("test-id", None) self.client.user_id = self.parameters.get("user-id", None) self.client.data_signature = self.parameters.get("signature", None) self.client.kpi_target = self.parameters.get("kpi-target", self.client.kpi_target) if not self.client.test_id: try: self.client.ping() # to check connectivity and auth except HTTPError: self.log.error("Cannot reach online results storage, maybe the address/token is wrong") raise if token: finder = ProjectFinder(self.parameters, self.settings, self.client, self.engine) self.test_id = finder.resolve_test_id({"type": "external"}, self.engine.config, []) self.sess_name = self.parameters.get("report-name", self.settings.get("report-name", self.sess_name)) if self.sess_name == 'ask' and sys.stdin.isatty(): self.sess_name = r_input("Please enter report-name: ") if isinstance(self.engine.aggregator, 
ResultsProvider): self.engine.aggregator.add_listener(self) def startup(self): """ Initiate online test """ super(BlazeMeterUploader, self).startup() if not self.client.active_session_id: try: url = self.client.start_online(self.test_id, self.sess_name) self.log.info("Started data feeding: %s", url) if self.browser_open in ('start', 'both'): open_browser(url) except KeyboardInterrupt: raise except BaseException as exc: self.log.debug("Exception: %s", traceback.format_exc()) self.log.warning("Failed to start feeding: %s", exc) raise def __get_jtls_and_more(self): """ Compress all files in artifacts dir to single zipfile :return: BytesIO """ mfile = BytesIO() max_file_size = self.settings.get('artifact-upload-size-limit', 10) * 1024 * 1024 # 10MB with zipfile.ZipFile(mfile, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zfh: for handler in self.engine.log.parent.handlers: if isinstance(handler, logging.FileHandler): zfh.write(handler.baseFilename, os.path.basename(handler.baseFilename)) for root, _, files in os.walk(self.engine.artifacts_dir): for filename in files: if os.path.getsize(os.path.join(root, filename)) <= max_file_size: zfh.write(os.path.join(root, filename), os.path.join(os.path.relpath(root, self.engine.artifacts_dir), filename)) else: msg = "File %s exceeds maximum size quota of %s and won't be included into upload" self.log.warning(msg, filename, max_file_size) return mfile def __upload_artifacts(self): """ If token provided, upload artifacts folder contents and jmeter_log else: jmeter_log only :return: """ if self.client.token: self.log.info("Uploading all artifacts as jtls_and_more.zip ...") mfile = self.__get_jtls_and_more() self.client.upload_file("jtls_and_more.zip", mfile.getvalue()) for executor in self.engine.provisioning.executors: if isinstance(executor, JMeterExecutor): if executor.jmeter_log: self.log.info("Uploading %s", executor.jmeter_log) self.client.upload_file(executor.jmeter_log) def post_process(self): """ Upload results if possible """ if not self.client.active_session_id: self.log.debug("No feeding session obtained, nothing to finalize") return try: self.__send_data(self.kpi_buffer, False, True) self.kpi_buffer = [] finally: self._postproc_phase2() if self.client.results_url: if self.browser_open in ('end', 'both'): open_browser(self.client.results_url) self.log.info("Online report link: %s", self.client.results_url) def _postproc_phase2(self): try: self.__upload_artifacts() except IOError: self.log.warning("Failed artifact upload: %s", traceback.format_exc()) finally: self.set_last_status_check(self.parameters.get('forced-last-check', self._last_status_check)) tries = self.send_interval # NOTE: you dirty one... 
while not self._last_status_check and tries > 0: self.log.info("Waiting for ping...") time.sleep(self.send_interval) tries -= 1 self._postproc_phase3() def _postproc_phase3(self): try: self.client.end_online() if self.engine.stopping_reason: note = "%s: %s" % (self.engine.stopping_reason.__class__.__name__, str(self.engine.stopping_reason)) sess = self.client.get_session(self.client.active_session_id) if 'note' in sess: note += "\n" + sess['note'] self.client.update_session(self.client.active_session_id, {"note": note}) except KeyboardInterrupt: raise except BaseException as exc: self.log.warning("Failed to finish online: %s", exc) def check(self): """ Send data if any in buffer :return: """ self.log.debug("KPI bulk buffer len: %s", len(self.kpi_buffer)) if len(self.kpi_buffer): if self.client.last_ts < (time.time() - self.send_interval): self.__send_data(self.kpi_buffer) self.kpi_buffer = [] return super(BlazeMeterUploader, self).check() def __send_data(self, data, do_check=True, is_final=False): """ :param data: list[bzt.modules.aggregator.DataPoint] :return: """ if not self.client.active_session_id: return try: self.client.send_kpi_data(data, do_check, is_final) except IOError as _: self.log.debug("Error sending data: %s", traceback.format_exc()) self.log.warning("Failed to send data, will retry in %s sec...", self.client.timeout) try: time.sleep(self.client.timeout) self.client.send_kpi_data(data, do_check, is_final) self.log.info("Succeeded with retry") except IOError as _: self.log.error("Fatal error sending data: %s", traceback.format_exc()) self.log.warning("Will skip failed data and continue running") if not data: return try: self.client.send_error_summary(data) except IOError as exc: self.log.debug("Failed sending error summary: %s", traceback.format_exc()) self.log.warning("Failed to send error summary: %s", exc) def aggregated_second(self, data): """ Send online data :param data: DataPoint :return: """ self.kpi_buffer.append(data) def set_last_status_check(self, value): self._last_status_check = value self.log.debug("Set last check time to: %s", self._last_status_check) class ProjectFinder(object): def __init__(self, parameters, settings, client, engine): super(ProjectFinder, self).__init__() self.default_test_name = "Taurus Test" self.client = client self.parameters = parameters self.settings = settings self.engine = engine self.test_name = None def resolve_test_id(self, test_config, taurus_config, rfiles): proj_name = self.parameters.get("project", self.settings.get("project", None)) if isinstance(proj_name, (int, float)): proj_id = int(proj_name) self.engine.log.debug("Treating project name as ID: %s", proj_id) elif proj_name is not None: proj_id = self.client.project_by_name(proj_name) else: proj_id = None self.test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name)) return self.client.test_by_name(self.test_name, test_config, taurus_config, rfiles, proj_id) class BlazeMeterClient(object): """ Service client class """ def __init__(self, parent_logger): self.kpi_target = 'labels_bulk' self.logger_limit = 256 self.user_id = None self.test_id = None self.log = parent_logger.getChild(self.__class__.__name__) self.token = None self.address = "https://a.blazemeter.com" self.data_address = "https://data.blazemeter.com" self.results_url = None self.active_session_id = None # FIXME: it's not good using it for both session id and master ID self.data_signature = None self.first_ts = sys.maxsize self.last_ts = 0 self.timeout = 10 
self.delete_files_before_test = True def _request(self, url, data=None, headers=None, checker=None, method=None): if not headers: headers = {} if self.token: headers["X-Api-Key"] = self.token log_method = 'GET' if data is None else 'POST' if method: log_method = method url = str(url) self.log.debug("Request: %s %s %s", log_method, url, data[:self.logger_limit] if data else None) # .encode("utf-8") is probably better data = data.encode() if isinstance(data, text_type) else data req = Request(url, data, headers) if method: req.get_method = lambda: method response = urlopen(req, timeout=self.timeout) if checker: checker(response) resp = response.read() if not isinstance(resp, str): resp = resp.decode() self.log.debug("Response: %s", resp[:self.logger_limit] if resp else None) try: return json.loads(resp) if len(resp) else {} except ValueError: self.log.warning("Non-JSON response from API: %s", resp) raise def start_online(self, test_id, session_name): """ Start online test :type test_id: str :return: """ self.log.info("Initiating data feeding...") data = urlencode({}) if self.token: url = self.address + "/api/latest/tests/%s/start-external" % test_id else: url = self.address + "/api/latest/sessions" resp = self._request(url, data) self.active_session_id = str(resp['result']['session']['id']) self.data_signature = str(resp['result']['signature']) self.test_id = test_id self.user_id = str(resp['result']['session']['userId']) if self.token: self.results_url = self.address + '/app/#reports/%s' % self.active_session_id if session_name: url = self.address + "/api/latest/sessions/%s" % self.active_session_id self._request(url, to_json({"name": str(session_name)}), headers={"Content-Type": "application/json"}, method='PATCH') else: self.test_id = resp['result']['session']['testId'] self.results_url = resp['result']['publicTokenUrl'] return self.results_url def start_taurus(self, test_id): """ Start online test :type test_id: str :return: """ self.log.info("Initiating cloud test with %s ...", self.address) data = urlencode({}) url = self.address + "/api/latest/tests/%s/start" % test_id resp = self._request(url, data) self.log.debug("Response: %s", resp['result']) self.active_session_id = str(resp['result']['id']) self.results_url = self.address + '/app/#reports/%s' % self.active_session_id return self.results_url def end_online(self): """ Finish online test """ if not self.active_session_id: self.log.debug("Feeding not started, so not stopping") else: self.log.info("Ending data feeding...") if self.token: url = self.address + "/api/latest/sessions/%s/terminate" self._request(url % self.active_session_id) else: url = self.address + "/api/latest/sessions/%s/terminateExternal" data = {"signature": self.data_signature, "testId": self.test_id, "sessionId": self.active_session_id} self._request(url % self.active_session_id, json.dumps(data)) def end_master(self, master_id): if master_id: self.log.info("Ending cloud test...") url = self.address + "/api/latest/masters/%s/terminate" self._request(url % master_id) def project_by_name(self, proj_name): """ :type proj_name: str :rtype: int """ projects = self.get_projects() matching = [] for project in projects: if project['name'] == proj_name: matching.append(project['id']) if len(matching) > 1: self.log.warning("Several projects IDs matched with '%s': %s", proj_name, matching) raise ValueError("Project name is ambiguous, please use project ID instead of name to distinguish it") elif len(matching) == 1: return matching[0] else: self.log.info("Creating project 
'%s'...", proj_name) return self.create_project(proj_name) def test_by_name(self, name, configuration, taurus_config, resource_files, proj_id): """ :type name: str :rtype: str """ tests = self.get_tests() test_id = None for test in tests: self.log.debug("Test: %s", test) if "name" in test and test['name'] == name: if test['configuration']['type'] == configuration['type']: if not proj_id or proj_id == test['projectId']: test_id = test['id'] self.log.debug("Matched: %s", test) if not test_id: self.log.debug("Creating new test") url = self.address + '/api/latest/tests' data = {"name": name, "projectId": proj_id, "configuration": configuration} hdr = {"Content-Type": " application/json"} resp = self._request(url, json.dumps(data), headers=hdr) test_id = resp['result']['id'] if self.delete_files_before_test: self.delete_test_files(test_id) if configuration['type'] == 'taurus': # FIXME: this is weird way to code, subclass it or something self.log.debug("Uploading files into the test: %s", resource_files) url = '%s/api/latest/tests/%s/files' % (self.address, test_id) body = MultiPartForm() body.add_file_as_string('script', 'taurus.yml', yaml.dump(taurus_config, default_flow_style=False, explicit_start=True, canonical=False)) for rfile in resource_files: body.add_file('files[]', rfile) hdr = {"Content-Type": str(body.get_content_type())} _ = self._request(url, body.form_as_bytes(), headers=hdr) self.log.debug("Using test ID: %s", test_id) return test_id def get_tests(self): """ :rtype: list """ tests = self._request(self.address + '/api/latest/tests') self.log.debug("Tests for user: %s", len(tests['result'])) return tests['result'] def send_kpi_data(self, data_buffer, is_check_response=True, is_final=False): """ Sends online data :param is_check_response: :type data_buffer: list[bzt.modules.aggregator.DataPoint] """ data = [] for sec in data_buffer: self.first_ts = min(self.first_ts, sec[DataPoint.TIMESTAMP]) self.last_ts = max(self.last_ts, sec[DataPoint.TIMESTAMP]) for lbl, item in iteritems(sec[DataPoint.CURRENT]): if lbl == '': label = "ALL" else: label = lbl json_item = None for lbl_item in data: if lbl_item["name"] == label: json_item = lbl_item break if not json_item: json_item = self.__label_skel(label) data.append(json_item) interval_item = self.__interval_json(item, sec) for r_code, cnt in iteritems(item[KPISet.RESP_CODES]): interval_item['rc'].append({"n": cnt, "rc": r_code}) json_item['intervals'].append(interval_item) cumul = sec[DataPoint.CUMULATIVE][lbl] json_item['n'] = cumul[KPISet.SAMPLE_COUNT] json_item["summary"] = self.__summary_json(cumul) data = {"labels": data, "sourceID": id(self)} if is_final: data['final'] = True url = self.data_address + "/submit.php?session_id=%s&signature=%s&test_id=%s&user_id=%s" url = url % (self.active_session_id, self.data_signature, self.test_id, self.user_id) url += "&pq=0&target=%s&update=1" % self.kpi_target hdr = {"Content-Type": " application/json"} response = self._request(url, to_json(data), headers=hdr) if response and 'response_code' in response and response['response_code'] != 200: raise RuntimeError("Failed to feed data, response code %s" % response['response_code']) if response and 'result' in response and is_check_response: result = response['result']['session'] self.log.debug("Result: %s", result) if 'statusCode' in result and result['statusCode'] > 100: self.log.info("Test was stopped through Web UI: %s", result['status']) raise ManualShutdown("The test was interrupted through Web UI") def __label_skel(self, name): return { "n": 
None, "name": name, "interval": 1, "intervals": [], "samplesNotCounted": 0, "assertionsNotCounted": 0, "failedEmbeddedResources": [], "failedEmbeddedResourcesSpilloverCount": 0, "otherErrorsCount": 0, "errors": [], "percentileHistogram": [], "percentileHistogramLatency": [], "percentileHistogramBytes": [], "empty": False, } def __summary_json(self, cumul): return { "first": self.first_ts, "last": self.last_ts, "duration": self.last_ts - self.first_ts, "failed": cumul[KPISet.FAILURES], "hits": cumul[KPISet.SAMPLE_COUNT], "avg": int(1000 * cumul[KPISet.AVG_RESP_TIME]), "min": int(1000 * cumul[KPISet.PERCENTILES]["0.0"]) if "0.0" in cumul[KPISet.PERCENTILES] else 0, "max": int(1000 * cumul[KPISet.PERCENTILES]["100.0"]) if "100.0" in cumul[KPISet.PERCENTILES] else 0, "std": int(1000 * cumul[KPISet.STDEV_RESP_TIME]), "tp90": int(1000 * cumul[KPISet.PERCENTILES]["90.0"]) if "90.0" in cumul[KPISet.PERCENTILES] else 0, "tp95": int(1000 * cumul[KPISet.PERCENTILES]["95.0"]) if "95.0" in cumul[KPISet.PERCENTILES] else 0, "tp99": int(1000 * cumul[KPISet.PERCENTILES]["99.0"]) if "99.0" in cumul[KPISet.PERCENTILES] else 0, "latencyAvg": int(1000 * cumul[KPISet.AVG_LATENCY]), "latencyMax": 0, "latencyMin": 0, "latencySTD": 0, "bytes": 0, "bytesMax": 0, "bytesMin": 0, "bytesAvg": 0, "bytesSTD": 0, "otherErrorsSpillcount": 0, } def __interval_json(self, item, sec): return { "ec": item[KPISet.FAILURES], "ts": sec[DataPoint.TIMESTAMP], "na": item[KPISet.CONCURRENCY], "n": item[KPISet.SAMPLE_COUNT], "failed": item[KPISet.FAILURES], "rc": [], # filled later "t": { "min": int(1000 * item[KPISet.PERCENTILES]["0.0"]) if "0.0" in item[KPISet.PERCENTILES] else 0, "max": int(1000 * item[KPISet.PERCENTILES]["100.0"]) if "100.0" in item[KPISet.PERCENTILES] else 0, "sum": 1000 * item[KPISet.AVG_RESP_TIME] * item[KPISet.SAMPLE_COUNT], "n": item[KPISet.SAMPLE_COUNT], "std": 1000 * item[KPISet.STDEV_RESP_TIME], "avg": 1000 * item[KPISet.AVG_RESP_TIME] }, "lt": { "min": 0, "max": 0, "sum": 1000 * item[KPISet.AVG_LATENCY] * item[KPISet.SAMPLE_COUNT], "n": 1000 * item[KPISet.SAMPLE_COUNT], "std": 0, "avg": 1000 * item[KPISet.AVG_LATENCY] }, "by": { "min": 0, "max": 0, "sum": 0, "n": 0, "std": 0, "avg": 0 }, } def ping(self): """ Quick check if we can access the service """ self._request(self.address + '/api/latest/web/version') def upload_file(self, filename, contents=None): """ Upload single artifact :type filename: str :type contents: str :raise IOError: """ body = MultiPartForm() if contents is None: body.add_file('file', filename) else: body.add_file_as_string('file', filename, contents) url = self.address + "/api/latest/image/%s/files?signature=%s" url = url % (self.active_session_id, self.data_signature) hdr = {"Content-Type": str(body.get_content_type())} response = self._request(url, body.form_as_bytes(), headers=hdr) if not response['result']: raise IOError("Upload failed: %s" % response) def send_error_summary(self, data_buffer): """ Sends error summary file :type data_buffer: list[bzt.modules.aggregator.DataPoint] """ if not data_buffer: return recent = data_buffer[-1] if not recent[DataPoint.CUMULATIVE][''][KPISet.ERRORS]: return errors = self.__errors_skel(recent[DataPoint.TIMESTAMP], self.active_session_id, self.test_id, self.user_id) for label, label_data in iteritems(recent[DataPoint.CUMULATIVE]): if not label_data[KPISet.ERRORS]: continue if label == '': label = 'ALL' error_item = self.__error_item_skel(label) for err_item in label_data[KPISet.ERRORS]: if err_item["type"] == KPISet.ERRTYPE_ASSERT: 
error_item['assertionsCount'] += err_item['cnt'] error_item['assertions'].append({ "name": "All Assertions", "failureMessage": err_item['msg'], "failure": True, "error": False, "count": err_item['cnt'] }) else: error_item['count'] += err_item['cnt'] error_item['responseInfo'].append({ "description": err_item['msg'], "code": err_item['rc'], "count": err_item['cnt'], }) errors['summery']['labels'].append(error_item) self.upload_file("sample.jtl.blazemeter.summery.json", to_json(errors)) def __errors_skel(self, t_stamp, sess_id, test_id, user_id): return { "reportInfo": { "sessionId": sess_id, "timestamp": t_stamp, "userId": user_id, "testId": test_id, "type": "SUMMERY", # "testName": test_name }, "timestamp": t_stamp, "summery": { "labels": [], "empty": False } } def __error_item_skel(self, label): return { "name": label, "count": 0, "responseInfo": [], "assertionsCount": 0, "assertions": [], "embeddedResourcesCount": 0, "embeddedResources": [], } def get_session(self, session_id): sess = self._request(self.address + '/api/latest/sessions/%s' % session_id) return sess['result'] def get_master(self, master_id): sess = self._request(self.address + '/api/latest/masters/%s' % master_id) return sess['result'] def get_master_status(self, master_id): sess = self._request(self.address + '/api/latest/masters/%s/status' % master_id) return sess['result'] def get_master_sessions(self, master_id): sess = self._request(self.address + '/api/latest/masters/%s/sessions' % master_id) return sess['result']['sessions'] if 'sessions' in sess['result'] else sess['result'] def get_projects(self): data = self._request(self.address + '/api/latest/projects') return data['result'] def create_project(self, proj_name): hdr = {"Content-Type": "application/json"} data = self._request(self.address + '/api/latest/projects', to_json({"name": str(proj_name)}), headers=hdr) return data['result']['id'] def get_user_info(self): res = self._request(self.address + '/api/latest/user') return res def get_kpis(self, master_id, min_ts): params = [ ("interval", 1), ("from", min_ts), ("master_ids[]", master_id), ] for item in ('t', 'lt', 'by', 'n', 'ec', 'ts', 'na'): params.append(("kpis[]", item)) labels = self.get_labels(master_id) for label in labels: params.append(("labels[]", label['id'])) url = self.address + "/api/latest/data/kpis?" + urlencode(params) res = self._request(url) return res['result'] def get_labels(self, master_id): url = self.address + "/api/latest/data/labels?" + urlencode({'master_id': master_id}) res = self._request(url) return res['result'] def update_session(self, active_session_id, data): hdr = {"Content-Type": "application/json"} data = self._request(self.address + '/api/latest/sessions/%s' % active_session_id, to_json(data), headers=hdr, method="PUT") return data['result'] def get_available_locations(self): user_info = self.get_user_info() return {str(x['id']): x for x in user_info['locations'] if not x['id'].startswith('harbor-')} def get_test_files(self, test_id): path = self.address + "/api/latest/web/elfinder/%s" % test_id query = urlencode({'cmd': 'open', 'target': 's1_Lw'}) url = path + '?' + query response = self._request(url) return response["files"] def delete_test_files(self, test_id): files = self.get_test_files(test_id) self.log.debug("Test files: %s", [filedict['name'] for filedict in files]) if not files: return path = "/api/latest/web/elfinder/%s" % test_id query = "cmd=rm&" + "&".join("targets[]=%s" % fname['hash'] for fname in files) url = self.address + path + '?' 
+ query response = self._request(url) if len(response['removed']) == len(files): self.log.debug("Successfully deleted %d test files", len(response['removed'])) def get_aggregate_report(self, master_id): url = self.address + "/api/latest/masters/%s/reports/aggregatereport/data" % master_id res = self._request(url) return res['result'] class MasterProvisioning(Provisioning): def get_rfiles(self): rfiles = [] for executor in self.executors: rfiles += executor.get_resource_files() self.log.debug("All resource files are: %s", rfiles) rfiles = [self.engine.find_file(x) for x in rfiles] rbases = [os.path.basename(get_full_path(rfile)) for rfile in rfiles] rpaths = [get_full_path(rfile, step_up=1) for rfile in rfiles] while rbases: base, path = rbases.pop(), rpaths.pop() if base in rbases: index = rbases.index(base) if path != rpaths[index]: message = 'Resource "%s" occurs more than one time, rename to avoid data loss' % base raise ValueError(message) prepared_files = self.__pack_dirs(rfiles) replace_in_config(self.engine.config, rfiles, list(map(os.path.basename, prepared_files)), log=self.log) return prepared_files def __pack_dirs(self, source_list): result_list = [] # files for upload packed_list = [] # files for unpacking for source in source_list: source = get_full_path(source) if os.path.isfile(source): result_list.append(source) else: # source is dir self.log.debug("Compress directory '%s'", source) base_dir_name = os.path.basename(source) zip_name = self.engine.create_artifact(base_dir_name, '.zip') relative_prefix_len = len(os.path.dirname(source)) with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_STORED) as zip_file: for _file in get_files_recursive(source): zip_file.write(_file, _file[relative_prefix_len:]) result_list.append(zip_name) packed_list.append(base_dir_name + '.zip') if packed_list: services = self.engine.config.get(Service.SERV, []) services.append({'module': Unpacker.UNPACK, Unpacker.FILES: packed_list}) return result_list class CloudProvisioning(MasterProvisioning, WidgetProvider): """ :type client: BlazeMeterClient :type results_reader: ResultsFromBZA """ LOC = "locations" def __init__(self): super(CloudProvisioning, self).__init__() self.results_reader = None self.client = BlazeMeterClient(self.log) self.test_id = None self.test_name = None self.__last_master_status = None self.browser_open = 'start' self.widget = None def prepare(self): if self.settings.get("dump-locations", False): self.log.warning("Dumping available locations instead of running the test") self._configure_client() info = self.client.get_user_info() locations = self.client.get_available_locations() for item in info['locations']: if item['id'] in locations: self.log.info("Location: %s\t%s", item['id'], item['title']) raise ManualShutdown("Done listing locations") super(CloudProvisioning, self).prepare() self.browser_open = self.settings.get("browser-open", self.browser_open) self._configure_client() self.__prepare_locations() rfiles = self.get_rfiles() config = self.get_config_for_cloud() bza_plugin = self.__get_bza_test_config() finder = ProjectFinder(self.parameters, self.settings, self.client, self.engine) finder.default_test_name = "Taurus Cloud Test" self.test_id = finder.resolve_test_id(bza_plugin, config, rfiles) self.test_name = finder.test_name self.widget = CloudProvWidget(self) if isinstance(self.engine.aggregator, ConsolidatingAggregator): self.results_reader = ResultsFromBZA(self.client) self.results_reader.log = self.log self.engine.aggregator.add_underling(self.results_reader) def 
_configure_client(self): self.client.logger_limit = self.settings.get("request-logging-limit", self.client.logger_limit) # TODO: go to "blazemeter" section for these settings by default? self.client.address = self.settings.get("address", self.client.address) self.client.token = self.settings.get("token", self.client.token) self.client.timeout = dehumanize_time(self.settings.get("timeout", self.client.timeout)) self.client.delete_files_before_test = self.settings.get("delete-test-files", self.client.delete_files_before_test) if not self.client.token: bmmod = self.engine.instantiate_module('blazemeter') self.client.token = bmmod.settings.get("token") if not self.client.token: raise ValueError("You must provide API token to use cloud provisioning") def __prepare_locations(self): available_locations = self.client.get_available_locations() for executor in self.executors: locations = self._get_locations(available_locations, executor) executor.get_load() # we need it to resolve load settings into full form for location in locations.keys(): if location not in available_locations: self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys())) raise ValueError("Invalid location requested: %s" % location) def get_config_for_cloud(self): config = copy.deepcopy(self.engine.config) if not isinstance(config[ScenarioExecutor.EXEC], list): config[ScenarioExecutor.EXEC] = [config[ScenarioExecutor.EXEC]] provisioning = config.pop(Provisioning.PROV) for execution in config[ScenarioExecutor.EXEC]: execution[ScenarioExecutor.CONCURR] = execution.get(ScenarioExecutor.CONCURR).get(provisioning, None) execution[ScenarioExecutor.THRPT] = execution.get(ScenarioExecutor.THRPT).get(provisioning, None) for key in list(config.keys()): if key not in ("scenarios", ScenarioExecutor.EXEC, "included-configs", Service.SERV): config.pop(key) # cleanup configuration from empty values default_values = { 'concurrency': None, 'iterations': None, 'ramp-up': None, 'steps': None, 'throughput': None, 'hold-for': 0, 'files': [] } for execution in config[ScenarioExecutor.EXEC]: for key, value in iteritems(default_values): if execution[key] == value: execution.pop(key) assert isinstance(config, Configuration) config.dump(self.engine.create_artifact("cloud", "")) return config def __get_bza_test_config(self): bza_plugin = { "type": "taurus", "plugins": { "taurus": { "filename": "" # without this line it does not work } } } return bza_plugin def _get_locations(self, available_locations, executor): locations = executor.execution.get(self.LOC, BetterDict()) if not locations: for location in available_locations.values(): error = ValueError("No location specified and no default-location configured") def_loc = self.settings.get("default-location", error) if location['sandbox'] or location['id'] == def_loc: locations.merge({location['id']: 1}) if not locations: self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys())) raise ValueError("No sandbox location available, please specify locations manually") return locations def startup(self): super(CloudProvisioning, self).startup() self.client.start_taurus(self.test_id) self.log.info("Started cloud test: %s", self.client.results_url) if self.client.results_url: if self.browser_open in ('start', 'both'): open_browser(self.client.results_url) def check(self): # TODO: throttle down requests try: master = self.client.get_master_status(self.client.active_session_id) except URLError: self.log.warning("Failed to get test status, 
will retry in %s seconds...", self.client.timeout) self.log.debug("Full exception: %s", traceback.format_exc()) time.sleep(self.client.timeout) master = self.client.get_master_status(self.client.active_session_id) self.log.info("Succeeded with retry") if "status" in master and master['status'] != self.__last_master_status: self.__last_master_status = master['status'] self.log.info("Cloud test status: %s", self.__last_master_status) if self.results_reader is not None and 'progress' in master and master['progress'] >= 100: self.results_reader.master_id = self.client.active_session_id if 'progress' in master and master['progress'] > 100: self.log.info("Test was stopped in the cloud: %s", master['status']) status = self.client.get_master(self.client.active_session_id) if 'note' in status and status['note']: self.log.warning("Cloud test has probably failed with message: %s", status['note']) self.client.active_session_id = None return True self.widget.update() return super(CloudProvisioning, self).check() def post_process(self): self.client.end_master(self.client.active_session_id) if self.client.results_url: if self.browser_open in ('end', 'both'): open_browser(self.client.results_url) def weight_locations(self, locations, load, available_locations): total = float(sum(locations.values())) for loc_name, share in iteritems(locations): loc_info = available_locations[loc_name] limits = loc_info['limits'] if load.duration > limits['duration'] * 60: msg = "Test duration %s exceeds limit %s for location %s" self.log.warning(msg, load.duration, limits['duration'] * 60, loc_name) if load.concurrency: locations[loc_name] = int(math.ceil(load.concurrency * share / total / limits['threadsPerEngine'])) else: locations[loc_name] = 1 def get_widget(self): self.widget = CloudProvWidget(self) return self.widget class BlazeMeterClientEmul(BlazeMeterClient): def __init__(self, parent_logger): super(BlazeMeterClientEmul, self).__init__(parent_logger) self.results = [] def _request(self, url, data=None, headers=None, checker=None, method=None): self.log.debug("Request %s: %s", url, data) res = self.results.pop(0) self.log.debug("Response: %s", res) return res class ResultsFromBZA(ResultsProvider): """ :type client: BlazeMeterClient """ def __init__(self, client): super(ResultsFromBZA, self).__init__() self.client = client self.master_id = None # must be set afterwards self.min_ts = 0 self.log = logging.getLogger('') def _calculate_datapoints(self, final_pass=False): if self.master_id is None: return data, aggr_raw = self.query_data() aggr = {} for label in aggr_raw: aggr[label['labelName']] = label for label in data: if label['kpis'] and not final_pass: label['kpis'].pop(-1) # never take last second since it could be incomplete timestamps = [] for label in data: if label['label'] == 'ALL': timestamps.extend([kpi['ts'] for kpi in label['kpis']]) for tstmp in timestamps: point = DataPoint(tstmp) for label in data: for kpi in label['kpis']: if kpi['ts'] != tstmp: continue kpiset = KPISet() kpiset[KPISet.FAILURES] = kpi['ec'] kpiset[KPISet.CONCURRENCY] = kpi['na'] kpiset[KPISet.SAMPLE_COUNT] = kpi['n'] kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0 kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0 perc_map = {'90line': 90.0, "95line": 95.0, "99line": 99.0} for field, level in iteritems(perc_map): kpiset[KPISet.PERCENTILES][str(level)] = aggr[label['label']][field] point[DataPoint.CURRENT]['' if label['label'] == 'ALL' else label['label']] = kpiset point.recalculate() self.min_ts = point[DataPoint.TIMESTAMP] + 1 
yield point def query_data(self): try: data = self.client.get_kpis(self.master_id, self.min_ts) except URLError: self.log.warning("Failed to get result KPIs, will retry in %s seconds...", self.client.timeout) self.log.debug("Full exception: %s", traceback.format_exc()) time.sleep(self.client.timeout) data = self.client.get_kpis(self.master_id, self.min_ts) self.log.info("Succeeded with retry") try: aggr = self.client.get_aggregate_report(self.master_id) except URLError: self.log.warning("Failed to get aggregate results, will retry in %s seconds...", self.client.timeout) self.log.debug("Full exception: %s", traceback.format_exc()) time.sleep(self.client.timeout) aggr = self.client.get_aggregate_report(self.master_id) self.log.info("Succeeded with retry") return data, aggr class CloudProvWidget(Pile, PrioritizedWidget): def __init__(self, prov): """ :type prov: CloudProvisioning """ self.prov = prov self.text = Text("") self._sessions = None super(CloudProvWidget, self).__init__([self.text]) PrioritizedWidget.__init__(self, priority=0) def update(self): if not self._sessions: self._sessions = self.prov.client.get_master_sessions(self.prov.client.active_session_id) if not self._sessions: return mapping = BetterDict() cnt = 0 for session in self._sessions: try: cnt += 1 name_split = session['name'].split('/') location = session['configuration']['location'] count = session['configuration']['serversCount'] mapping.get(name_split[0]).get(name_split[1])[location] = count except KeyError: self._sessions = None txt = "%s #%s\n" % (self.prov.test_name, self.prov.client.active_session_id) for executor, scenarios in iteritems(mapping): txt += " %s" % executor for scenario, locations in iteritems(scenarios): txt += " %s:\n" % scenario for location, count in iteritems(locations): txt += " Agents in %s: %s\n" % (location, count) self.text.set_text(txt)
1
13,697
I would say that the client class should have a default of False and the cloud provisioning module should enable deleting.
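Roughly what I mean, as a minimal sketch (the attribute and settings names follow the file above; the constructor wiring here is assumed, not taken from the patch):

class BlazeMeterClient(object):
    def __init__(self, parent_logger):
        self.log = parent_logger.getChild(self.__class__.__name__)
        # conservative library default: never delete test files implicitly
        self.delete_files_before_test = False


class CloudProvisioning(object):
    def __init__(self, client, settings):
        self.client = client
        self.settings = settings

    def _configure_client(self):
        # the cloud provisioning module opts in explicitly; the
        # "delete-test-files" setting still lets a user turn it back off
        self.client.delete_files_before_test = self.settings.get(
            "delete-test-files", True)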
Blazemeter-taurus
py
@@ -1,4 +1,4 @@
-// Copyright (c) .NET Foundation. All rights reserved.
+// Copyright (c) .NET Foundation. All rights reserved.
 // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
 
 using System;
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Collections.Generic; using System.IO.Pipelines; using System.Threading; using Microsoft.AspNetCore.Http.Features; using Microsoft.AspNetCore.Server.Kestrel.Core.Adapter.Internal; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure; using Microsoft.AspNetCore.Testing; using Moq; using Xunit; namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests { public class HttpConnectionTests : IDisposable { private readonly PipeFactory _pipeFactory; private readonly HttpConnectionContext _httpConnectionContext; private readonly HttpConnection _httpConnection; public HttpConnectionTests() { _pipeFactory = new PipeFactory(); var pair = _pipeFactory.CreateConnectionPair(); _httpConnectionContext = new HttpConnectionContext { ConnectionId = "0123456789", ConnectionAdapters = new List<IConnectionAdapter>(), ConnectionFeatures = new FeatureCollection(), PipeFactory = _pipeFactory, HttpConnectionId = long.MinValue, Application = pair.Application, Transport = pair.Transport, ServiceContext = new TestServiceContext { SystemClock = new SystemClock() } }; _httpConnection = new HttpConnection(_httpConnectionContext); } public void Dispose() { _pipeFactory.Dispose(); } [Fact] public void DoesNotTimeOutWhenDebuggerIsAttached() { var mockDebugger = new Mock<IDebugger>(); mockDebugger.SetupGet(g => g.IsAttached).Returns(true); _httpConnection.Debugger = mockDebugger.Object; _httpConnection.CreateHttp1Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.CreateHttp2Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); var now = DateTimeOffset.Now; _httpConnection.Tick(now); _httpConnection.SetTimeout(1, TimeoutAction.SendTimeoutResponse); _httpConnection.Tick(now.AddTicks(2).Add(Heartbeat.Interval)); Assert.False(_httpConnection.RequestTimedOut); } [Fact] public void DoesNotTimeOutWhenRequestBodyDoesNotSatisfyMinimumDataRateButDebuggerIsAttached() { var mockDebugger = new Mock<IDebugger>(); mockDebugger.SetupGet(g => g.IsAttached).Returns(true); _httpConnection.Debugger = mockDebugger.Object; var bytesPerSecond = 100; var mockLogger = new Mock<IKestrelTrace>(); mockLogger.Setup(l => l.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<double>())).Throws(new InvalidOperationException("Should not log")); TickBodyWithMinimumDataRate(mockLogger.Object, bytesPerSecond); Assert.False(_httpConnection.RequestTimedOut); } [Fact] public void TimesOutWhenRequestBodyDoesNotSatisfyMinimumDataRate() { var bytesPerSecond = 100; var mockLogger = new Mock<IKestrelTrace>(); TickBodyWithMinimumDataRate(mockLogger.Object, bytesPerSecond); // Timed out Assert.True(_httpConnection.RequestTimedOut); mockLogger.Verify(logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), bytesPerSecond), Times.Once); } private void TickBodyWithMinimumDataRate(IKestrelTrace logger, int bytesPerSecond) { var gracePeriod = TimeSpan.FromSeconds(5); _httpConnectionContext.ServiceContext.ServerOptions.Limits.MinRequestBodyDataRate = new MinDataRate(bytesPerSecond: bytesPerSecond, gracePeriod: gracePeriod); _httpConnectionContext.ServiceContext.Log = logger; _httpConnection.CreateHttp1Connection(new 
DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.CreateHttp2Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.Http1Connection.Reset(); // Initialize timestamp var now = DateTimeOffset.UtcNow; _httpConnection.Tick(now); _httpConnection.StartTimingReads(); // Tick after grace period w/ low data rate now += gracePeriod + TimeSpan.FromSeconds(1); _httpConnection.BytesRead(1); _httpConnection.Tick(now); } [Fact] public void RequestBodyMinimumDataRateNotEnforcedDuringGracePeriod() { var bytesPerSecond = 100; var gracePeriod = TimeSpan.FromSeconds(2); _httpConnectionContext.ServiceContext.ServerOptions.Limits.MinRequestBodyDataRate = new MinDataRate(bytesPerSecond: bytesPerSecond, gracePeriod: gracePeriod); var mockLogger = new Mock<IKestrelTrace>(); _httpConnectionContext.ServiceContext.Log = mockLogger.Object; _httpConnection.CreateHttp1Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.CreateHttp2Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.Http1Connection.Reset(); // Initialize timestamp var now = DateTimeOffset.UtcNow; _httpConnection.Tick(now); _httpConnection.StartTimingReads(); // Tick during grace period w/ low data rate now += TimeSpan.FromSeconds(1); _httpConnection.BytesRead(10); _httpConnection.Tick(now); // Not timed out Assert.False(_httpConnection.RequestTimedOut); mockLogger.Verify(logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), bytesPerSecond), Times.Never); // Tick after grace period w/ low data rate now += TimeSpan.FromSeconds(2); _httpConnection.BytesRead(10); _httpConnection.Tick(now); // Timed out Assert.True(_httpConnection.RequestTimedOut); mockLogger.Verify(logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), bytesPerSecond), Times.Once); } [Fact] public void RequestBodyDataRateIsAveragedOverTimeSpentReadingRequestBody() { var bytesPerSecond = 100; var gracePeriod = TimeSpan.FromSeconds(2); _httpConnectionContext.ServiceContext.ServerOptions.Limits.MinRequestBodyDataRate = new MinDataRate(bytesPerSecond: bytesPerSecond, gracePeriod: gracePeriod); var mockLogger = new Mock<IKestrelTrace>(); _httpConnectionContext.ServiceContext.Log = mockLogger.Object; _httpConnection.CreateHttp1Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.CreateHttp2Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.Http1Connection.Reset(); // Initialize timestamp var now = DateTimeOffset.UtcNow; _httpConnection.Tick(now); _httpConnection.StartTimingReads(); // Set base data rate to 200 bytes/second now += gracePeriod; _httpConnection.BytesRead(400); _httpConnection.Tick(now); // Data rate: 200 bytes/second now += TimeSpan.FromSeconds(1); _httpConnection.BytesRead(200); _httpConnection.Tick(now); // Not timed out Assert.False(_httpConnection.RequestTimedOut); mockLogger.Verify(logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), bytesPerSecond), Times.Never); // Data rate: 150 bytes/second now += TimeSpan.FromSeconds(1); _httpConnection.BytesRead(0); _httpConnection.Tick(now); // Not timed out Assert.False(_httpConnection.RequestTimedOut); 
mockLogger.Verify(logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), bytesPerSecond), Times.Never); // Data rate: 120 bytes/second now += TimeSpan.FromSeconds(1); _httpConnection.BytesRead(0); _httpConnection.Tick(now); // Not timed out Assert.False(_httpConnection.RequestTimedOut); mockLogger.Verify(logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), bytesPerSecond), Times.Never); // Data rate: 100 bytes/second now += TimeSpan.FromSeconds(1); _httpConnection.BytesRead(0); _httpConnection.Tick(now); // Not timed out Assert.False(_httpConnection.RequestTimedOut); mockLogger.Verify(logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), bytesPerSecond), Times.Never); // Data rate: ~85 bytes/second now += TimeSpan.FromSeconds(1); _httpConnection.BytesRead(0); _httpConnection.Tick(now); // Timed out Assert.True(_httpConnection.RequestTimedOut); mockLogger.Verify(logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), bytesPerSecond), Times.Once); } [Fact] public void RequestBodyDataRateNotComputedOnPausedTime() { var systemClock = new MockSystemClock(); _httpConnectionContext.ServiceContext.ServerOptions.Limits.MinRequestBodyDataRate = new MinDataRate(bytesPerSecond: 100, gracePeriod: TimeSpan.FromSeconds(2)); _httpConnectionContext.ServiceContext.SystemClock = systemClock; var mockLogger = new Mock<IKestrelTrace>(); _httpConnectionContext.ServiceContext.Log = mockLogger.Object; _httpConnection.CreateHttp1Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.CreateHttp2Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.Http1Connection.Reset(); // Initialize timestamp _httpConnection.Tick(systemClock.UtcNow); _httpConnection.StartTimingReads(); // Tick at 3s, expected counted time is 3s, expected data rate is 200 bytes/second systemClock.UtcNow += TimeSpan.FromSeconds(3); _httpConnection.BytesRead(600); _httpConnection.Tick(systemClock.UtcNow); // Pause at 3.5s systemClock.UtcNow += TimeSpan.FromSeconds(0.5); _httpConnection.PauseTimingReads(); // Tick at 4s, expected counted time is 4s (first tick after pause goes through), expected data rate is 150 bytes/second systemClock.UtcNow += TimeSpan.FromSeconds(0.5); _httpConnection.Tick(systemClock.UtcNow); // Tick at 6s, expected counted time is 4s, expected data rate is 150 bytes/second systemClock.UtcNow += TimeSpan.FromSeconds(2); _httpConnection.Tick(systemClock.UtcNow); // Not timed out Assert.False(_httpConnection.RequestTimedOut); mockLogger.Verify( logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<double>()), Times.Never); // Resume at 6.5s systemClock.UtcNow += TimeSpan.FromSeconds(0.5); _httpConnection.ResumeTimingReads(); // Tick at 9s, expected counted time is 6s, expected data rate is 100 bytes/second systemClock.UtcNow += TimeSpan.FromSeconds(1.5); _httpConnection.Tick(systemClock.UtcNow); // Not timed out Assert.False(_httpConnection.RequestTimedOut); mockLogger.Verify( logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<double>()), Times.Never); // Tick at 10s, expected counted time is 7s, expected data rate drops below 100 bytes/second systemClock.UtcNow += TimeSpan.FromSeconds(1); _httpConnection.Tick(systemClock.UtcNow); 
// Timed out Assert.True(_httpConnection.RequestTimedOut); mockLogger.Verify( logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<double>()), Times.Once); } [Fact] public void ReadTimingNotPausedWhenResumeCalledBeforeNextTick() { var systemClock = new MockSystemClock(); _httpConnectionContext.ServiceContext.ServerOptions.Limits.MinRequestBodyDataRate = new MinDataRate(bytesPerSecond: 100, gracePeriod: TimeSpan.FromSeconds(2)); _httpConnectionContext.ServiceContext.SystemClock = systemClock; var mockLogger = new Mock<IKestrelTrace>(); _httpConnectionContext.ServiceContext.Log = mockLogger.Object; _httpConnection.CreateHttp1Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.CreateHttp2Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.Http1Connection.Reset(); // Initialize timestamp _httpConnection.Tick(systemClock.UtcNow); _httpConnection.StartTimingReads(); // Tick at 2s, expected counted time is 2s, expected data rate is 100 bytes/second systemClock.UtcNow += TimeSpan.FromSeconds(2); _httpConnection.BytesRead(200); _httpConnection.Tick(systemClock.UtcNow); // Not timed out Assert.False(_httpConnection.RequestTimedOut); mockLogger.Verify( logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<double>()), Times.Never); // Pause at 2.25s systemClock.UtcNow += TimeSpan.FromSeconds(0.25); _httpConnection.PauseTimingReads(); // Resume at 2.5s systemClock.UtcNow += TimeSpan.FromSeconds(0.25); _httpConnection.ResumeTimingReads(); // Tick at 3s, expected counted time is 3s, expected data rate is 100 bytes/second systemClock.UtcNow += TimeSpan.FromSeconds(0.5); _httpConnection.BytesRead(100); _httpConnection.Tick(systemClock.UtcNow); // Not timed out Assert.False(_httpConnection.RequestTimedOut); mockLogger.Verify( logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<double>()), Times.Never); // Tick at 4s, expected counted time is 4s, expected data rate drops below 100 bytes/second systemClock.UtcNow += TimeSpan.FromSeconds(1); _httpConnection.Tick(systemClock.UtcNow); // Timed out Assert.True(_httpConnection.RequestTimedOut); mockLogger.Verify( logger => logger.RequestBodyMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<double>()), Times.Once); } [Fact] public void ReadTimingNotEnforcedWhenTimeoutIsSet() { var systemClock = new MockSystemClock(); var timeout = TimeSpan.FromSeconds(5); _httpConnectionContext.ServiceContext.ServerOptions.Limits.MinRequestBodyDataRate = new MinDataRate(bytesPerSecond: 100, gracePeriod: TimeSpan.FromSeconds(2)); _httpConnectionContext.ServiceContext.SystemClock = systemClock; var mockLogger = new Mock<IKestrelTrace>(); _httpConnectionContext.ServiceContext.Log = mockLogger.Object; _httpConnection.CreateHttp1Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.CreateHttp2Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.Http1Connection.Reset(); var startTime = systemClock.UtcNow; // Initialize timestamp _httpConnection.Tick(startTime); _httpConnection.StartTimingReads(); _httpConnection.SetTimeout(timeout.Ticks, TimeoutAction.StopProcessingNextRequest); // Tick beyond grace period with low data rate 
systemClock.UtcNow += TimeSpan.FromSeconds(3); _httpConnection.BytesRead(1); _httpConnection.Tick(systemClock.UtcNow); // Not timed out Assert.False(_httpConnection.RequestTimedOut); // Tick just past timeout period, adjusted by Heartbeat.Interval systemClock.UtcNow = startTime + timeout + Heartbeat.Interval + TimeSpan.FromTicks(1); _httpConnection.Tick(systemClock.UtcNow); // Timed out Assert.True(_httpConnection.RequestTimedOut); } [Fact] public void WriteTimingAbortsConnectionWhenWriteDoesNotCompleteWithMinimumDataRate() { var systemClock = new MockSystemClock(); var aborted = new ManualResetEventSlim(); _httpConnectionContext.ServiceContext.ServerOptions.Limits.MinResponseDataRate = new MinDataRate(bytesPerSecond: 100, gracePeriod: TimeSpan.FromSeconds(2)); _httpConnectionContext.ServiceContext.SystemClock = systemClock; var mockLogger = new Mock<IKestrelTrace>(); _httpConnectionContext.ServiceContext.Log = mockLogger.Object; _httpConnection.CreateHttp1Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.CreateHttp2Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.Http1Connection.Reset(); _httpConnection.Http1Connection.RequestAborted.Register(() => { aborted.Set(); }); // Initialize timestamp _httpConnection.Tick(systemClock.UtcNow); // Should complete within 4 seconds, but the timeout is adjusted by adding Heartbeat.Interval _httpConnection.StartTimingWrite(400); // Tick just past 4s plus Heartbeat.Interval systemClock.UtcNow += TimeSpan.FromSeconds(4) + Heartbeat.Interval + TimeSpan.FromTicks(1); _httpConnection.Tick(systemClock.UtcNow); Assert.True(_httpConnection.RequestTimedOut); Assert.True(aborted.Wait(TimeSpan.FromSeconds(10))); } [Fact] public void WriteTimingAbortsConnectionWhenSmallWriteDoesNotCompleteWithinGracePeriod() { var systemClock = new MockSystemClock(); var minResponseDataRate = new MinDataRate(bytesPerSecond: 100, gracePeriod: TimeSpan.FromSeconds(5)); var aborted = new ManualResetEventSlim(); _httpConnectionContext.ServiceContext.ServerOptions.Limits.MinResponseDataRate = minResponseDataRate; _httpConnectionContext.ServiceContext.SystemClock = systemClock; var mockLogger = new Mock<IKestrelTrace>(); _httpConnectionContext.ServiceContext.Log = mockLogger.Object; _httpConnection.CreateHttp1Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.CreateHttp2Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.Http1Connection.Reset(); _httpConnection.Http1Connection.RequestAborted.Register(() => { aborted.Set(); }); // Initialize timestamp var startTime = systemClock.UtcNow; _httpConnection.Tick(startTime); // Should complete within 1 second, but the timeout is adjusted by adding Heartbeat.Interval _httpConnection.StartTimingWrite(100); // Tick just past 1s plus Heartbeat.Interval systemClock.UtcNow += TimeSpan.FromSeconds(1) + Heartbeat.Interval + TimeSpan.FromTicks(1); _httpConnection.Tick(systemClock.UtcNow); // Still within grace period, not timed out Assert.False(_httpConnection.RequestTimedOut); // Tick just past grace period (adjusted by Heartbeat.Interval) systemClock.UtcNow = startTime + minResponseDataRate.GracePeriod + Heartbeat.Interval + TimeSpan.FromTicks(1); _httpConnection.Tick(systemClock.UtcNow); Assert.True(_httpConnection.RequestTimedOut); 
Assert.True(aborted.Wait(TimeSpan.FromSeconds(10))); } [Fact] public void WriteTimingTimeoutPushedOnConcurrentWrite() { var systemClock = new MockSystemClock(); var aborted = new ManualResetEventSlim(); _httpConnectionContext.ServiceContext.ServerOptions.Limits.MinResponseDataRate = new MinDataRate(bytesPerSecond: 100, gracePeriod: TimeSpan.FromSeconds(2)); _httpConnectionContext.ServiceContext.SystemClock = systemClock; var mockLogger = new Mock<IKestrelTrace>(); _httpConnectionContext.ServiceContext.Log = mockLogger.Object; _httpConnection.CreateHttp1Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.CreateHttp2Connection(new DummyApplication(), _httpConnectionContext.Transport, _httpConnectionContext.Application); _httpConnection.Http1Connection.Reset(); _httpConnection.Http1Connection.RequestAborted.Register(() => { aborted.Set(); }); // Initialize timestamp _httpConnection.Tick(systemClock.UtcNow); // Should complete within 5 seconds, but the timeout is adjusted by adding Heartbeat.Interval _httpConnection.StartTimingWrite(500); // Start a concurrent write after 3 seconds, which should complete within 3 seconds (adjusted by Heartbeat.Interval) _httpConnection.StartTimingWrite(300); // Tick just past 5s plus Heartbeat.Interval, when the first write should have completed systemClock.UtcNow += TimeSpan.FromSeconds(5) + Heartbeat.Interval + TimeSpan.FromTicks(1); _httpConnection.Tick(systemClock.UtcNow); // Not timed out because the timeout was pushed by the second write Assert.False(_httpConnection.RequestTimedOut); // Complete the first write, this should have no effect on the timeout _httpConnection.StopTimingWrite(); // Tick just past +3s, when the second write should have completed systemClock.UtcNow += TimeSpan.FromSeconds(3) + TimeSpan.FromTicks(1); _httpConnection.Tick(systemClock.UtcNow); Assert.True(_httpConnection.RequestTimedOut); Assert.True(aborted.Wait(TimeSpan.FromSeconds(10))); } } }
1
14,323
Did this remove the BOM or something?
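For reference, a UTF-8 BOM is the byte sequence EF BB BF at the very start of the file; a quick way to check would be something like this (sketch only, the file path is a placeholder):

using System;
using System.IO;

internal static class BomCheck
{
    private static void Main()
    {
        // Placeholder path; point it at the file from this diff.
        var bytes = File.ReadAllBytes("HttpConnectionTests.cs");
        var hasBom = bytes.Length >= 3
                     && bytes[0] == 0xEF && bytes[1] == 0xBB && bytes[2] == 0xBF;
        Console.WriteLine(hasBom ? "UTF-8 BOM present" : "no UTF-8 BOM");
    }
}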
aspnet-KestrelHttpServer
.cs
@@ -99,6 +99,10 @@ public class BlockHeaderValidator<C> {
     return parent;
   }
 
+  public void addRule(final AttachedBlockHeaderValidationRule<C> rule) {
+    this.rules.add(new Rule<>(false, rule, rule.includeInLightValidation()));
+  }
+
   private static class Rule<C> {
     private final boolean detachedSupported;
     private final AttachedBlockHeaderValidationRule<C> rule;
1
/*
 * Copyright ConsenSys AG.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.ethereum.mainnet;

import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.core.BlockHeader;

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.function.Predicate;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class BlockHeaderValidator<C> {

  private static final Logger LOG = LogManager.getLogger();

  private final List<Rule<C>> rules;

  private BlockHeaderValidator(final List<Rule<C>> rules) {
    this.rules = rules;
  }

  public boolean validateHeader(
      final BlockHeader header,
      final BlockHeader parent,
      final ProtocolContext<C> protocolContext,
      final HeaderValidationMode mode) {
    switch (mode) {
      case NONE:
        return true;
      case LIGHT_DETACHED_ONLY:
        return applyRules(
            header,
            parent,
            protocolContext,
            rule -> rule.includeInLightValidation() && rule.isDetachedSupported());
      case LIGHT_SKIP_DETACHED:
        return applyRules(
            header,
            parent,
            protocolContext,
            rule -> rule.includeInLightValidation() && !rule.isDetachedSupported());
      case LIGHT:
        return applyRules(header, parent, protocolContext, Rule::includeInLightValidation);
      case DETACHED_ONLY:
        return applyRules(header, parent, protocolContext, Rule::isDetachedSupported);
      case SKIP_DETACHED:
        return applyRules(header, parent, protocolContext, rule -> !rule.isDetachedSupported());
      case FULL:
        return applyRules(header, parent, protocolContext, rule -> true);
    }
    throw new IllegalArgumentException("Unknown HeaderValidationMode: " + mode);
  }

  public boolean validateHeader(
      final BlockHeader header,
      final ProtocolContext<C> protocolContext,
      final HeaderValidationMode mode) {
    if (mode == HeaderValidationMode.NONE) {
      return true;
    }
    return getParent(header, protocolContext)
        .map(parentHeader -> validateHeader(header, parentHeader, protocolContext, mode))
        .orElse(false);
  }

  private boolean applyRules(
      final BlockHeader header,
      final BlockHeader parent,
      final ProtocolContext<C> protocolContext,
      final Predicate<Rule<C>> filter) {
    return rules.stream()
        .filter(filter)
        .allMatch(rule -> rule.validate(header, parent, protocolContext));
  }

  private Optional<BlockHeader> getParent(
      final BlockHeader header, final ProtocolContext<C> context) {
    final Optional<BlockHeader> parent =
        context.getBlockchain().getBlockHeader(header.getParentHash());
    if (!parent.isPresent()) {
      LOG.trace("Invalid block header: cannot determine parent header");
    }
    return parent;
  }

  private static class Rule<C> {
    private final boolean detachedSupported;
    private final AttachedBlockHeaderValidationRule<C> rule;
    private final boolean includeInLightValidation;

    private Rule(
        final boolean detachedSupported,
        final AttachedBlockHeaderValidationRule<C> rule,
        final boolean includeInLightValidation) {
      this.detachedSupported = detachedSupported;
      this.rule = rule;
      this.includeInLightValidation = includeInLightValidation;
    }

    public boolean isDetachedSupported() {
      return detachedSupported;
    }

    public boolean validate(
        final BlockHeader header,
        final BlockHeader parent,
        final ProtocolContext<C> protocolContext) {
      return this.rule.validate(header, parent, protocolContext);
    }

    public boolean includeInLightValidation() {
      return includeInLightValidation;
    }
  }

  public static class Builder<C> {
    private final List<Rule<C>> rules = new ArrayList<>();

    public Builder<C> addRule(final AttachedBlockHeaderValidationRule<C> rule) {
      this.rules.add(new Rule<>(false, rule, rule.includeInLightValidation()));
      return this;
    }

    public Builder<C> addRule(final DetachedBlockHeaderValidationRule rule) {
      this.rules.add(
          new Rule<>(
              true,
              (header, parent, protocolContext) -> rule.validate(header, parent),
              rule.includeInLightValidation()));
      return this;
    }

    public BlockHeaderValidator<C> build() {
      return new BlockHeaderValidator<>(rules);
    }
  }
}
1
22,069
This makes the BlockHeaderValidator mutable, which IMHO should not be done. Note that we already have a builder with the exact same method, so instead of mutating an already-built BlockHeaderValidator we should hook into wherever the builder is being created.
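For illustration, the same rule can be added without the mutator if the extra rule is threaded through to wherever the Builder is assembled (this helper is hypothetical, not existing code):

// Hypothetical assembly point: hand the extra rule to the existing Builder
// instead of mutating an already-built BlockHeaderValidator.
static <C> BlockHeaderValidator<C> buildValidator(
    final java.util.List<AttachedBlockHeaderValidationRule<C>> baseRules,
    final AttachedBlockHeaderValidationRule<C> extraRule) {
  final BlockHeaderValidator.Builder<C> builder = new BlockHeaderValidator.Builder<>();
  baseRules.forEach(builder::addRule);
  builder.addRule(extraRule); // same Rule wiring the proposed mutator would do
  return builder.build();
}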
hyperledger-besu
java
@@ -383,7 +383,7 @@ def outputDeviceNameToID(name, useDefaultIfInvalid=False):
 
 fileWavePlayer = None
 fileWavePlayerThread=None
-def playWaveFile(fileName, async=True):
+def playWaveFile(fileName, asynchronous=True):
 	"""plays a specified wave file.
 	"""
 	global fileWavePlayer, fileWavePlayerThread
1
#nvwave.py #A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2007-2017 NV Access Limited, Aleksey Sadovoy #This file is covered by the GNU General Public License. #See the file COPYING for more details. """Provides a simple Python interface to playing audio using the Windows multimedia waveOut functions, as well as other useful utilities. """ import threading from ctypes import * from ctypes.wintypes import * import time import atexit import wx import winKernel import wave import config from logHandler import log __all__ = ( "WavePlayer", "getOutputDeviceNames", "outputDeviceIDToName", "outputDeviceNameToID", ) winmm = windll.winmm HWAVEOUT = HANDLE LPHWAVEOUT = POINTER(HWAVEOUT) class WAVEFORMATEX(Structure): _fields_ = [ ("wFormatTag", WORD), ("nChannels", WORD), ("nSamplesPerSec", DWORD), ("nAvgBytesPerSec", DWORD), ("nBlockAlign", WORD), ("wBitsPerSample", WORD), ("cbSize", WORD) ] LPWAVEFORMATEX = POINTER(WAVEFORMATEX) class WAVEHDR(Structure): pass LPWAVEHDR = POINTER(WAVEHDR) WAVEHDR._fields_ = [ ("lpData", LPSTR), ("dwBufferLength", DWORD), ("dwBytesRecorded", DWORD), ("dwUser", DWORD), ("dwFlags", DWORD), ("dwLoops", DWORD), ("lpNext", LPWAVEHDR), ("reserved", DWORD) ] WHDR_DONE = 1 WAVE_FORMAT_PCM = 1 WAVE_MAPPER = -1 MMSYSERR_NOERROR = 0 CALLBACK_NULL = 0 #CALLBACK_FUNCTION = 0x30000 CALLBACK_EVENT = 0x50000 #waveOutProc = CFUNCTYPE(HANDLE, UINT, DWORD, DWORD, DWORD) #WOM_DONE = 0x3bd MAXPNAMELEN = 32 class WAVEOUTCAPS(Structure): _fields_ = [ ('wMid', WORD), ('wPid', WORD), ('vDriverVersion', c_uint), ('szPname', WCHAR*MAXPNAMELEN), ('dwFormats', DWORD), ('wChannels', WORD), ('wReserved1', WORD), ('dwSupport', DWORD), ] # Set argument types. winmm.waveOutOpen.argtypes = (LPHWAVEOUT, UINT, LPWAVEFORMATEX, DWORD, DWORD, DWORD) # Initialize error checking. def _winmm_errcheck(res, func, args): if res != MMSYSERR_NOERROR: buf = create_unicode_buffer(256) winmm.waveOutGetErrorTextW(res, buf, sizeof(buf)) raise WindowsError(res, buf.value) for func in ( winmm.waveOutOpen, winmm.waveOutPrepareHeader, winmm.waveOutWrite, winmm.waveOutUnprepareHeader, winmm.waveOutPause, winmm.waveOutRestart, winmm.waveOutReset, winmm.waveOutClose, winmm.waveOutGetDevCapsW ): func.errcheck = _winmm_errcheck class WavePlayer(object): """Synchronously play a stream of audio. To use, construct an instance and feed it waveform audio using L{feed}. """ #: Minimum length of buffer (in ms) before audio is played. MIN_BUFFER_MS = 300 #: Flag used to signal that L{stop} has been called. STOPPING = "stopping" #: A lock to prevent WaveOut* functions from being called simultaneously, as this can cause problems even if they are for different HWAVEOUTs. _global_waveout_lock = threading.RLock() _audioDucker=None def __init__(self, channels, samplesPerSec, bitsPerSample, outputDevice=WAVE_MAPPER, closeWhenIdle=True, wantDucking=True, buffered=False ): """Constructor. @param channels: The number of channels of audio; e.g. 2 for stereo, 1 for mono. @type channels: int @param samplesPerSec: Samples per second (hz). @type samplesPerSec: int @param bitsPerSample: The number of bits per sample. @type bitsPerSample: int @param outputDevice: The device ID or name of the audio output device to use. @type outputDevice: int or basestring @param closeWhenIdle: If C{True}, close the output device when no audio is being played. 
@type closeWhenIdle: bool @param wantDucking: if true then background audio will be ducked on Windows 8 and higher @type wantDucking: bool @param buffered: Whether to buffer small chunks of audio to prevent audio glitches. @type buffered: bool @note: If C{outputDevice} is a name and no such device exists, the default device will be used. @raise WindowsError: If there was an error opening the audio output device. """ self.channels=channels self.samplesPerSec=samplesPerSec self.bitsPerSample=bitsPerSample if isinstance(outputDevice, basestring): outputDevice = outputDeviceNameToID(outputDevice, True) self.outputDeviceID = outputDevice if wantDucking: import audioDucking if audioDucking.isAudioDuckingSupported(): self._audioDucker=audioDucking.AudioDucker() #: If C{True}, close the output device when no audio is being played. #: @type: bool self.closeWhenIdle = closeWhenIdle if buffered: #: Minimum size of the buffer before audio is played. #: However, this is ignored if an C{onDone} callback is provided to L{feed}. BITS_PER_BYTE = 8 MS_PER_SEC = 1000 self._minBufferSize = samplesPerSec * channels * (bitsPerSample / BITS_PER_BYTE) / MS_PER_SEC * self.MIN_BUFFER_MS self._buffer = "" else: self._minBufferSize = None #: Function to call when the previous chunk of audio has finished playing. self._prevOnDone = None self._waveout = None self._waveout_event = winKernel.kernel32.CreateEventW(None, False, False, None) self._waveout_lock = threading.RLock() self._lock = threading.RLock() self.open() def open(self): """Open the output device. This will be called automatically when required. It is not an error if the output device is already open. """ with self._waveout_lock: if self._waveout: return wfx = WAVEFORMATEX() wfx.wFormatTag = WAVE_FORMAT_PCM wfx.nChannels = self.channels wfx.nSamplesPerSec = self.samplesPerSec wfx.wBitsPerSample = self.bitsPerSample wfx.nBlockAlign = self.bitsPerSample / 8 * self.channels wfx.nAvgBytesPerSec = self.samplesPerSec * wfx.nBlockAlign waveout = HWAVEOUT(0) with self._global_waveout_lock: winmm.waveOutOpen(byref(waveout), self.outputDeviceID, LPWAVEFORMATEX(wfx), self._waveout_event, 0, CALLBACK_EVENT) self._waveout = waveout.value self._prev_whdr = None def feed(self, data, onDone=None): """Feed a chunk of audio data to be played. This is normally synchronous. However, synchronisation occurs on the previous chunk, rather than the current chunk; i.e. calling this while no audio is playing will begin playing the chunk but return immediately. This allows for uninterrupted playback as long as a new chunk is fed before the previous chunk has finished playing. @param data: Waveform audio in the format specified when this instance was constructed. @type data: str @param onDone: Function to call when this chunk has finished playing. @type onDone: callable @raise WindowsError: If there was an error playing the audio. """ if not self._minBufferSize: return self._feedUnbuffered(data, onDone=onDone) self._buffer += data # If onDone was specified, we must play audio regardless of the minimum buffer size # so we can accurately call onDone at the end of this chunk. 
if onDone or len(self._buffer) > self._minBufferSize: self._feedUnbuffered(self._buffer, onDone=onDone) self._buffer = "" def _feedUnbuffered(self, data, onDone=None): if self._audioDucker and not self._audioDucker.enable(): return whdr = WAVEHDR() whdr.lpData = data whdr.dwBufferLength = len(data) with self._lock: with self._waveout_lock: self.open() with self._global_waveout_lock: winmm.waveOutPrepareHeader(self._waveout, LPWAVEHDR(whdr), sizeof(WAVEHDR)) try: with self._global_waveout_lock: winmm.waveOutWrite(self._waveout, LPWAVEHDR(whdr), sizeof(WAVEHDR)) except WindowsError, e: self.close() raise e self.sync() self._prev_whdr = whdr # Don't call onDone if stop was called, # as this chunk has been truncated in that case. if self._prevOnDone is not self.STOPPING: self._prevOnDone = onDone def sync(self): """Synchronise with playback. This method blocks until the previously fed chunk of audio has finished playing. It is called automatically by L{feed}, so usually need not be called directly by the user. """ with self._lock: if not self._prev_whdr: return assert self._waveout, "waveOut None before wait" while not (self._prev_whdr.dwFlags & WHDR_DONE): winKernel.waitForSingleObject(self._waveout_event, winKernel.INFINITE) with self._waveout_lock: assert self._waveout, "waveOut None after wait" with self._global_waveout_lock: winmm.waveOutUnprepareHeader(self._waveout, LPWAVEHDR(self._prev_whdr), sizeof(WAVEHDR)) self._prev_whdr = None if self._prevOnDone is not None and self._prevOnDone is not self.STOPPING: try: self._prevOnDone() except: log.exception("Error calling onDone") self._prevOnDone = None def pause(self, switch): """Pause or unpause playback. @param switch: C{True} to pause playback, C{False} to unpause. @type switch: bool """ if self._audioDucker and self._waveout: if switch: self._audioDucker.disable() else: self._audioDucker.enable() with self._waveout_lock: if not self._waveout: return if switch: with self._global_waveout_lock: winmm.waveOutPause(self._waveout) else: with self._global_waveout_lock: winmm.waveOutRestart(self._waveout) def idle(self): """Indicate that this player is now idle; i.e. the current continuous segment of audio is complete. This will first call L{sync} to synchronise with playback. If L{closeWhenIdle} is C{True}, the output device will be closed. A subsequent call to L{feed} will reopen it. """ if not self._minBufferSize: return self._idleUnbuffered() if self._buffer: self._feedUnbuffered(self._buffer) self._buffer = "" return self._idleUnbuffered() def _idleUnbuffered(self): with self._lock: self.sync() with self._waveout_lock: if not self._waveout: return if self.closeWhenIdle: self._close() if self._audioDucker: self._audioDucker.disable() def stop(self): """Stop playback. """ if self._audioDucker: self._audioDucker.disable() if self._minBufferSize: self._buffer = "" with self._waveout_lock: if not self._waveout: return self._prevOnDone = self.STOPPING try: with self._global_waveout_lock: # Pausing first seems to make waveOutReset respond faster on some systems. winmm.waveOutPause(self._waveout) winmm.waveOutReset(self._waveout) except WindowsError: # waveOutReset seems to fail randomly on some systems. pass # Unprepare the previous buffer and close the output device if appropriate. self._idleUnbuffered() self._prevOnDone = None def close(self): """Close the output device. 
""" self.stop() with self._lock: with self._waveout_lock: if not self._waveout: return self._close() def _close(self): with self._global_waveout_lock: winmm.waveOutClose(self._waveout) self._waveout = None def __del__(self): self.close() winKernel.kernel32.CloseHandle(self._waveout_event) self._waveout_event = None def _getOutputDevices(): caps = WAVEOUTCAPS() for devID in xrange(-1, winmm.waveOutGetNumDevs()): try: winmm.waveOutGetDevCapsW(devID, byref(caps), sizeof(caps)) yield devID, caps.szPname except WindowsError: # It seems that in certain cases, Windows includes devices which cannot be accessed. pass def getOutputDeviceNames(): """Obtain the names of all audio output devices on the system. @return: The names of all output devices on the system. @rtype: [str, ...] """ return [name for ID, name in _getOutputDevices()] def outputDeviceIDToName(ID): """Obtain the name of an output device given its device ID. @param ID: The device ID. @type ID: int @return: The device name. @rtype: str """ caps = WAVEOUTCAPS() try: winmm.waveOutGetDevCapsW(ID, byref(caps), sizeof(caps)) except WindowsError: raise LookupError("No such device ID") return caps.szPname def outputDeviceNameToID(name, useDefaultIfInvalid=False): """Obtain the device ID of an output device given its name. @param name: The device name. @type name: str @param useDefaultIfInvalid: C{True} to use the default device (wave mapper) if there is no such device, C{False} to raise an exception. @return: The device ID. @rtype: int @raise LookupError: If there is no such device and C{useDefaultIfInvalid} is C{False}. """ for curID, curName in _getOutputDevices(): if curName == name: return curID # No such ID. if useDefaultIfInvalid: return WAVE_MAPPER else: raise LookupError("No such device name") fileWavePlayer = None fileWavePlayerThread=None def playWaveFile(fileName, async=True): """plays a specified wave file. """ global fileWavePlayer, fileWavePlayerThread f = wave.open(fileName,"r") if f is None: raise RuntimeError("can not open file %s"%fileName) if fileWavePlayer is not None: fileWavePlayer.stop() fileWavePlayer = WavePlayer(channels=f.getnchannels(), samplesPerSec=f.getframerate(),bitsPerSample=f.getsampwidth()*8, outputDevice=config.conf["speech"]["outputDevice"],wantDucking=False) fileWavePlayer.feed(f.readframes(f.getnframes())) if async: if fileWavePlayerThread is not None: fileWavePlayerThread.join() fileWavePlayerThread=threading.Thread(target=fileWavePlayer.idle) fileWavePlayerThread.start() else: fileWavePlayer.idle() # When exiting, ensure fileWavePlayer is deleted before modules get cleaned up. # Otherwise, WavePlayer.__del__ will fail with an exception. @atexit.register def _cleanup(): global fileWavePlayer, fileWavePlayerThread fileWavePlayer = None fileWavePlayerThread = None
1
23,142
Maybe add information about the parameters to the docstring while at it?
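For example, in the @param/@type style the rest of this file already uses (the wording below is only a suggestion):

def playWaveFile(fileName, asynchronous=True):
	"""plays a specified wave file.
	@param fileName: the path of the wave file to play
	@type fileName: str
	@param asynchronous: C{True} to play the file on a background thread,
		C{False} to block until the file has finished playing
	@type asynchronous: bool
	"""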
nvaccess-nvda
py
@@ -43,8 +43,8 @@ public enum RpcMethod {
   PRIV_DELETE_PRIVACY_GROUP("priv_deletePrivacyGroup"),
   PRIV_FIND_PRIVACY_GROUP("priv_findPrivacyGroup"),
   PRIV_DISTRIBUTE_RAW_TRANSACTION("priv_distributeRawTransaction"),
+  PRIV_GET_TRANSACTION_COUNT_LEGACY("priv_getTransactionCountLegacy"),
   EEA_SEND_RAW_TRANSACTION("eea_sendRawTransaction"),
-  EEA_GET_TRANSACTION_COUNT("eea_getTransactionCount"),
   ETH_ACCOUNTS("eth_accounts"),
   ETH_BLOCK_NUMBER("eth_blockNumber"),
   ETH_CALL("eth_call"),
1
/*
 * Copyright ConsenSys AG.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.ethereum.api.jsonrpc;

import java.util.ArrayList;
import java.util.Collection;

public enum RpcMethod {
  ADMIN_ADD_PEER("admin_addPeer"),
  ADMIN_NODE_INFO("admin_nodeInfo"),
  ADMIN_PEERS("admin_peers"),
  ADMIN_REMOVE_PEER("admin_removePeer"),
  ADMIN_CHANGE_LOG_LEVEL("admin_changeLogLevel"),
  CLIQUE_DISCARD("clique_discard"),
  CLIQUE_GET_SIGNERS("clique_getSigners"),
  CLIQUE_GET_SIGNERS_AT_HASH("clique_getSignersAtHash"),
  CLIQUE_GET_PROPOSALS("clique_proposals"),
  CLIQUE_PROPOSE("clique_propose"),
  CLIQUE_GET_SIGNER_METRICS("clique_getSignerMetrics"),
  DEBUG_METRICS("debug_metrics"),
  DEBUG_STORAGE_RANGE_AT("debug_storageRangeAt"),
  DEBUG_TRACE_BLOCK("debug_traceBlock"),
  DEBUG_TRACE_BLOCK_BY_HASH("debug_traceBlockByHash"),
  DEBUG_TRACE_BLOCK_BY_NUMBER("debug_traceBlockByNumber"),
  DEBUG_TRACE_TRANSACTION("debug_traceTransaction"),
  PRIV_GET_PRIVATE_TRANSACTION("priv_getPrivateTransaction"),
  PRIV_GET_TRANSACTION_COUNT("priv_getTransactionCount"),
  PRIV_GET_PRIVACY_PRECOMPILE_ADDRESS("priv_getPrivacyPrecompileAddress"),
  PRIV_GET_TRANSACTION_RECEIPT("priv_getTransactionReceipt"),
  PRIV_CREATE_PRIVACY_GROUP("priv_createPrivacyGroup"),
  PRIV_DELETE_PRIVACY_GROUP("priv_deletePrivacyGroup"),
  PRIV_FIND_PRIVACY_GROUP("priv_findPrivacyGroup"),
  PRIV_DISTRIBUTE_RAW_TRANSACTION("priv_distributeRawTransaction"),
  EEA_SEND_RAW_TRANSACTION("eea_sendRawTransaction"),
  EEA_GET_TRANSACTION_COUNT("eea_getTransactionCount"),
  ETH_ACCOUNTS("eth_accounts"),
  ETH_BLOCK_NUMBER("eth_blockNumber"),
  ETH_CALL("eth_call"),
  ETH_CHAIN_ID("eth_chainId"),
  ETH_COINBASE("eth_coinbase"),
  ETH_ESTIMATE_GAS("eth_estimateGas"),
  ETH_GAS_PRICE("eth_gasPrice"),
  ETH_GET_BALANCE("eth_getBalance"),
  ETH_GET_BLOCK_BY_HASH("eth_getBlockByHash"),
  ETH_GET_BLOCK_BY_NUMBER("eth_getBlockByNumber"),
  ETH_GET_BLOCK_TRANSACTION_COUNT_BY_HASH("eth_getBlockTransactionCountByHash"),
  ETH_GET_BLOCK_TRANSACTION_COUNT_BY_NUMBER("eth_getBlockTransactionCountByNumber"),
  ETH_GET_CODE("eth_getCode"),
  ETH_GET_FILTER_CHANGES("eth_getFilterChanges"),
  ETH_GET_FILTER_LOGS("eth_getFilterLogs"),
  ETH_GET_LOGS("eth_getLogs"),
  ETH_GET_PROOF("eth_getProof"),
  ETH_GET_STORAGE_AT("eth_getStorageAt"),
  ETH_GET_TRANSACTION_BY_BLOCK_HASH_AND_INDEX("eth_getTransactionByBlockHashAndIndex"),
  ETH_GET_TRANSACTION_BY_BLOCK_NUMBER_AND_INDEX("eth_getTransactionByBlockNumberAndIndex"),
  ETH_GET_TRANSACTION_BY_HASH("eth_getTransactionByHash"),
  ETH_GET_TRANSACTION_COUNT("eth_getTransactionCount"),
  ETH_GET_TRANSACTION_RECEIPT("eth_getTransactionReceipt"),
  ETH_GET_UNCLE_BY_BLOCK_HASH_AND_INDEX("eth_getUncleByBlockHashAndIndex"),
  ETH_GET_UNCLE_BY_BLOCK_NUMBER_AND_INDEX("eth_getUncleByBlockNumberAndIndex"),
  ETH_GET_UNCLE_COUNT_BY_BLOCK_HASH("eth_getUncleCountByBlockHash"),
  ETH_GET_UNCLE_COUNT_BY_BLOCK_NUMBER("eth_getUncleCountByBlockNumber"),
  ETH_GET_WORK("eth_getWork"),
  ETH_HASHRATE("eth_hashrate"),
  ETH_MINING("eth_mining"),
  ETH_NEW_BLOCK_FILTER("eth_newBlockFilter"),
  ETH_NEW_FILTER("eth_newFilter"),
  ETH_NEW_PENDING_TRANSACTION_FILTER("eth_newPendingTransactionFilter"),
  ETH_PROTOCOL_VERSION("eth_protocolVersion"),
  ETH_SEND_RAW_TRANSACTION("eth_sendRawTransaction"),
  ETH_SEND_TRANSACTION("eth_sendTransaction"),
  ETH_SUBSCRIBE("eth_subscribe"),
  ETH_SYNCING("eth_syncing"),
  ETH_UNINSTALL_FILTER("eth_uninstallFilter"),
  ETH_UNSUBSCRIBE("eth_unsubscribe"),
  IBFT_DISCARD_VALIDATOR_VOTE("ibft_discardValidatorVote"),
  IBFT_GET_PENDING_VOTES("ibft_getPendingVotes"),
  IBFT_GET_VALIDATORS_BY_BLOCK_HASH("ibft_getValidatorsByBlockHash"),
  IBFT_GET_VALIDATORS_BY_BLOCK_NUMBER("ibft_getValidatorsByBlockNumber"),
  IBFT_PROPOSE_VALIDATOR_VOTE("ibft_proposeValidatorVote"),
  IBFT_GET_SIGNER_METRICS("ibft_getSignerMetrics"),
  MINER_SET_COINBASE("miner_setCoinbase"),
  MINER_SET_ETHERBASE("miner_setEtherbase"),
  MINER_START("miner_start"),
  MINER_STOP("miner_stop"),
  NET_ENODE("net_enode"),
  NET_LISTENING("net_listening"),
  NET_PEER_COUNT("net_peerCount"),
  NET_SERVICES("net_services"),
  NET_VERSION("net_version"),
  PERM_ADD_ACCOUNTS_TO_WHITELIST("perm_addAccountsToWhitelist"),
  PERM_ADD_NODES_TO_WHITELIST("perm_addNodesToWhitelist"),
  PERM_GET_ACCOUNTS_WHITELIST("perm_getAccountsWhitelist"),
  PERM_GET_NODES_WHITELIST("perm_getNodesWhitelist"),
  PERM_RELOAD_PERMISSIONS_FROM_FILE("perm_reloadPermissionsFromFile"),
  PERM_REMOVE_ACCOUNTS_FROM_WHITELIST("perm_removeAccountsFromWhitelist"),
  PERM_REMOVE_NODES_FROM_WHITELIST("perm_removeNodesFromWhitelist"),
  RPC_MODULES("rpc_modules"),
  TRACE_REPLAY_BLOCK_TRANSACTIONS("trace_replayBlockTransactions"),
  TX_POOL_BESU_STATISTICS("txpool_besuStatistics"),
  TX_POOL_BESU_TRANSACTIONS("txpool_besuTransactions"),
  WEB3_CLIENT_VERSION("web3_clientVersion"),
  WEB3_SHA3("web3_sha3");

  private final String methodName;

  private static Collection<String> allMethodNames;

  public String getMethodName() {
    return methodName;
  }

  static {
    allMethodNames = new ArrayList<>();
    for (RpcMethod m : RpcMethod.values()) {
      allMethodNames.add(m.getMethodName());
    }
  }

  RpcMethod(final String methodName) {
    this.methodName = methodName;
  }

  public static boolean rpcMethodExists(final String rpcMethodName) {
    return allMethodNames.contains(rpcMethodName);
  }
}
1
20,045
The naming of this sounds a bit awkward. Perhaps priv_legacy_getTransactionCount or priv_legacyGetTransactionCount?
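i.e. something along these lines, sketched as enum entries in the same style (neither spelling is final):

// as in the patch above:
PRIV_GET_TRANSACTION_COUNT_LEGACY("priv_getTransactionCountLegacy"),
// suggested alternative:
PRIV_LEGACY_GET_TRANSACTION_COUNT("priv_legacyGetTransactionCount"),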
hyperledger-besu
java
@@ -3901,9 +3901,6 @@ void CoreChecks::PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory mem, co
 
     // Clear mem binding for any bound objects
     for (auto obj : mem_info->obj_bindings) {
-        log_msg(report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle,
-                kVUID_Core_MemTrack_FreedMemRef, "VK Object %s still has a reference to mem obj %s.",
-                report_data->FormatHandle(obj.handle).c_str(), report_data->FormatHandle(mem_info->mem).c_str());
         BINDABLE *bindable_state = nullptr;
         switch (obj.type) {
             case kVulkanObjectTypeImage:
1
/* Copyright (c) 2015-2019 The Khronos Group Inc. * Copyright (c) 2015-2019 Valve Corporation * Copyright (c) 2015-2019 LunarG, Inc. * Copyright (C) 2015-2019 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Cody Northrop <[email protected]> * Author: Michael Lentine <[email protected]> * Author: Tobin Ehlis <[email protected]> * Author: Chia-I Wu <[email protected]> * Author: Chris Forbes <[email protected]> * Author: Mark Lobodzinski <[email protected]> * Author: Ian Elliott <[email protected]> * Author: Dave Houlton <[email protected]> * Author: Dustin Graves <[email protected]> * Author: Jeremy Hayes <[email protected]> * Author: Jon Ashburn <[email protected]> * Author: Karl Schultz <[email protected]> * Author: Mark Young <[email protected]> * Author: Mike Schuchardt <[email protected]> * Author: Mike Weiblen <[email protected]> * Author: Tony Barbour <[email protected]> * Author: John Zulauf <[email protected]> * Author: Shannon McPherson <[email protected]> */ // Allow use of STL min and max functions in Windows #define NOMINMAX #include <algorithm> #include <array> #include <assert.h> #include <cmath> #include <iostream> #include <list> #include <map> #include <memory> #include <mutex> #include <set> #include <sstream> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <string> #include <valarray> #include "vk_loader_platform.h" #include "vk_dispatch_table_helper.h" #include "vk_enum_string_helper.h" #include "chassis.h" #include "convert_to_renderpass2.h" #include "core_validation.h" #include "buffer_validation.h" #include "shader_validation.h" #include "vk_layer_utils.h" // These functions are defined *outside* the core_validation namespace as their type // is also defined outside that namespace size_t PipelineLayoutCompatDef::hash() const { hash_util::HashCombiner hc; // The set number is integral to the CompatDef's distinctiveness hc << set << push_constant_ranges.get(); const auto &descriptor_set_layouts = *set_layouts_id.get(); for (uint32_t i = 0; i <= set; i++) { hc << descriptor_set_layouts[i].get(); } return hc.Value(); } bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const { if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) { return false; } if (set_layouts_id == other.set_layouts_id) { // if it's the same set_layouts_id, then *any* subset will match return true; } // They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match const auto &descriptor_set_layouts = *set_layouts_id.get(); assert(set < descriptor_set_layouts.size()); const auto &other_ds_layouts = *other.set_layouts_id.get(); assert(set < other_ds_layouts.size()); for (uint32_t i = 0; i <= set; i++) { if (descriptor_set_layouts[i] != other_ds_layouts[i]) { return false; } } return true; } using std::max; using std::string; using std::stringstream; using std::unique_ptr; using std::unordered_map; using std::unordered_set; using std::vector; // WSI Image Objects bypass usual Image 
Object creation methods. A special Memory // Object value will be used to identify them internally. static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1); // 2nd special memory handle used to flag object as unbound from memory static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1); // Return buffer state ptr for specified buffer or else NULL BUFFER_STATE *CoreChecks::GetBufferState(VkBuffer buffer) { auto buff_it = bufferMap.find(buffer); if (buff_it == bufferMap.end()) { return nullptr; } return buff_it->second.get(); } // Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL IMAGE_VIEW_STATE *CoreChecks::GetImageViewState(VkImageView image_view) { auto iv_it = imageViewMap.find(image_view); if (iv_it == imageViewMap.end()) { return nullptr; } return iv_it->second.get(); } // Get the global map of pending releases GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap( const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) { return qfo_release_image_barrier_map; } GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap( const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) { return qfo_release_buffer_barrier_map; } // Get the image viewstate for a given framebuffer attachment IMAGE_VIEW_STATE *CoreChecks::GetAttachmentImageViewState(FRAMEBUFFER_STATE *framebuffer, uint32_t index) { assert(framebuffer && (index < framebuffer->createInfo.attachmentCount)); const VkImageView &image_view = framebuffer->createInfo.pAttachments[index]; return GetImageViewState(image_view); } // Return sampler node ptr for specified sampler or else NULL SAMPLER_STATE *CoreChecks::GetSamplerState(VkSampler sampler) { auto sampler_it = samplerMap.find(sampler); if (sampler_it == samplerMap.end()) { return nullptr; } return sampler_it->second.get(); } // Return image state ptr for specified image or else NULL IMAGE_STATE *CoreChecks::GetImageState(VkImage image) { auto img_it = imageMap.find(image); if (img_it == imageMap.end()) { return nullptr; } return img_it->second.get(); } // Return swapchain node for specified swapchain or else NULL SWAPCHAIN_NODE *CoreChecks::GetSwapchainNode(VkSwapchainKHR swapchain) { auto swp_it = swapchainMap.find(swapchain); if (swp_it == swapchainMap.end()) { return nullptr; } return swp_it->second.get(); } // Return buffer node ptr for specified buffer or else NULL BUFFER_VIEW_STATE *CoreChecks::GetBufferViewState(VkBufferView buffer_view) { auto bv_it = bufferViewMap.find(buffer_view); if (bv_it == bufferViewMap.end()) { return nullptr; } return bv_it->second.get(); } FENCE_NODE *CoreChecks::GetFenceNode(VkFence fence) { auto it = fenceMap.find(fence); if (it == fenceMap.end()) { return nullptr; } return &it->second; } EVENT_STATE *CoreChecks::GetEventNode(VkEvent event) { auto it = eventMap.find(event); if (it == eventMap.end()) { return nullptr; } return &it->second; } QUERY_POOL_NODE *CoreChecks::GetQueryPoolNode(VkQueryPool query_pool) { auto it = queryPoolMap.find(query_pool); if (it == queryPoolMap.end()) { return nullptr; } return &it->second; } QUEUE_STATE *CoreChecks::GetQueueState(VkQueue queue) { auto it = queueMap.find(queue); if (it == queueMap.end()) { return nullptr; } return &it->second; } SEMAPHORE_NODE *CoreChecks::GetSemaphoreNode(VkSemaphore semaphore) { auto it = semaphoreMap.find(semaphore); if (it == semaphoreMap.end()) { return nullptr; } return &it->second; } COMMAND_POOL_NODE 
*CoreChecks::GetCommandPoolNode(VkCommandPool pool) { auto it = commandPoolMap.find(pool); if (it == commandPoolMap.end()) { return nullptr; } return &it->second; } PHYSICAL_DEVICE_STATE *CoreChecks::GetPhysicalDeviceState(VkPhysicalDevice phys) { auto *phys_dev_map = ((physical_device_map.size() > 0) ? &physical_device_map : &instance_state->physical_device_map); auto it = phys_dev_map->find(phys); if (it == phys_dev_map->end()) { return nullptr; } return &it->second; } PHYSICAL_DEVICE_STATE *CoreChecks::GetPhysicalDeviceState() { return physical_device_state; } SURFACE_STATE *CoreChecks::GetSurfaceState(VkSurfaceKHR surface) { auto *surf_map = ((surface_map.size() > 0) ? &surface_map : &instance_state->surface_map); auto it = surf_map->find(surface); if (it == surf_map->end()) { return nullptr; } return &it->second; } // Return ptr to memory binding for given handle of specified type BINDABLE *CoreChecks::GetObjectMemBinding(uint64_t handle, VulkanObjectType type) { switch (type) { case kVulkanObjectTypeImage: return GetImageState(VkImage(handle)); case kVulkanObjectTypeBuffer: return GetBufferState(VkBuffer(handle)); default: break; } return nullptr; } std::unordered_map<VkSamplerYcbcrConversion, uint64_t> *CoreChecks::GetYcbcrConversionFormatMap() { return &ycbcr_conversion_ahb_fmt_map; } std::unordered_set<uint64_t> *CoreChecks::GetAHBExternalFormatsSet() { return &ahb_ext_formats_set; } ImageSubresourceLayoutMap::InitialLayoutState::InitialLayoutState(const GLOBAL_CB_NODE &cb_state, const IMAGE_VIEW_STATE *view_state) : image_view(VK_NULL_HANDLE), aspect_mask(0), label(cb_state.debug_label) { if (view_state) { image_view = view_state->image_view; aspect_mask = view_state->create_info.subresourceRange.aspectMask; } } std::string FormatDebugLabel(const char *prefix, const LoggingLabel &label) { if (label.Empty()) return std::string(); std::string out; string_sprintf(&out, "%sVkDebugUtilsLabel(name='%s' color=[%g, %g %g, %g])", prefix, label.name.c_str(), label.color[0], label.color[1], label.color[2], label.color[3]); return out; } // the ImageLayoutMap implementation bakes in the number of valid aspects -- we have to choose the correct one at construction time template <uint32_t kThreshold> static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactoryByAspect(const IMAGE_STATE &image_state) { ImageSubresourceLayoutMap *map = nullptr; switch (image_state.full_range.aspectMask) { case VK_IMAGE_ASPECT_COLOR_BIT: map = new ImageSubresourceLayoutMapImpl<ColorAspectTraits, kThreshold>(image_state); break; case VK_IMAGE_ASPECT_DEPTH_BIT: map = new ImageSubresourceLayoutMapImpl<DepthAspectTraits, kThreshold>(image_state); break; case VK_IMAGE_ASPECT_STENCIL_BIT: map = new ImageSubresourceLayoutMapImpl<StencilAspectTraits, kThreshold>(image_state); break; case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT: map = new ImageSubresourceLayoutMapImpl<DepthStencilAspectTraits, kThreshold>(image_state); break; case VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT: map = new ImageSubresourceLayoutMapImpl<Multiplane2AspectTraits, kThreshold>(image_state); break; case VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT: map = new ImageSubresourceLayoutMapImpl<Multiplane3AspectTraits, kThreshold>(image_state); break; } assert(map); // We shouldn't be able to get here null unless the traits cases are incomplete return std::unique_ptr<ImageSubresourceLayoutMap>(map); } static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactory(const 
IMAGE_STATE &image_state) {
    std::unique_ptr<ImageSubresourceLayoutMap> map;
    const uint32_t kAlwaysDenseLimit = 16;  // About a cacheline on desktop architectures
    if (image_state.full_range.layerCount <= kAlwaysDenseLimit) {
        // Create a dense row map
        map = LayoutMapFactoryByAspect<0>(image_state);
    } else {
        // Create an initially sparse row map
        map = LayoutMapFactoryByAspect<kAlwaysDenseLimit>(image_state);
    }
    return map;
}

// The const variant only needs the image as it is the key for the map
const ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(const GLOBAL_CB_NODE *cb_state, VkImage image) {
    auto it = cb_state->image_layout_map.find(image);
    if (it == cb_state->image_layout_map.cend()) {
        return nullptr;
    }
    return it->second.get();
}

// The non-const variant only needs the image state, as the factory requires it to construct a new entry
ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(GLOBAL_CB_NODE *cb_state, const IMAGE_STATE &image_state) {
    auto it = cb_state->image_layout_map.find(image_state.image);
    if (it == cb_state->image_layout_map.end()) {
        // Empty slot... fill it in.
        auto insert_pair = cb_state->image_layout_map.insert(std::make_pair(image_state.image, LayoutMapFactory(image_state)));
        assert(insert_pair.second);
        ImageSubresourceLayoutMap *new_map = insert_pair.first->second.get();
        assert(new_map);
        return new_map;
    }
    return it->second.get();
}

// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *CoreChecks::GetMemObjInfo(const VkDeviceMemory mem) {
    auto mem_it = memObjMap.find(mem);
    if (mem_it == memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

void CoreChecks::AddMemObjInfo(void *object, const VkDeviceMemory mem, const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    auto *mem_info = new DEVICE_MEM_INFO(object, mem, pAllocateInfo);
    memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(mem_info);

    auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext);
    if (dedicated) {
        mem_info->is_dedicated = true;
        mem_info->dedicated_buffer = dedicated->buffer;
        mem_info->dedicated_image = dedicated->image;
    }
    auto export_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(pAllocateInfo->pNext);
    if (export_info) {
        mem_info->is_export = true;
        mem_info->export_handle_type_flags = export_info->handleTypes;
    }
}

// Create binding link between given sampler and command buffer node
void CoreChecks::AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    auto inserted = cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
    if (inserted.second) {
        // Only need to complete the cross-reference if this is a new item
        sampler_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image node and command buffer node
void CoreChecks::AddCommandBufferBindingImage(GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update cb binding for image
        auto image_inserted = cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
        if (image_inserted.second) {
            // Only need to continue if this is a new item (the rest of the work would have been done previously)
            image_state->cb_bindings.insert(cb_node);
            // Now update CB binding in MemObj mini CB list
            for (auto mem_binding : image_state->GetBoundMemory()) {
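                // Each bound VkDeviceMemory is cross-referenced in both directions here: the command
                // buffer records the memory handle in its memObjs set, and the DEVICE_MEM_INFO records
                // the command buffer in its cb_bindings set, so teardown paths such as
                // ClearCmdBufAndMemReferences() can unlink the pair starting from either side.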
DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(mem_binding); if (pMemInfo) { // Now update CBInfo's Mem reference list auto mem_inserted = cb_node->memObjs.insert(mem_binding); if (mem_inserted.second) { // Only need to complete the cross-reference if this is a new item pMemInfo->cb_bindings.insert(cb_node); } } } } } } // Create binding link between given image view node and its image with command buffer node void CoreChecks::AddCommandBufferBindingImageView(GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) { // First add bindings for imageView auto inserted = cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView}); if (inserted.second) { // Only need to continue if this is a new item view_state->cb_bindings.insert(cb_node); auto image_state = GetImageState(view_state->create_info.image); // Add bindings for image within imageView if (image_state) { AddCommandBufferBindingImage(cb_node, image_state); } } } // Create binding link between given buffer node and command buffer node void CoreChecks::AddCommandBufferBindingBuffer(GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) { // First update cb binding for buffer auto buffer_inserted = cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer}); if (buffer_inserted.second) { // Only need to continue if this is a new item buffer_state->cb_bindings.insert(cb_node); // Now update CB binding in MemObj mini CB list for (auto mem_binding : buffer_state->GetBoundMemory()) { DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(mem_binding); if (pMemInfo) { // Now update CBInfo's Mem reference list auto inserted = cb_node->memObjs.insert(mem_binding); if (inserted.second) { // Only need to complete the cross-reference if this is a new item pMemInfo->cb_bindings.insert(cb_node); } } } } } // Create binding link between given buffer view node and its buffer with command buffer node void CoreChecks::AddCommandBufferBindingBufferView(GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) { // First add bindings for bufferView auto inserted = cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView}); if (inserted.second) { // Only need to complete the cross-reference if this is a new item view_state->cb_bindings.insert(cb_node); auto buffer_state = GetBufferState(view_state->create_info.buffer); // Add bindings for buffer within bufferView if (buffer_state) { AddCommandBufferBindingBuffer(cb_node, buffer_state); } } } // For every mem obj bound to particular CB, free bindings related to that CB void CoreChecks::ClearCmdBufAndMemReferences(GLOBAL_CB_NODE *cb_node) { if (cb_node) { if (cb_node->memObjs.size() > 0) { for (auto mem : cb_node->memObjs) { DEVICE_MEM_INFO *pInfo = GetMemObjInfo(mem); if (pInfo) { pInfo->cb_bindings.erase(cb_node); } } cb_node->memObjs.clear(); } } } // Clear a single object binding from given memory object void CoreChecks::ClearMemoryObjectBinding(uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) { DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem); // This obj is bound to a memory object. Remove the reference to this object in that memory object's list if (mem_info) { mem_info->obj_bindings.erase({handle, type}); } } // ClearMemoryObjectBindings clears the binding of objects to memory // For the given object it pulls the memory bindings and makes sure that the bindings // no longer refer to the object being cleared. This occurs when objects are destroyed. 
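// Illustrative sketch (assumption, not code from this file): a typical destroy path would clear the
// bindings before erasing the state object, e.g. in a hypothetical buffer-destroy handler:
//
//     ClearMemoryObjectBindings(HandleToUint64(buffer), kVulkanObjectTypeBuffer);
//     bufferMap.erase(buffer);
//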
void CoreChecks::ClearMemoryObjectBindings(uint64_t handle, VulkanObjectType type) { BINDABLE *mem_binding = GetObjectMemBinding(handle, type); if (mem_binding) { if (!mem_binding->sparse) { ClearMemoryObjectBinding(handle, type, mem_binding->binding.mem); } else { // Sparse, clear all bindings for (auto &sparse_mem_binding : mem_binding->sparse_bindings) { ClearMemoryObjectBinding(handle, type, sparse_mem_binding.mem); } } } } // For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value. bool CoreChecks::VerifyBoundMemoryIsValid(VkDeviceMemory mem, uint64_t handle, const char *api_name, const char *type_name, const char *error_code) { bool result = false; if (VK_NULL_HANDLE == mem) { result = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code, "%s: Vk%s object %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().", api_name, type_name, report_data->FormatHandle(handle).c_str(), type_name); } else if (MEMORY_UNBOUND == mem) { result = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code, "%s: Vk%s object %s used with no memory bound and previously bound memory was freed. Memory must not be freed " "prior to this operation.", api_name, type_name, report_data->FormatHandle(handle).c_str()); } return result; } // Check to see if memory was ever bound to this image bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) { bool result = false; if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) { result = VerifyBoundMemoryIsValid(image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image", error_code); } return result; } // Check to see if memory was bound to this buffer bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name, const char *error_code) { bool result = false; if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) { result = VerifyBoundMemoryIsValid(buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name, "Buffer", error_code); } return result; } // SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object. // Corresponding valid usage checks are in ValidateSetMemBinding(). void CoreChecks::SetMemBinding(VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset, uint64_t handle, VulkanObjectType type) { assert(mem_binding); mem_binding->binding.mem = mem; mem_binding->UpdateBoundMemorySet(); // force recreation of cached set mem_binding->binding.offset = memory_offset; mem_binding->binding.size = mem_binding->requirements.size; if (mem != VK_NULL_HANDLE) { DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem); if (mem_info) { mem_info->obj_bindings.insert({handle, type}); // For image objects, make sure default memory state is correctly set // TODO : What's the best/correct way to handle this? if (kVulkanObjectTypeImage == type) { auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding); if (image_state) { VkImageCreateInfo ici = image_state->createInfo; if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { // TODO:: More memory state transition stuff. } } } } } } // Valid usage checks for a call to SetMemBinding(). 
// For NULL mem case, output warning
// Make sure given object is in global object map
// If a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, uint64_t handle, VulkanObjectType type, const char *apiName) {
    bool skip = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(handle, type);
        assert(mem_binding);
        if (mem_binding->sparse) {
            const char *error_code = "VUID-vkBindImageMemory-image-01045";
            const char *handle_type = "IMAGE";
            if (type == kVulkanObjectTypeBuffer) {
                error_code = "VUID-vkBindBufferMemory-buffer-01030";
                handle_type = "BUFFER";
            } else {
                assert(type == kVulkanObjectTypeImage);
            }
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), error_code,
                            "In %s, attempting to bind memory (%s) to object (%s) which was created with sparse memory flags "
                            "(VK_%s_CREATE_SPARSE_*_BIT).",
                            apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(handle).c_str(),
                            handle_type);
        }
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(mem_binding->binding.mem);
            if (prev_binding) {
                const char *error_code = "VUID-vkBindImageMemory-image-01044";
                if (type == kVulkanObjectTypeBuffer) {
                    error_code = "VUID-vkBindBufferMemory-buffer-01029";
                } else {
                    assert(type == kVulkanObjectTypeImage);
                }
                skip |=
                    log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), error_code,
                            "In %s, attempting to bind memory (%s) to object (%s) which has already been bound to mem object %s.",
                            apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(handle).c_str(),
                            report_data->FormatHandle(prev_binding->mem).c_str());
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), kVUID_Core_MemTrack_RebindObject,
                                "In %s, attempting to bind memory (%s) to object (%s) which was previously bound to memory that "
                                "has since been freed. Memory bindings are immutable in Vulkan so this attempt to bind to new "
                                "memory is not allowed.",
                                apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(handle).c_str());
            }
        }
    }
    return skip;
}

// For the NULL mem case, clear any previous binding. Otherwise:
// Make sure given object is in its object map
// If a previous binding existed, update binding
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Returns a "skip" value (currently always false; invalid handles are reported by the object tracker instead)
bool CoreChecks::SetSparseMemBinding(MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(handle, type);
        assert(mem_binding);
        if (mem_binding) {  // Invalid handles are reported by object tracker, but Get returns NULL for them, so avoid SEGV here
            assert(mem_binding->sparse);
            DEVICE_MEM_INFO *mem_info = GetMemObjInfo(binding.mem);
            if (mem_info) {
                mem_info->obj_bindings.insert({handle, type});
                // Need to set mem binding for this object
                mem_binding->sparse_bindings.insert(binding);
                mem_binding->UpdateBoundMemorySet();
            }
        }
    }
    return skip;
}

bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name,
                                           const char *error_code, bool optional = false) {
    bool skip = false;
    if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                        error_code,
                        "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
                        cmd_name, parameter_name);
    } else if (queue_family_index_map.find(queue_family) == queue_family_index_map.end()) {
        skip |= log_msg(
            report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), error_code,
            "%s: %s (= %" PRIu32
            ") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.",
            cmd_name, parameter_name, queue_family);
    }
    return skip;
}

bool CoreChecks::ValidateQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families, const char *cmd_name,
                                       const char *array_parameter_name, const char *unique_error_code,
                                       const char *valid_error_code, bool optional = false) {
    bool skip = false;
    if (queue_families) {
        std::unordered_set<uint32_t> set;
        for (uint32_t i = 0; i < queue_family_count; ++i) {
            std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]";

            if (set.count(queue_families[i])) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(device), unique_error_code, "%s: %s (=%" PRIu32 ") is not unique within %s array.",
                                cmd_name, parameter_name.c_str(), queue_families[i], array_parameter_name);
            } else {
                set.insert(queue_families[i]);
                skip |= ValidateDeviceQueueFamily(queue_families[i], cmd_name, parameter_name.c_str(), valid_error_code, optional);
            }
        }
    }
    return skip;
}

// Check object status for selected flag state
bool CoreChecks::ValidateStatus(GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags, const char *fail_msg,
                                const char *msg_code) {
    if (!(pNode->status & status_mask)) {
        return log_msg(report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(pNode->commandBuffer), msg_code, "command buffer object %s: %s.",
                       report_data->FormatHandle(pNode->commandBuffer).c_str(), fail_msg);
    }
    return false;
}

// Retrieve pipeline node ptr for given pipeline object
PIPELINE_STATE
*CoreChecks::GetPipelineState(VkPipeline pipeline) { auto it = pipelineMap.find(pipeline); if (it == pipelineMap.end()) { return nullptr; } return it->second.get(); } RENDER_PASS_STATE *CoreChecks::GetRenderPassState(VkRenderPass renderpass) { auto it = renderPassMap.find(renderpass); if (it == renderPassMap.end()) { return nullptr; } return it->second.get(); } std::shared_ptr<RENDER_PASS_STATE> CoreChecks::GetRenderPassStateSharedPtr(VkRenderPass renderpass) { auto it = renderPassMap.find(renderpass); if (it == renderPassMap.end()) { return nullptr; } return it->second; } FRAMEBUFFER_STATE *CoreChecks::GetFramebufferState(VkFramebuffer framebuffer) { auto it = frameBufferMap.find(framebuffer); if (it == frameBufferMap.end()) { return nullptr; } return it->second.get(); } std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(CoreChecks const *dev_data, VkDescriptorSetLayout dsLayout) { auto it = dev_data->descriptorSetLayoutMap.find(dsLayout); if (it == dev_data->descriptorSetLayoutMap.end()) { return nullptr; } return it->second; } PIPELINE_LAYOUT_NODE const *CoreChecks::GetPipelineLayout(VkPipelineLayout pipeLayout) { auto it = pipelineLayoutMap.find(pipeLayout); if (it == pipelineLayoutMap.end()) { return nullptr; } return &it->second; } shader_module const *CoreChecks::GetShaderModuleState(VkShaderModule module) { auto it = shaderModuleMap.find(module); if (it == shaderModuleMap.end()) { return nullptr; } return it->second.get(); } const TEMPLATE_STATE *CoreChecks::GetDescriptorTemplateState(VkDescriptorUpdateTemplateKHR descriptor_update_template) { const auto it = desc_template_map.find(descriptor_update_template); if (it == desc_template_map.cend()) { return nullptr; } return it->second.get(); } // Return true if for a given PSO, the given state enum is dynamic, else return false static bool IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) { if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) { for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) { if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true; } } return false; } // Validate state stored as flags at time of draw call bool CoreChecks::ValidateDrawStateFlags(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed, const char *msg_code) { bool result = false; if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) { result |= ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic line width state not set for this command buffer", msg_code); } if (pPipe->graphicsPipelineCI.pRasterizationState && (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic depth bias state not set for this command buffer", msg_code); } if (pPipe->blendConstantsEnabled) { result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic blend constants state not set for this command buffer", msg_code); } if (pPipe->graphicsPipelineCI.pDepthStencilState && (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic depth bounds state not set for this command buffer", msg_code); } if 
(pPipe->graphicsPipelineCI.pDepthStencilState && (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil read mask state not set for this command buffer", msg_code); result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil write mask state not set for this command buffer", msg_code); result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil reference state not set for this command buffer", msg_code); } if (indexed) { result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code); } return result; } bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach, const char *msg, const char *caller, const char *error_code) { return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, HandleToUint64(rp1_state->renderPass), error_code, "%s: RenderPasses incompatible between %s w/ renderPass %s and %s w/ renderPass %s Attachment %u is not " "compatible with %u: %s.", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(), primary_attach, secondary_attach, msg); } bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach, const char *caller, const char *error_code) { bool skip = false; const auto &primaryPassCI = rp1_state->createInfo; const auto &secondaryPassCI = rp2_state->createInfo; if (primaryPassCI.attachmentCount <= primary_attach) { primary_attach = VK_ATTACHMENT_UNUSED; } if (secondaryPassCI.attachmentCount <= secondary_attach) { secondary_attach = VK_ATTACHMENT_UNUSED; } if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) { return skip; } if (primary_attach == VK_ATTACHMENT_UNUSED) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "The first is unused while the second is not.", caller, error_code); return skip; } if (secondary_attach == VK_ATTACHMENT_UNUSED) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "The second is unused while the first is not.", caller, error_code); return skip; } if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different formats.", caller, error_code); } if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different samples.", caller, error_code); } if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) { skip |= 
LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different flags.", caller, error_code); } return skip; } bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass, const char *caller, const char *error_code) { bool skip = false; const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass]; const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass]; uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount); for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) { uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.inputAttachmentCount) { primary_input_attach = primary_desc.pInputAttachments[i].attachment; } if (i < secondary_desc.inputAttachmentCount) { secondary_input_attach = secondary_desc.pInputAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach, secondary_input_attach, caller, error_code); } uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount); for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) { uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.colorAttachmentCount) { primary_color_attach = primary_desc.pColorAttachments[i].attachment; } if (i < secondary_desc.colorAttachmentCount) { secondary_color_attach = secondary_desc.pColorAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach, secondary_color_attach, caller, error_code); if (rp1_state->createInfo.subpassCount > 1) { uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) { primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment; } if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) { secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach, secondary_resolve_attach, caller, error_code); } } uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED; if (primary_desc.pDepthStencilAttachment) { primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment; } if (secondary_desc.pDepthStencilAttachment) { secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach, secondary_depthstencil_attach, caller, error_code); return skip; } // Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible. 
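// For example (illustrative): a primary pass whose attachment 0 is VK_FORMAT_B8G8R8A8_UNORM at
// VK_SAMPLE_COUNT_1_BIT is incompatible with a secondary pass whose attachment 0 uses the same format
// at VK_SAMPLE_COUNT_4_BIT, since the per-attachment checks above require format, sample count, and
// flags to all match.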
// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and // will then feed into this function bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller, const char *error_code) { bool skip = false; if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, HandleToUint64(rp1_state->renderPass), error_code, "%s: RenderPasses incompatible between %s w/ renderPass %s with a subpassCount of %u and %s w/ renderPass " "%s with a subpassCount of %u.", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), rp1_state->createInfo.subpassCount, type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(), rp2_state->createInfo.subpassCount); } else { for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) { skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code); } } return skip; } // Return Set node ptr for specified set or else NULL cvdescriptorset::DescriptorSet *CoreChecks::GetSetNode(VkDescriptorSet set) { auto set_it = setMap.find(set); if (set_it == setMap.end()) { return NULL; } return set_it->second; } // For given pipeline, return number of MSAA samples, or one if MSAA disabled static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) { if (pipe->graphicsPipelineCI.pMultisampleState != NULL && VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) { return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples; } return VK_SAMPLE_COUNT_1_BIT; } static void ListBits(std::ostream &s, uint32_t bits) { for (int i = 0; i < 32 && bits; i++) { if (bits & (1 << i)) { s << i; bits &= ~(1 << i); if (bits) { s << ","; } } } } // Validate draw-time state related to the PSO bool CoreChecks::ValidatePipelineDrawtimeState(LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB, CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) { bool skip = false; // Verify vertex binding if (pPipeline->vertex_binding_descriptions_.size() > 0) { for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) { const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding; if ((pCB->current_draw_data.vertex_buffer_bindings.size() < (vertex_binding + 1)) || (pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer == VK_NULL_HANDLE)) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds, "The Pipeline State Object (%s) expects that this Command Buffer's vertex binding Index %u should be set via " "vkCmdBindVertexBuffers. 
This is because VkVertexInputBindingDescription struct at " "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.", report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), vertex_binding, i, vertex_binding); } } // Verify vertex attribute address alignment for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) { const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i]; const auto vertex_binding = attribute_description.binding; const auto attribute_offset = attribute_description.offset; const auto attribute_format = attribute_description.format; const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding); if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) && (vertex_binding < pCB->current_draw_data.vertex_buffer_bindings.size()) && (pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer != VK_NULL_HANDLE)) { const auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride; const auto vertex_buffer_offset = pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].offset; const auto buffer_state = GetBufferState(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer); // Use only memory binding offset as base memory should be properly aligned by the driver const auto buffer_binding_address = buffer_state->binding.offset + vertex_buffer_offset; // Use 1 as vertex/instance index to use buffer stride as well const auto attrib_address = buffer_binding_address + vertex_buffer_stride + attribute_offset; uint32_t vtx_attrib_req_alignment = FormatElementSize(attribute_format); if (FormatElementIsTexel(attribute_format)) { vtx_attrib_req_alignment /= FormatChannelCount(attribute_format); } if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer), kVUID_Core_DrawState_InvalidVtxAttributeAlignment, "Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER " from pipeline (%s) and vertex buffer (%s).", i, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), report_data->FormatHandle(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer).c_str()); } } } } else { if ((!pCB->current_draw_data.vertex_buffer_bindings.empty()) && (!pCB->vertex_buffer_used)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds, "Vertex buffers are bound to command buffer (%s) but no vertex buffers are attached to this Pipeline " "State Object (%s).", report_data->FormatHandle(pCB->commandBuffer).c_str(), report_data->FormatHandle(state.pipeline_state->pipeline).c_str()); } } // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count. // Skip check if rasterization is disabled or there is no viewport. 
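    // Worked example (illustrative) of the mask arithmetic below: viewport/scissor slot i maps to bit i.
    // With pViewportState->viewportCount == 3, requiredViewportsMask == (1 << 3) - 1 == 0b111; if the
    // app set only slots 0 and 2 (viewportMask == 0b101), then
    // missingViewportMask == ~0b101 & 0b111 == 0b010, and ListBits() reports slot 1 as missing.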
if ((!pPipeline->graphicsPipelineCI.pRasterizationState || (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) && pPipeline->graphicsPipelineCI.pViewportState) { bool dynViewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT); bool dynScissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR); if (dynViewport) { const auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1; const auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask; if (missingViewportMask) { std::stringstream ss; ss << "Dynamic viewport(s) "; ListBits(ss, missingViewportMask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport()."; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str()); } } if (dynScissor) { const auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1; const auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask; if (missingScissorMask) { std::stringstream ss; ss << "Dynamic scissor(s) "; ListBits(ss, missingScissorMask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor()."; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str()); } } } // Verify that any MSAA request in PSO matches sample# in bound FB // Skip the check if rasterization is disabled. if (!pPipeline->graphicsPipelineCI.pRasterizationState || (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) { VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline); if (pCB->activeRenderPass) { const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr(); const VkSubpassDescription2KHR *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass]; uint32_t i; unsigned subpass_num_samples = 0; for (i = 0; i < subpass_desc->colorAttachmentCount; i++) { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment != VK_ATTACHMENT_UNUSED) subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples; } if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples; } if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples) && ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NumSamplesMismatch, "Num samples mismatch! 
At draw-time in Pipeline (%s) with %u samples while current RenderPass (%s) w/ " "%u samples!", report_data->FormatHandle(pPipeline->pipeline).c_str(), pso_num_samples, report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), subpass_num_samples); } } else { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NoActiveRenderpass, "No active render pass found at draw-time in Pipeline (%s)!", report_data->FormatHandle(pPipeline->pipeline).c_str()); } } // Verify that PSO creation renderPass is compatible with active renderPass if (pCB->activeRenderPass) { // TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted // Error codes for renderpass and subpass mismatches auto rp_error = "VUID-vkCmdDraw-renderPass-00435", sp_error = "VUID-vkCmdDraw-subpass-00436"; switch (cmd_type) { case CMD_DRAWINDEXED: rp_error = "VUID-vkCmdDrawIndexed-renderPass-00454"; sp_error = "VUID-vkCmdDrawIndexed-subpass-00455"; break; case CMD_DRAWINDIRECT: rp_error = "VUID-vkCmdDrawIndirect-renderPass-00479"; sp_error = "VUID-vkCmdDrawIndirect-subpass-00480"; break; case CMD_DRAWINDIRECTCOUNTAMD: rp_error = "VUID-vkCmdDrawIndirectCountAMD-renderPass-00507"; sp_error = "VUID-vkCmdDrawIndirectCountAMD-subpass-00508"; break; case CMD_DRAWINDIRECTCOUNTKHR: rp_error = "VUID-vkCmdDrawIndirectCountKHR-renderPass-03113"; sp_error = "VUID-vkCmdDrawIndirectCountKHR-subpass-03114"; break; case CMD_DRAWINDEXEDINDIRECT: rp_error = "VUID-vkCmdDrawIndexedIndirect-renderPass-00531"; sp_error = "VUID-vkCmdDrawIndexedIndirect-subpass-00532"; break; case CMD_DRAWINDEXEDINDIRECTCOUNTAMD: rp_error = "VUID-vkCmdDrawIndexedIndirectCountAMD-renderPass-00560"; sp_error = "VUID-vkCmdDrawIndexedIndirectCountAMD-subpass-00561"; break; case CMD_DRAWINDEXEDINDIRECTCOUNTKHR: rp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-renderPass-03145"; sp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-subpass-03146"; break; case CMD_DRAWMESHTASKSNV: rp_error = "VUID-vkCmdDrawMeshTasksNV-renderPass-02120"; sp_error = "VUID-vkCmdDrawMeshTasksNV-subpass-02121"; break; case CMD_DRAWMESHTASKSINDIRECTNV: rp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-renderPass-02148"; sp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-subpass-02149"; break; case CMD_DRAWMESHTASKSINDIRECTCOUNTNV: rp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-renderPass-02184"; sp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-subpass-02185"; break; default: assert(CMD_DRAW == cmd_type); break; } if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) { // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass, "pipeline state object", pPipeline->rp_state.get(), caller, rp_error); } if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.", pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass); } } return skip; } // For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to // pipelineLayout[layoutIndex] static bool 
VerifySetLayoutCompatibility(const cvdescriptorset::DescriptorSet *descriptor_set, PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex, string &errorMsg) { auto num_sets = pipeline_layout->set_layouts.size(); if (layoutIndex >= num_sets) { stringstream errorStr; errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index " << layoutIndex; errorMsg = errorStr.str(); return false; } if (descriptor_set->IsPushDescriptor()) return true; auto layout_node = pipeline_layout->set_layouts[layoutIndex]; return descriptor_set->IsCompatible(layout_node.get(), &errorMsg); } // Validate overall state at the time of a draw call bool CoreChecks::ValidateCmdBufDrawState(GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed, const VkPipelineBindPoint bind_point, const char *function, const char *pipe_err_code, const char *state_err_code) { bool result = false; auto const &state = cb_node->lastBound[bind_point]; PIPELINE_STATE *pPipe = state.pipeline_state; if (nullptr == pPipe) { return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), pipe_err_code, "Must not call %s on this command buffer while there is no %s pipeline bound.", function, bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute"); } // First check flag states if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result = ValidateDrawStateFlags(cb_node, pPipe, indexed, state_err_code); // Now complete other state checks string errorString; auto const &pipeline_layout = pPipe->pipeline_layout; for (const auto &set_binding_pair : pPipe->active_slots) { uint32_t setIndex = set_binding_pair.first; // If valid set is not bound throw an error if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) { result |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_DescriptorSetNotBound, "VkPipeline %s uses set #%u but that set is not bound.", report_data->FormatHandle(pPipe->pipeline).c_str(), setIndex); } else if (!VerifySetLayoutCompatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex, errorString)) { // Set is bound but not compatible w/ overlapping pipeline_layout from PSO VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet(); result |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(setHandle), kVUID_Core_DrawState_PipelineLayoutsIncompatible, "VkDescriptorSet (%s) bound as set #%u is not compatible with overlapping VkPipelineLayout %s due to: %s", report_data->FormatHandle(setHandle).c_str(), setIndex, report_data->FormatHandle(pipeline_layout.layout).c_str(), errorString.c_str()); } else { // Valid set is bound and layout compatible, validate that it's updated // Pull the set node cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex]; // Validate the draw-time state for this descriptor set std::string err_str; if (!descriptor_set->IsPushDescriptor()) { // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks. 
// Here, the currently bound pipeline determines whether an image validation check is redundant... // for images are the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline. const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_node, pPipe); const auto &binding_req_map = reduced_map.Map(); if (!descriptor_set->ValidateDrawState(binding_req_map, state.dynamicOffsets[setIndex], cb_node, function, &err_str)) { auto set = descriptor_set->GetSet(); result |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(set), kVUID_Core_DrawState_DescriptorSetNotUpdated, "Descriptor set %s bound as set #%u encountered the following validation error at %s time: %s", report_data->FormatHandle(set).c_str(), setIndex, function, err_str.c_str()); } } } } // Check general pipeline state that needs to be validated at drawtime if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pPipe, function); return result; } void CoreChecks::UpdateDrawState(GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) { auto const &state = cb_state->lastBound[bind_point]; PIPELINE_STATE *pPipe = state.pipeline_state; if (VK_NULL_HANDLE != state.pipeline_layout) { for (const auto &set_binding_pair : pPipe->active_slots) { uint32_t setIndex = set_binding_pair.first; // Pull the set node cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex]; if (!descriptor_set->IsPushDescriptor()) { // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_state); const auto &binding_req_map = reduced_map.Map(); // Bind this set and its active descriptor resources to the command buffer descriptor_set->UpdateDrawState(this, cb_state, binding_req_map); // For given active slots record updated images & buffers descriptor_set->GetStorageUpdates(binding_req_map, &cb_state->updateBuffers, &cb_state->updateImages); } } } if (!pPipe->vertex_binding_descriptions_.empty()) { cb_state->vertex_buffer_used = true; } } bool CoreChecks::ValidatePipelineLocked(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) { bool skip = false; PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get(); // If create derivative bit is set, check that we've specified a base // pipeline correctly, and that the base pipeline was created to allow // derivatives. 
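    // Illustrative example (assumed caller-side code, not from this file) of a request that passes the
    // checks below -- exactly one of the two base fields is set, the base comes earlier in the array,
    // and the base was created with VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT:
    //
    //     VkGraphicsPipelineCreateInfo derived_ci = base_ci;
    //     derived_ci.flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
    //     derived_ci.basePipelineIndex = 0;                // derive from pCreateInfos[0]
    //     derived_ci.basePipelineHandle = VK_NULL_HANDLE;  // so the handle must be null
    //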
if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { PIPELINE_STATE *pBasePipeline = nullptr; if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^ (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) { // This check is a superset of "VUID-VkGraphicsPipelineCreateInfo-flags-00724" and // "VUID-VkGraphicsPipelineCreateInfo-flags-00725" skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState, "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified"); } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) { if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-vkCreateGraphicsPipelines-flags-00720", "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline."); } else { pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get(); } } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) { pBasePipeline = GetPipelineState(pPipeline->graphicsPipelineCI.basePipelineHandle); } if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState, "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives."); } } return skip; } // UNLOCKED pipeline validation. DO NOT lookup objects in the CoreChecks->* maps in this function. bool CoreChecks::ValidatePipelineUnlocked(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) { bool skip = false; PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get(); // Ensure the subpass index is valid. If not, then ValidateAndCapturePipelineShaderState // produces nonsense errors that confuse users. Other layers should already // emit errors for renderpass being invalid. 
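    // For instance (illustrative): with a render pass created with subpassCount == 2, a pipeline whose
    // graphicsPipelineCI.subpass is 2 fails the range check below; subpass_desc is then left null so
    // the color-blend comparisons that depend on it are skipped rather than dereferenced.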
    const bool subpass_index_valid = (pPipeline->graphicsPipelineCI.subpass < pPipeline->rp_state->createInfo.subpassCount);
    auto subpass_desc =
        subpass_index_valid ? &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
    if (!subpass_index_valid) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                        "VUID-VkGraphicsPipelineCreateInfo-subpass-00759",
                        "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).",
                        pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
    }

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
        // subpass_desc is null when the subpass index was out of range above, so guard the dereference
        if (subpass_desc && (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746",
                            "vkCreateGraphicsPipelines(): Render pass (%s) subpass %u has colorAttachmentCount of %u which doesn't "
                            "match the pColorBlendState->attachmentCount of %u.",
                            report_data->FormatHandle(pPipeline->rp_state->renderPass).c_str(),
                            pPipeline->graphicsPipelineCI.subpass, subpass_desc->colorAttachmentCount,
                            color_blend_state->attachmentCount);
        }
        if (!enabled_features.core.independentBlend) {
            if (pPipeline->attachments.size() > 1) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    // Quoting the spec: "If [the independent blend] feature is not enabled, the
                    // VkPipelineColorBlendAttachmentState settings for all color attachments must be identical."
                    // VkPipelineColorBlendAttachmentState contains only attachment state, so memcmp is best suited for the
                    // comparison
                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
                               sizeof(pAttachments[0]))) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                        HandleToUint64(device), "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605",
                                        "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
                                        "pAttachments must be identical.");
                        break;
                    }
                }
            }
        }
        if (!enabled_features.core.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606",
                            "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.");
        }
        for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
            if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
                if (!enabled_features.core.dualSrcBlend) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608",
                                    "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
                                    "].srcColorBlendFactor uses a dual-source blend factor 
(%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor); } } if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor); } } if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor); } } if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor); } } } } if (ValidateAndCapturePipelineShaderState(pPipeline)) { skip = true; } // Each shader's stage must be unique if (pPipeline->duplicate_shaders) { for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) { if (pPipeline->duplicate_shaders & stage) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState, "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s", string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage))); } } } if (device_extensions.vk_nv_mesh_shader) { // VS or mesh is required if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 
VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-stage-02096", "Invalid Pipeline CreateInfo State: Vertex Shader or Mesh Shader required."); } // Can't mix mesh and VTG if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) && (pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-02095", "Invalid Pipeline CreateInfo State: Geometric shader stages must either be all mesh (mesh | task) " "or all VTG (vertex, tess control, tess eval, geom)."); } } else { // VS is required if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-stage-00727", "Invalid Pipeline CreateInfo State: Vertex Shader required."); } } if (!enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineShaderStageCreateInfo-stage-02091", "Invalid Pipeline CreateInfo State: Mesh Shader not supported."); } if (!enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineShaderStageCreateInfo-stage-02092", "Invalid Pipeline CreateInfo State: Task Shader not supported."); } // Either both or neither TC/TE shaders should be defined bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0; bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0; if (has_control && !has_eval) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-00729", "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair."); } if (!has_control && has_eval) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-00730", "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair."); } // Compute shaders should be specified independent of Gfx shaders if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-stage-00728", "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline."); } if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-02098", "Invalid Pipeline CreateInfo State: Missing pInputAssemblyState."); } // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid 
for tessellation pipelines. // Mismatching primitive topology and tessellation fails graphics pipeline creation. if (has_control && has_eval && (!pPipeline->graphicsPipelineCI.pInputAssemblyState || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-00736", "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for " "tessellation pipelines."); } if (pPipeline->graphicsPipelineCI.pInputAssemblyState) { if (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) { if (!has_control || !has_eval) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-topology-00737", "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid " "for tessellation pipelines."); } } if ((pPipeline->graphicsPipelineCI.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428", "topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.", string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } if ((enabled_features.core.geometryShader == VK_FALSE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429", "topology is %s and geometry shaders feature is not enabled. It is invalid.", string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } if ((enabled_features.core.tessellationShader == VK_FALSE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430", "topology is %s and tessellation shaders feature is not enabled. 
It is invalid.", string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } } // If a rasterization state is provided... if (pPipeline->graphicsPipelineCI.pRasterizationState) { if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) && (!enabled_features.core.depthClamp)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782", "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member " "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE."); } if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) && (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_InvalidFeature, "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member " "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the " "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled"); } // If rasterization is enabled... if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) { if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) && (!enabled_features.core.alphaToOne)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785", "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable " "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE."); } // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure if (subpass_desc && subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { if (!pPipeline->graphicsPipelineCI.pDepthStencilState) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752", "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled " "and subpass uses a depth/stencil attachment."); } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) && (!enabled_features.core.depthBounds)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598", "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the " "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be " "set to VK_FALSE."); } } // If subpass uses color attachments, pColorBlendState must be valid pointer if (subpass_desc) { uint32_t color_attachment_count = 0; for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) { if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) { ++color_attachment_count; } } if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) { skip |= log_msg(report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753", "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and " "subpass uses color attachments."); } } } } if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-02097", "Invalid Pipeline CreateInfo State: Missing pVertexInputState."); } auto vi = pPipeline->graphicsPipelineCI.pVertexInputState; if (vi != NULL) { for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) { VkFormat format = vi->pVertexAttributeDescriptions[j].format; // Internal call to get format info. Still goes through layers, could potentially go directly to ICD. VkFormatProperties properties; DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties); if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkVertexInputAttributeDescription-format-00623", "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format " "(%s) is not a supported vertex buffer format.", pipelineIndex, j, string_VkFormat(format)); } } } auto accumColorSamples = [subpass_desc, pPipeline](uint32_t &samples) { for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment != VK_ATTACHMENT_UNUSED) { samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } } }; if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples)) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_num_samples = 0; accumColorSamples(subpass_num_samples); if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } // subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED. // Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED. 
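        // Note: VkSampleCountFlagBits values are single bits, so the OR-accumulated subpass_num_samples is a power of
        // two exactly when every used attachment has the same sample count; the IsPowerOfTwo() test below therefore
        // doubles as an "all attachments agree" check.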
        if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-00757",
                            "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
                            "does not match the number of samples of the RenderPass color and/or depth attachment.",
                            pipelineIndex, raster_samples);
        }
    }

    if (device_extensions.vk_amd_mixed_attachment_samples) {
        VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
        for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
            if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
                max_sample_count =
                    std::max(max_sample_count,
                             pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
            }
        }
        if (subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            max_sample_count =
                std::max(max_sample_count,
                         pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
        }
        if ((pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) &&
            (pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
                            "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
                            "attachment samples (%s) used in subpass %u.",
                            pipelineIndex,
                            string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
                            string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
        }
    }

    if (device_extensions.vk_nv_framebuffer_mixed_samples) {
        uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
        uint32_t subpass_color_samples = 0;

        accumColorSamples(subpass_color_samples);

        if (subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
            const uint32_t subpass_depth_samples =
                static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);

            if (pPipeline->graphicsPipelineCI.pDepthStencilState) {
                const bool ds_test_enabled =
                    (pPipeline->graphicsPipelineCI.pDepthStencilState->depthTestEnable == VK_TRUE) ||
                    (pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) ||
                    (pPipeline->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE);

                if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01411",
                                    "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
                                    "does not match the number of samples of the RenderPass depth attachment (%u).",
                                    pipelineIndex, raster_samples, subpass_depth_samples);
                }
            }
        }

        if (IsPowerOfTwo(subpass_color_samples)) {
            if (raster_samples < subpass_color_samples) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01412",
                                "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
                                "is not greater than or equal to the number of samples of the RenderPass color attachment (%u).",
                                pipelineIndex, raster_samples, subpass_color_samples);
            }

            if (pPipeline->graphicsPipelineCI.pMultisampleState) {
                if ((raster_samples > subpass_color_samples) &&
                    (pPipeline->graphicsPipelineCI.pMultisampleState->sampleShadingEnable == VK_TRUE)) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    HandleToUint64(device),
                                    "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415",
                                    "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->sampleShadingEnable must be "
                                    "VK_FALSE when pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) is greater "
                                    "than the number of samples of the subpass color attachment (%u).",
                                    pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples);
                }

                const auto *coverage_modulation_state = lvl_find_in_chain<VkPipelineCoverageModulationStateCreateInfoNV>(
                    pPipeline->graphicsPipelineCI.pMultisampleState->pNext);

                if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) {
                    if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                        HandleToUint64(device),
                                        "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405",
                                        "vkCreateGraphicsPipelines: pCreateInfos[%d] "
                                        "VkPipelineCoverageModulationStateCreateInfoNV coverageModulationTableCount of %u is "
                                        "invalid.",
                                        pipelineIndex, coverage_modulation_state->coverageModulationTableCount);
                    }
                }
            }
        }
    }

    if (device_extensions.vk_nv_fragment_coverage_to_color) {
        const auto coverage_to_color_state =
            lvl_find_in_chain<VkPipelineCoverageToColorStateCreateInfoNV>(pPipeline->graphicsPipelineCI.pMultisampleState);

        if (coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
            bool attachment_is_valid = false;
            std::string error_detail;

            if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
                const auto color_attachment_ref =
                    subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
                if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
                    const auto color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];

                    switch (color_attachment.format) {
                        case VK_FORMAT_R8_UINT:
                        case VK_FORMAT_R8_SINT:
                        case VK_FORMAT_R16_UINT:
                        case VK_FORMAT_R16_SINT:
                        case VK_FORMAT_R32_UINT:
                        case VK_FORMAT_R32_SINT:
                            attachment_is_valid = true;
                            break;
                        default:
                            string_sprintf(&error_detail, "references an attachment with an invalid format (%s).",
                                           string_VkFormat(color_attachment.format));
                            break;
                    }
                } else {
                    string_sprintf(&error_detail,
                                   "references an invalid attachment. The subpass pColorAttachments[%" PRIu32
                                   "].attachment has the value VK_ATTACHMENT_UNUSED.",
                                   coverage_to_color_state->coverageToColorLocation);
                }
            } else {
                string_sprintf(&error_detail,
                               "references a non-existent attachment since the subpass colorAttachmentCount is %" PRIu32 ".",
                               subpass_desc->colorAttachmentCount);
            }

            if (!attachment_is_valid) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(device),
                                "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
                                "vkCreateGraphicsPipelines: pCreateInfos[%" PRId32
                                "].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV coverageToColorLocation = "
                                "%" PRIu32 " %s",
                                pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
            }
        }
    }

    return skip;
}

// Block of code at start here specifically for managing/tracking DSs

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_STATE *CoreChecks::GetDescriptorPoolState(const VkDescriptorPool pool) {
    auto pool_it = descriptorPoolMap.find(pool);
    if (pool_it == descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) {
    if (disabled.idle_descriptor_set) return false;
    bool skip = false;
    auto set_node = setMap.find(set);
    if (set_node == setMap.end()) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(set), kVUID_Core_DrawState_DoubleDestroy,
                        "Cannot call %s() on descriptor set %s that has not been allocated.", func_str,
                        report_data->FormatHandle(set).c_str());
    } else {
        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
        if (set_node->second->in_use.load()) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(set), "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
                            "Cannot call %s() on descriptor set %s that is in use by a command buffer.", func_str,
                            report_data->FormatHandle(set).c_str());
        }
    }
    return skip;
}

// Remove set from setMap and delete the set
void CoreChecks::FreeDescriptorSet(cvdescriptorset::DescriptorSet *descriptor_set) {
    setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}

// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
void CoreChecks::DeletePools() {
    for (auto ii = descriptorPoolMap.begin(); ii != descriptorPoolMap.end();) {
        // Remove this pools' sets from setMap and delete them
        for (auto ds : ii->second->sets) {
            FreeDescriptorSet(ds);
        }
        ii->second->sets.clear();
        delete ii->second;
        ii = descriptorPoolMap.erase(ii);
    }
}

// For given CB object, fetch associated CB Node from map
GLOBAL_CB_NODE *CoreChecks::GetCBNode(const VkCommandBuffer cb) {
    auto it = commandBufferMap.find(cb);
    if (it == commandBufferMap.end()) {
        return NULL;
    }
    return it->second;
}

// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool CoreChecks::ValidateCmdSubpassState(const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass) return false;
    bool skip = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS &&
         cmd_type != CMD_NEXTSUBPASS2KHR && cmd_type != CMD_ENDRENDERPASS2KHR)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip;
}

bool CoreChecks::ValidateCmdQueueFlags(const GLOBAL_CB_NODE *cb_node, const char *caller_name, VkQueueFlags required_flags,
                                       const char *error_code) {
    auto pool = GetCommandPoolNode(cb_node->createInfo.commandPool);
    if (pool) {
        VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].queueFlags;
        if (!(required_flags & queue_flags)) {
            string required_flags_string;
            for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
                if (flag & required_flags) {
                    if (required_flags_string.size()) {
                        required_flags_string += " or ";
                    }
                    required_flags_string += string_VkQueueFlagBits(flag);
                }
            }
            return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                           HandleToUint64(cb_node->commandBuffer), error_code,
                           "Cannot call %s on a command buffer allocated from a pool without %s capabilities.", caller_name,
                           required_flags_string.c_str());
        }
    }
    return false;
}

static char const *GetCauseStr(VK_OBJECT obj) {
    if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
    if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
    return "destroyed";
}

bool CoreChecks::ReportInvalidCommandBuffer(const GLOBAL_CB_NODE *cb_state, const char *call_source) {
    bool skip = false;
    for (auto obj : cb_state->broken_bindings) {
        const char *type_str = object_string[obj.type];
        const char *cause_str = GetCauseStr(obj);
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "You are adding %s to command buffer %s that is invalid because bound %s %s was %s.", call_source,
                        report_data->FormatHandle(cb_state->commandBuffer).c_str(), type_str,
                        report_data->FormatHandle(obj.handle).c_str(), cause_str);
    }
    return skip;
}

// 'commandBuffer must be in the recording state' valid usage error code for each command
// Note: grepping for ^^^^^^^^^ in vk_validation_database is easily massaged into the following list
// Note: C++11 doesn't automatically devolve enum types to the underlying type for hash traits purposes (fixed in C++14)
using CmdTypeHashType = std::underlying_type<CMD_TYPE>::type;
static const std::unordered_map<CmdTypeHashType, std::string> must_be_recording_map = {
    {CMD_NONE, kVUIDUndefined},  // UNMATCHED
    {CMD_BEGINQUERY, "VUID-vkCmdBeginQuery-commandBuffer-recording"},
    {CMD_BEGINRENDERPASS, "VUID-vkCmdBeginRenderPass-commandBuffer-recording"},
    {CMD_BEGINRENDERPASS2KHR, "VUID-vkCmdBeginRenderPass2KHR-commandBuffer-recording"},
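    // (Entries continue below, kept in rough alphabetical order by command name.)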
    {CMD_BINDDESCRIPTORSETS, "VUID-vkCmdBindDescriptorSets-commandBuffer-recording"},
    {CMD_BINDINDEXBUFFER, "VUID-vkCmdBindIndexBuffer-commandBuffer-recording"},
    {CMD_BINDPIPELINE, "VUID-vkCmdBindPipeline-commandBuffer-recording"},
    {CMD_BINDSHADINGRATEIMAGE, "VUID-vkCmdBindShadingRateImageNV-commandBuffer-recording"},
    {CMD_BINDVERTEXBUFFERS, "VUID-vkCmdBindVertexBuffers-commandBuffer-recording"},
    {CMD_BLITIMAGE, "VUID-vkCmdBlitImage-commandBuffer-recording"},
    {CMD_CLEARATTACHMENTS, "VUID-vkCmdClearAttachments-commandBuffer-recording"},
    {CMD_CLEARCOLORIMAGE, "VUID-vkCmdClearColorImage-commandBuffer-recording"},
    {CMD_CLEARDEPTHSTENCILIMAGE, "VUID-vkCmdClearDepthStencilImage-commandBuffer-recording"},
    {CMD_COPYBUFFER, "VUID-vkCmdCopyBuffer-commandBuffer-recording"},
    {CMD_COPYBUFFERTOIMAGE, "VUID-vkCmdCopyBufferToImage-commandBuffer-recording"},
    {CMD_COPYIMAGE, "VUID-vkCmdCopyImage-commandBuffer-recording"},
    {CMD_COPYIMAGETOBUFFER, "VUID-vkCmdCopyImageToBuffer-commandBuffer-recording"},
    {CMD_COPYQUERYPOOLRESULTS, "VUID-vkCmdCopyQueryPoolResults-commandBuffer-recording"},
    {CMD_DEBUGMARKERBEGINEXT, "VUID-vkCmdDebugMarkerBeginEXT-commandBuffer-recording"},
    {CMD_DEBUGMARKERENDEXT, "VUID-vkCmdDebugMarkerEndEXT-commandBuffer-recording"},
    {CMD_DEBUGMARKERINSERTEXT, "VUID-vkCmdDebugMarkerInsertEXT-commandBuffer-recording"},
    {CMD_DISPATCH, "VUID-vkCmdDispatch-commandBuffer-recording"},
    // Exclude KHX (if not already present)
    // {CMD_DISPATCHBASEKHX, "VUID-vkCmdDispatchBase-commandBuffer-recording"},
    {CMD_DISPATCHINDIRECT, "VUID-vkCmdDispatchIndirect-commandBuffer-recording"},
    {CMD_DRAW, "VUID-vkCmdDraw-commandBuffer-recording"},
    {CMD_DRAWINDEXED, "VUID-vkCmdDrawIndexed-commandBuffer-recording"},
    {CMD_DRAWINDEXEDINDIRECT, "VUID-vkCmdDrawIndexedIndirect-commandBuffer-recording"},
    // Exclude vendor ext (if not already present)
    // {CMD_DRAWINDEXEDINDIRECTCOUNTAMD, "VUID-vkCmdDrawIndexedIndirectCountAMD-commandBuffer-recording"},
    {CMD_DRAWINDEXEDINDIRECTCOUNTKHR, "VUID-vkCmdDrawIndexedIndirectCountKHR-commandBuffer-recording"},
    {CMD_DRAWINDIRECT, "VUID-vkCmdDrawIndirect-commandBuffer-recording"},
    // Exclude vendor ext (if not already present)
    // {CMD_DRAWINDIRECTCOUNTAMD, "VUID-vkCmdDrawIndirectCountAMD-commandBuffer-recording"},
    {CMD_DRAWINDIRECTCOUNTKHR, "VUID-vkCmdDrawIndirectCountKHR-commandBuffer-recording"},
    {CMD_DRAWMESHTASKSNV, "VUID-vkCmdDrawMeshTasksNV-commandBuffer-recording"},
    {CMD_DRAWMESHTASKSINDIRECTNV, "VUID-vkCmdDrawMeshTasksIndirectNV-commandBuffer-recording"},
    {CMD_DRAWMESHTASKSINDIRECTCOUNTNV, "VUID-vkCmdDrawMeshTasksIndirectCountNV-commandBuffer-recording"},
    {CMD_ENDCOMMANDBUFFER, "VUID-vkEndCommandBuffer-commandBuffer-00059"},
    {CMD_ENDQUERY, "VUID-vkCmdEndQuery-commandBuffer-recording"},
    {CMD_ENDRENDERPASS, "VUID-vkCmdEndRenderPass-commandBuffer-recording"},
    {CMD_ENDRENDERPASS2KHR, "VUID-vkCmdEndRenderPass2KHR-commandBuffer-recording"},
    {CMD_EXECUTECOMMANDS, "VUID-vkCmdExecuteCommands-commandBuffer-recording"},
    {CMD_FILLBUFFER, "VUID-vkCmdFillBuffer-commandBuffer-recording"},
    {CMD_NEXTSUBPASS, "VUID-vkCmdNextSubpass-commandBuffer-recording"},
    {CMD_NEXTSUBPASS2KHR, "VUID-vkCmdNextSubpass2KHR-commandBuffer-recording"},
    {CMD_PIPELINEBARRIER, "VUID-vkCmdPipelineBarrier-commandBuffer-recording"},
    // Exclude vendor ext (if not already present)
    // {CMD_PROCESSCOMMANDSNVX, "VUID-vkCmdProcessCommandsNVX-commandBuffer-recording"},
    {CMD_PUSHCONSTANTS, "VUID-vkCmdPushConstants-commandBuffer-recording"},
    {CMD_PUSHDESCRIPTORSETKHR, "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording"},
    {CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-recording"},
    // Exclude vendor ext (if not already present)
    // {CMD_RESERVESPACEFORCOMMANDSNVX, "VUID-vkCmdReserveSpaceForCommandsNVX-commandBuffer-recording"},
    {CMD_RESETEVENT, "VUID-vkCmdResetEvent-commandBuffer-recording"},
    {CMD_RESETQUERYPOOL, "VUID-vkCmdResetQueryPool-commandBuffer-recording"},
    {CMD_RESOLVEIMAGE, "VUID-vkCmdResolveImage-commandBuffer-recording"},
    {CMD_SETBLENDCONSTANTS, "VUID-vkCmdSetBlendConstants-commandBuffer-recording"},
    {CMD_SETDEPTHBIAS, "VUID-vkCmdSetDepthBias-commandBuffer-recording"},
    {CMD_SETDEPTHBOUNDS, "VUID-vkCmdSetDepthBounds-commandBuffer-recording"},
    // Exclude KHX (if not already present)
    // {CMD_SETDEVICEMASKKHX, "VUID-vkCmdSetDeviceMask-commandBuffer-recording"},
    {CMD_SETDISCARDRECTANGLEEXT, "VUID-vkCmdSetDiscardRectangleEXT-commandBuffer-recording"},
    {CMD_SETEVENT, "VUID-vkCmdSetEvent-commandBuffer-recording"},
    {CMD_SETEXCLUSIVESCISSOR, "VUID-vkCmdSetExclusiveScissorNV-commandBuffer-recording"},
    {CMD_SETLINEWIDTH, "VUID-vkCmdSetLineWidth-commandBuffer-recording"},
    {CMD_SETSAMPLELOCATIONSEXT, "VUID-vkCmdSetSampleLocationsEXT-commandBuffer-recording"},
    {CMD_SETSCISSOR, "VUID-vkCmdSetScissor-commandBuffer-recording"},
    {CMD_SETSTENCILCOMPAREMASK, "VUID-vkCmdSetStencilCompareMask-commandBuffer-recording"},
    {CMD_SETSTENCILREFERENCE, "VUID-vkCmdSetStencilReference-commandBuffer-recording"},
    {CMD_SETSTENCILWRITEMASK, "VUID-vkCmdSetStencilWriteMask-commandBuffer-recording"},
    {CMD_SETVIEWPORT, "VUID-vkCmdSetViewport-commandBuffer-recording"},
    {CMD_SETVIEWPORTSHADINGRATEPALETTE, "VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-recording"},
    // Exclude vendor ext (if not already present)
    // {CMD_SETVIEWPORTWSCALINGNV, "VUID-vkCmdSetViewportWScalingNV-commandBuffer-recording"},
    {CMD_UPDATEBUFFER, "VUID-vkCmdUpdateBuffer-commandBuffer-recording"},
    {CMD_WAITEVENTS, "VUID-vkCmdWaitEvents-commandBuffer-recording"},
    {CMD_WRITETIMESTAMP, "VUID-vkCmdWriteTimestamp-commandBuffer-recording"},
};

// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
bool CoreChecks::ValidateCmd(const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
    switch (cb_state->state) {
        case CB_RECORDING:
            return ValidateCmdSubpassState(cb_state, cmd);

        case CB_INVALID_COMPLETE:
        case CB_INVALID_INCOMPLETE:
            return ReportInvalidCommandBuffer(cb_state, caller_name);

        default:
            auto error_it = must_be_recording_map.find(cmd);
            // This assert lets us know that a vkCmd.* entrypoint has been added without enabling it in the map
            assert(error_it != must_be_recording_map.cend());
            if (error_it == must_be_recording_map.cend()) {
                error_it = must_be_recording_map.find(CMD_NONE);  // But we'll handle the asserting case, in case of a test gap
            }
            const auto error = error_it->second;
            return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                           HandleToUint64(cb_state->commandBuffer), error,
                           "You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
    }
}

bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, VkDebugReportObjectTypeEXT VUID_handle_type,
                                                         uint64_t VUID_handle, const char *VUID) {
    bool skip = false;
    uint32_t count = 1 << physical_device_count;
    if (count <= deviceMask) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
                        "deviceMask(0x%" PRIx32 ") is invalid. Physical device count is %" PRIu32 ".", deviceMask,
                        physical_device_count);
    }
    return skip;
}

bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
                                          const char *VUID) {
    bool skip = false;
    if (deviceMask == 0) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
                        "deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask);
    }
    return skip;
}

bool CoreChecks::ValidateDeviceMaskToCommandBuffer(GLOBAL_CB_NODE *pCB, uint32_t deviceMask,
                                                   VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
                                                   const char *VUID) {
    bool skip = false;
    if ((deviceMask & pCB->initial_device_mask) != deviceMask) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
                        "deviceMask(0x%" PRIx32 ") is not a subset of the command buffer[%s] initial device mask(0x%" PRIx32 ").",
                        deviceMask, report_data->FormatHandle(pCB->commandBuffer).c_str(), pCB->initial_device_mask);
    }
    return skip;
}

bool CoreChecks::ValidateDeviceMaskToRenderPass(GLOBAL_CB_NODE *pCB, uint32_t deviceMask,
                                                VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
                                                const char *VUID) {
    bool skip = false;
    if ((deviceMask & pCB->active_render_pass_device_mask) != deviceMask) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
                        "deviceMask(0x%" PRIx32 ") is not a subset of the render pass[%s] device mask(0x%" PRIx32 ").", deviceMask,
                        report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(),
                        pCB->active_render_pass_device_mask);
    }
    return skip;
}

// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *CoreChecks::GetStateStructPtrFromObject(VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
        case kVulkanObjectTypeDescriptorSet: {
            base_ptr = GetSetNode(reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeSampler: {
            base_ptr = GetSamplerState(reinterpret_cast<VkSampler &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeQueryPool: {
            base_ptr = GetQueryPoolNode(reinterpret_cast<VkQueryPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypePipeline: {
            base_ptr = GetPipelineState(reinterpret_cast<VkPipeline &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeBuffer: {
            base_ptr = GetBufferState(reinterpret_cast<VkBuffer &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeBufferView: {
            base_ptr = GetBufferViewState(reinterpret_cast<VkBufferView &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeImage: {
            base_ptr = GetImageState(reinterpret_cast<VkImage &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeImageView: {
            base_ptr = GetImageViewState(reinterpret_cast<VkImageView &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeEvent: {
            base_ptr = GetEventNode(reinterpret_cast<VkEvent &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeDescriptorPool: {
            base_ptr = GetDescriptorPoolState(reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeCommandPool: {
            base_ptr = GetCommandPoolNode(reinterpret_cast<VkCommandPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeFramebuffer: {
            base_ptr = GetFramebufferState(reinterpret_cast<VkFramebuffer &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeRenderPass: {
            base_ptr = GetRenderPassState(reinterpret_cast<VkRenderPass &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeDeviceMemory: {
            base_ptr = GetMemObjInfo(reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
            break;
        }
        default:
            // TODO : Any other objects to be handled here?
            assert(0);
            break;
    }
    return base_ptr;
}

// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void AddCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}

// For a given object, if cb_node is in that objects cb_bindings, remove cb_node
void CoreChecks::RemoveCommandBufferBinding(VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(*object);
    if (base_obj) base_obj->cb_bindings.erase(cb_node);
}

// Reset the command buffer state
// Maintain the createInfo and set state to CB_NEW, but clear all other state
void CoreChecks::ResetCommandBufferState(const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->hasDrawCmd = false;
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->image_layout_change_count = 1;  // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
        pCB->status = 0;
        pCB->static_status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (auto &item : pCB->lastBound) {
            item.second.reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->image_layout_map.clear();
        pCB->eventToStageMap.clear();
        pCB->draw_data.clear();
        pCB->current_draw_data.vertex_buffer_bindings.clear();
        pCB->vertex_buffer_used = false;
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // If secondary, invalidate any primary command buffer that may call us.
        if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            InvalidateCommandBuffers(pCB->linkedCommandBuffers, {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
        }
        // Remove reverse command buffer links.
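        // Note: each linked command buffer also holds a back-pointer to this one in its own linkedCommandBuffers set;
        // erasing it below keeps those sets free of dangling references after this reset.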
        for (auto pSubCB : pCB->linkedCommandBuffers) {
            pSubCB->linkedCommandBuffers.erase(pCB);
        }
        pCB->linkedCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        ClearCmdBufAndMemReferences(pCB);
        pCB->queue_submit_functions.clear();
        pCB->cmd_execute_commands_functions.clear();
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            RemoveCommandBufferBinding(&obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_state = GetFramebufferState(framebuffer);
            if (fb_state) fb_state->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
        memset(&pCB->index_buffer_binding, 0, sizeof(pCB->index_buffer_binding));

        pCB->qfo_transfer_image_barriers.Reset();
        pCB->qfo_transfer_buffer_barriers.Reset();

        // Clean up the label data
        ResetCmdDebugUtilsLabel(report_data, pCB->commandBuffer);
        pCB->debug_label.Reset();
    }
}

CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
    // initially assume everything is static state
    CBStatusFlags flags = CBSTATUS_ALL_STATE_SET;

    if (ds) {
        for (uint32_t i = 0; i < ds->dynamicStateCount; i++) {
            switch (ds->pDynamicStates[i]) {
                case VK_DYNAMIC_STATE_LINE_WIDTH:
                    flags &= ~CBSTATUS_LINE_WIDTH_SET;
                    break;
                case VK_DYNAMIC_STATE_DEPTH_BIAS:
                    flags &= ~CBSTATUS_DEPTH_BIAS_SET;
                    break;
                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                    flags &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                    break;
                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                    flags &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                    flags &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                    flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                    flags &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                    break;
                case VK_DYNAMIC_STATE_SCISSOR:
                    flags &= ~CBSTATUS_SCISSOR_SET;
                    break;
                case VK_DYNAMIC_STATE_VIEWPORT:
                    flags &= ~CBSTATUS_VIEWPORT_SET;
                    break;
                case VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV:
                    flags &= ~CBSTATUS_EXCLUSIVE_SCISSOR_SET;
                    break;
                case VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV:
                    flags &= ~CBSTATUS_SHADING_RATE_PALETTE_SET;
                    break;
                default:
                    break;
            }
        }
    }

    return flags;
}

// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called
// outside a render pass.
bool CoreChecks::InsideRenderPass(const GLOBAL_CB_NODE *pCB, const char *apiName, const char *msgCode) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         HandleToUint64(pCB->commandBuffer), msgCode,
                         "%s: It is invalid to issue this call inside an active render pass (%s).", apiName,
                         report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str());
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool CoreChecks::OutsideRenderPass(GLOBAL_CB_NODE *pCB, const char *apiName, const char *msgCode) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          HandleToUint64(pCB->commandBuffer), msgCode,
                          "%s: This call must be issued inside an active render pass.", apiName);
    }
    return outside;
}

void CoreChecks::InitGpuValidation() {
    // Process the layer settings file.
    enum CoreValidationGpuFlagBits {
        CORE_VALIDATION_GPU_VALIDATION_ALL_BIT = 0x00000001,
        CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT = 0x00000002,
    };
    typedef VkFlags CoreGPUFlags;
    static const std::unordered_map<std::string, VkFlags> gpu_flags_option_definitions = {
        {std::string("all"), CORE_VALIDATION_GPU_VALIDATION_ALL_BIT},
        {std::string("reserve_binding_slot"), CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT},
    };
    std::string gpu_flags_key = "lunarg_core_validation.gpu_validation";
    CoreGPUFlags gpu_flags = GetLayerOptionFlags(gpu_flags_key, gpu_flags_option_definitions, 0);
    if (gpu_flags & CORE_VALIDATION_GPU_VALIDATION_ALL_BIT) {
        instance_state->enabled.gpu_validation = true;
    }
    if (gpu_flags & CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT) {
        instance_state->enabled.gpu_validation_reserve_binding_slot = true;
    }
}

void CoreChecks::PostCallRecordCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance, VkResult result) {
    if (VK_SUCCESS != result) return;
    InitGpuValidation();
}

bool CoreChecks::ValidatePhysicalDeviceQueueFamily(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family,
                                                   const char *err_code, const char *cmd_name,
                                                   const char *queue_family_var_name) {
    bool skip = false;

    const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2
                                          ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
                                          : "";

    std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
                                 ? "the pQueueFamilyPropertyCount was never obtained"
                                 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);

    if (requested_queue_family >= pd_state->queue_family_count) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                        HandleToUint64(pd_state->phys_device), err_code,
                        "%s: %s (= %" PRIu32
                        ") is not less than any previously obtained pQueueFamilyPropertyCount from "
                        "vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                        cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str());
    }
    return skip;
}

// Verify VkDeviceQueueCreateInfos
bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count,
                                                const VkDeviceQueueCreateInfo *infos) {
    bool skip = false;

    std::unordered_set<uint32_t> queue_family_set;

    for (uint32_t i = 0; i < info_count; ++i) {
        const auto requested_queue_family = infos[i].queueFamilyIndex;

        // Verify that requested queue family is known to be valid at this point in time
        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
        skip |= ValidatePhysicalDeviceQueueFamily(pd_state, requested_queue_family,
                                                  "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381", "vkCreateDevice",
                                                  queue_family_var_name.c_str());
        if (queue_family_set.count(requested_queue_family)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(pd_state->phys_device), "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
                            "CreateDevice(): %s (=%" PRIu32 ") is not unique within pQueueCreateInfos.",
                            queue_family_var_name.c_str(), requested_queue_family);
        } else {
            queue_family_set.insert(requested_queue_family);
        }

        // Verify that requested queue count of queue family is known to be valid at this point in time
        if (requested_queue_family < pd_state->queue_family_count) {
            const auto requested_queue_count = infos[i].queueCount;
            const auto queue_family_props_count = pd_state->queue_family_properties.size();
            const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
            const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2
                                                  ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
                                                  : "";
            std::string count_note =
                !queue_family_has_props
                    ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
                    : "i.e. is not less than or equal to " +
                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);

            if (!queue_family_has_props ||
                requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                                HandleToUint64(pd_state->phys_device), "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
                                "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
                                ") is not less than or equal to available queue count for this "
                                "pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex (=%" PRIu32
                                ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                                i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
            }
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    bool skip = false;
    auto pd_state = GetPhysicalDeviceState(gpu);

    // TODO: object_tracker should perhaps do this instead
    //       and it does not seem to currently work anyway -- the loader just crashes before this point
    if (!pd_state) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                        kVUID_Core_DevLimit_MustQueryCount,
                        "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    }

    skip |= ValidateDeviceQueueCreateInfos(pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);

    return skip;
}

void CoreChecks::PreCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
                                           std::unique_ptr<safe_VkDeviceCreateInfo> &modified_create_info) {
    // GPU Validation can possibly turn on device features, so give it a chance to change the create info.
    if (GetEnables()->gpu_validation) {
        VkPhysicalDeviceFeatures supported_features;
        DispatchGetPhysicalDeviceFeatures(gpu, &supported_features);
        GpuPreCallRecordCreateDevice(gpu, modified_create_info, &supported_features);
    }
}

void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
    if (VK_SUCCESS != result) return;

    const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
    if (nullptr == enabled_features_found) {
        const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
        if (features2) {
            enabled_features_found = &(features2->features);
        }
    }

    ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
    CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);

    if (nullptr == enabled_features_found) {
        core_checks->enabled_features.core = {};
    } else {
        core_checks->enabled_features.core = *enabled_features_found;
    }

    // Make sure that queue_family_properties are obtained for this device's physical_device, even if the app has not
    // previously set them through an explicit API call.
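    // The two calls below follow Vulkan's usual two-call enumeration pattern: query with a null pointer to get the
    // count, size the storage accordingly, then query again to fill in the actual VkQueueFamilyProperties entries.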
uint32_t count; auto pd_state = GetPhysicalDeviceState(gpu); DispatchGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr); pd_state->queue_family_count = count; pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count)); DispatchGetPhysicalDeviceQueueFamilyProperties(gpu, &count, &pd_state->queue_family_properties[0]); // Save local link to this device's physical device state core_checks->physical_device_state = pd_state; const auto *device_group_ci = lvl_find_in_chain<VkDeviceGroupDeviceCreateInfo>(pCreateInfo->pNext); core_checks->physical_device_count = device_group_ci && device_group_ci->physicalDeviceCount > 0 ? device_group_ci->physicalDeviceCount : 1; const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext); if (descriptor_indexing_features) { core_checks->enabled_features.descriptor_indexing = *descriptor_indexing_features; } const auto *eight_bit_storage_features = lvl_find_in_chain<VkPhysicalDevice8BitStorageFeaturesKHR>(pCreateInfo->pNext); if (eight_bit_storage_features) { core_checks->enabled_features.eight_bit_storage = *eight_bit_storage_features; } const auto *exclusive_scissor_features = lvl_find_in_chain<VkPhysicalDeviceExclusiveScissorFeaturesNV>(pCreateInfo->pNext); if (exclusive_scissor_features) { core_checks->enabled_features.exclusive_scissor = *exclusive_scissor_features; } const auto *shading_rate_image_features = lvl_find_in_chain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext); if (shading_rate_image_features) { core_checks->enabled_features.shading_rate_image = *shading_rate_image_features; } const auto *mesh_shader_features = lvl_find_in_chain<VkPhysicalDeviceMeshShaderFeaturesNV>(pCreateInfo->pNext); if (mesh_shader_features) { core_checks->enabled_features.mesh_shader = *mesh_shader_features; } const auto *inline_uniform_block_features = lvl_find_in_chain<VkPhysicalDeviceInlineUniformBlockFeaturesEXT>(pCreateInfo->pNext); if (inline_uniform_block_features) { core_checks->enabled_features.inline_uniform_block = *inline_uniform_block_features; } const auto *transform_feedback_features = lvl_find_in_chain<VkPhysicalDeviceTransformFeedbackFeaturesEXT>(pCreateInfo->pNext); if (transform_feedback_features) { core_checks->enabled_features.transform_feedback_features = *transform_feedback_features; } const auto *float16_int8_features = lvl_find_in_chain<VkPhysicalDeviceFloat16Int8FeaturesKHR>(pCreateInfo->pNext); if (float16_int8_features) { core_checks->enabled_features.float16_int8 = *float16_int8_features; } const auto *vtx_attrib_div_features = lvl_find_in_chain<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT>(pCreateInfo->pNext); if (vtx_attrib_div_features) { core_checks->enabled_features.vtx_attrib_divisor_features = *vtx_attrib_div_features; } const auto *scalar_block_layout_features = lvl_find_in_chain<VkPhysicalDeviceScalarBlockLayoutFeaturesEXT>(pCreateInfo->pNext); if (scalar_block_layout_features) { core_checks->enabled_features.scalar_block_layout_features = *scalar_block_layout_features; } const auto *buffer_address = lvl_find_in_chain<VkPhysicalDeviceBufferAddressFeaturesEXT>(pCreateInfo->pNext); if (buffer_address) { core_checks->enabled_features.buffer_address = *buffer_address; } const auto *cooperative_matrix_features = lvl_find_in_chain<VkPhysicalDeviceCooperativeMatrixFeaturesNV>(pCreateInfo->pNext); if (cooperative_matrix_features) { 
core_checks->enabled_features.cooperative_matrix_features = *cooperative_matrix_features; } const auto *float_controls_features = lvl_find_in_chain<VkPhysicalDeviceFloatControlsPropertiesKHR>(pCreateInfo->pNext); if (float_controls_features) { core_checks->enabled_features.float_controls = *float_controls_features; } // Store physical device properties and physical device mem limits into CoreChecks structs DispatchGetPhysicalDeviceMemoryProperties(gpu, &core_checks->phys_dev_mem_props); DispatchGetPhysicalDeviceProperties(gpu, &core_checks->phys_dev_props); if (core_checks->device_extensions.vk_khr_push_descriptor) { // Get the needed push_descriptor limits auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop); DispatchGetPhysicalDeviceProperties2KHR(gpu, &prop2); core_checks->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors; } if (core_checks->device_extensions.vk_ext_descriptor_indexing) { // Get the needed descriptor_indexing limits auto descriptor_indexing_props = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingPropertiesEXT>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&descriptor_indexing_props); DispatchGetPhysicalDeviceProperties2KHR(gpu, &prop2); core_checks->phys_dev_ext_props.descriptor_indexing_props = descriptor_indexing_props; } if (core_checks->device_extensions.vk_nv_shading_rate_image) { // Get the needed shading rate image limits auto shading_rate_image_props = lvl_init_struct<VkPhysicalDeviceShadingRateImagePropertiesNV>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&shading_rate_image_props); DispatchGetPhysicalDeviceProperties2KHR(gpu, &prop2); core_checks->phys_dev_ext_props.shading_rate_image_props = shading_rate_image_props; } if (core_checks->device_extensions.vk_nv_mesh_shader) { // Get the needed mesh shader limits auto mesh_shader_props = lvl_init_struct<VkPhysicalDeviceMeshShaderPropertiesNV>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&mesh_shader_props); DispatchGetPhysicalDeviceProperties2KHR(gpu, &prop2); core_checks->phys_dev_ext_props.mesh_shader_props = mesh_shader_props; } if (core_checks->device_extensions.vk_ext_inline_uniform_block) { // Get the needed inline uniform block limits auto inline_uniform_block_props = lvl_init_struct<VkPhysicalDeviceInlineUniformBlockPropertiesEXT>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&inline_uniform_block_props); DispatchGetPhysicalDeviceProperties2KHR(gpu, &prop2); core_checks->phys_dev_ext_props.inline_uniform_block_props = inline_uniform_block_props; } if (core_checks->device_extensions.vk_ext_vertex_attribute_divisor) { // Get the needed vertex attribute divisor limits auto vtx_attrib_divisor_props = lvl_init_struct<VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&vtx_attrib_divisor_props); DispatchGetPhysicalDeviceProperties2KHR(gpu, &prop2); core_checks->phys_dev_ext_props.vtx_attrib_divisor_props = vtx_attrib_divisor_props; } if (core_checks->device_extensions.vk_khr_depth_stencil_resolve) { // Get the needed depth and stencil resolve modes auto depth_stencil_resolve_props = lvl_init_struct<VkPhysicalDeviceDepthStencilResolvePropertiesKHR>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&depth_stencil_resolve_props); DispatchGetPhysicalDeviceProperties2KHR(gpu, &prop2); 
core_checks->phys_dev_ext_props.depth_stencil_resolve_props = depth_stencil_resolve_props; } if (GetEnables()->gpu_validation) { core_checks->GpuPostCallRecordCreateDevice(GetEnables()); } if (core_checks->device_extensions.vk_nv_cooperative_matrix) { // Get the needed cooperative_matrix properties auto cooperative_matrix_props = lvl_init_struct<VkPhysicalDeviceCooperativeMatrixPropertiesNV>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&cooperative_matrix_props); instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2); core_checks->phys_dev_ext_props.cooperative_matrix_props = cooperative_matrix_props; uint32_t numCooperativeMatrixProperties = 0; instance_dispatch_table.GetPhysicalDeviceCooperativeMatrixPropertiesNV(gpu, &numCooperativeMatrixProperties, NULL); core_checks->cooperative_matrix_properties.resize(numCooperativeMatrixProperties, lvl_init_struct<VkCooperativeMatrixPropertiesNV>()); instance_dispatch_table.GetPhysicalDeviceCooperativeMatrixPropertiesNV(gpu, &numCooperativeMatrixProperties, core_checks->cooperative_matrix_properties.data()); } // Store queue family data if ((pCreateInfo != nullptr) && (pCreateInfo->pQueueCreateInfos != nullptr)) { for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) { core_checks->queue_family_index_map.insert( std::make_pair(pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, pCreateInfo->pQueueCreateInfos[i].queueCount)); } } } void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { if (!device) return; if (GetEnables()->gpu_validation) { GpuPreCallRecordDestroyDevice(); } pipelineMap.clear(); renderPassMap.clear(); for (auto ii = commandBufferMap.begin(); ii != commandBufferMap.end(); ++ii) { delete (*ii).second; } commandBufferMap.clear(); // This will also delete all sets in the pool & remove them from setMap DeletePools(); // All sets should be removed assert(setMap.empty()); descriptorSetLayoutMap.clear(); imageViewMap.clear(); imageMap.clear(); imageSubresourceMap.clear(); imageLayoutMap.clear(); bufferViewMap.clear(); bufferMap.clear(); // Queues persist until device is destroyed queueMap.clear(); layer_debug_utils_destroy_device(device); } // For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id // and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id. // Similarly for mesh and task shaders. 
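// For example, a vkCmdWaitEvents() call whose stageMask includes VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT on a device
// created without the geometryShader feature should be reported against geo_error_id.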
bool CoreChecks::ValidateStageMaskGsTsEnables(VkPipelineStageFlags stageMask, const char *caller, const char *geo_error_id, const char *tess_error_id, const char *mesh_error_id, const char *task_error_id) { bool skip = false; if (!enabled_features.core.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, geo_error_id, "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have " "geometryShader feature enabled.", caller); } if (!enabled_features.core.tessellationShader && (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, tess_error_id, "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or " "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have " "tessellationShader feature enabled.", caller); } if (!enabled_features.mesh_shader.meshShader && (stageMask & VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, mesh_error_id, "%s call includes a stageMask with VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV bit set when device does not have " "VkPhysicalDeviceMeshShaderFeaturesNV::meshShader feature enabled.", caller); } if (!enabled_features.mesh_shader.taskShader && (stageMask & VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, task_error_id, "%s call includes a stageMask with VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV bit set when device does not have " "VkPhysicalDeviceMeshShaderFeaturesNV::taskShader feature enabled.", caller); } return skip; } // Loop through bound objects and increment their in_use counts. void CoreChecks::IncrementBoundObjects(GLOBAL_CB_NODE const *cb_node) { for (auto obj : cb_node->object_bindings) { auto base_obj = GetStateStructPtrFromObject(obj); if (base_obj) { base_obj->in_use.fetch_add(1); } } } // Track which resources are in-flight by atomically incrementing their "in_use" count void CoreChecks::IncrementResources(GLOBAL_CB_NODE *cb_node) { cb_node->submitCount++; cb_node->in_use.fetch_add(1); // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below IncrementBoundObjects(cb_node); // TODO : We should be able to remove the NULL look-up checks from the code below as long as // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state // should then be flagged prior to calling this function for (auto draw_data_element : cb_node->draw_data) { for (auto &vertex_buffer : draw_data_element.vertex_buffer_bindings) { auto buffer_state = GetBufferState(vertex_buffer.buffer); if (buffer_state) { buffer_state->in_use.fetch_add(1); } } } for (auto event : cb_node->writeEventsBeforeWait) { auto event_state = GetEventNode(event); if (event_state) event_state->write_in_use++; } } // Note: This function assumes that the global lock is held by the calling thread. // For the given queue, verify the queue state up to the given seq number. // Currently the only check is to make sure that if there are events to be waited on prior to // a QueryReset, make sure that all such events have been signalled. 
bool CoreChecks::VerifyQueueStateToSeq(QUEUE_STATE *initial_queue, uint64_t initial_seq) { bool skip = false; // sequence number we want to validate up to, per queue std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}}; // sequence number we've completed validation for, per queue std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs; std::vector<QUEUE_STATE *> worklist{initial_queue}; while (worklist.size()) { auto queue = worklist.back(); worklist.pop_back(); auto target_seq = target_seqs[queue]; auto seq = std::max(done_seqs[queue], queue->seq); auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq for (; seq < target_seq; ++sub_it, ++seq) { for (auto &wait : sub_it->waitSemaphores) { auto other_queue = GetQueueState(wait.queue); if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here. auto other_target_seq = std::max(target_seqs[other_queue], wait.seq); auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq); // if this wait is for another queue, and covers new sequence // numbers beyond what we've already validated, mark the new // target seq and (possibly-re)add the queue to the worklist. if (other_done_seq < other_target_seq) { target_seqs[other_queue] = other_target_seq; worklist.push_back(other_queue); } } } // finally mark the point we've now validated this queue to. done_seqs[queue] = seq; } return skip; } // When the given fence is retired, verify outstanding queue operations through the point of the fence bool CoreChecks::VerifyQueueStateToFence(VkFence fence) { auto fence_state = GetFenceNode(fence); if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) { return VerifyQueueStateToSeq(GetQueueState(fence_state->signaler.first), fence_state->signaler.second); } return false; } // Decrement in-use count for objects bound to command buffer void CoreChecks::DecrementBoundResources(GLOBAL_CB_NODE const *cb_node) { BASE_NODE *base_obj = nullptr; for (auto obj : cb_node->object_bindings) { base_obj = GetStateStructPtrFromObject(obj); if (base_obj) { base_obj->in_use.fetch_sub(1); } } } void CoreChecks::RetireWorkOnQueue(QUEUE_STATE *pQueue, uint64_t seq) { std::unordered_map<VkQueue, uint64_t> otherQueueSeqs; // Roll this queue forward, one submission at a time. 
while (pQueue->seq < seq) { auto &submission = pQueue->submissions.front(); for (auto &wait : submission.waitSemaphores) { auto pSemaphore = GetSemaphoreNode(wait.semaphore); if (pSemaphore) { pSemaphore->in_use.fetch_sub(1); } auto &lastSeq = otherQueueSeqs[wait.queue]; lastSeq = std::max(lastSeq, wait.seq); } for (auto &semaphore : submission.signalSemaphores) { auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore) { pSemaphore->in_use.fetch_sub(1); } } for (auto &semaphore : submission.externalSemaphores) { auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore) { pSemaphore->in_use.fetch_sub(1); } } for (auto cb : submission.cbs) { auto cb_node = GetCBNode(cb); if (!cb_node) { continue; } // First perform decrement on general case bound objects DecrementBoundResources(cb_node); for (auto draw_data_element : cb_node->draw_data) { for (auto &vertex_buffer_binding : draw_data_element.vertex_buffer_bindings) { auto buffer_state = GetBufferState(vertex_buffer_binding.buffer); if (buffer_state) { buffer_state->in_use.fetch_sub(1); } } } for (auto event : cb_node->writeEventsBeforeWait) { auto eventNode = eventMap.find(event); if (eventNode != eventMap.end()) { eventNode->second.write_in_use--; } } for (auto queryStatePair : cb_node->queryToStateMap) { queryToStateMap[queryStatePair.first] = queryStatePair.second; } for (auto eventStagePair : cb_node->eventToStageMap) { eventMap[eventStagePair.first].stageMask = eventStagePair.second; } cb_node->in_use.fetch_sub(1); } auto pFence = GetFenceNode(submission.fence); if (pFence && pFence->scope == kSyncScopeInternal) { pFence->state = FENCE_RETIRED; } pQueue->submissions.pop_front(); pQueue->seq++; } // Roll other queues forward to the highest seq we saw a wait for for (auto qs : otherQueueSeqs) { RetireWorkOnQueue(GetQueueState(qs.first), qs.second); } } // Submit a fence to a queue, delimiting previous fences and previous untracked // work by it. 
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
    pFence->state = FENCE_INFLIGHT;
    pFence->signaler.first = pQueue->queue;
    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}

bool CoreChecks::ValidateCommandBufferSimultaneousUse(GLOBAL_CB_NODE *pCB, int current_submit_count) {
    bool skip = false;
    if ((pCB->in_use.load() || current_submit_count > 1) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        "VUID-vkQueueSubmit-pCommandBuffers-00071",
                        "Command buffer %s is already in use and is not marked for simultaneous use.",
                        report_data->FormatHandle(pCB->commandBuffer).c_str());
    }
    return skip;
}

bool CoreChecks::ValidateCommandBufferState(GLOBAL_CB_NODE *cb_state, const char *call_source, int current_submit_count,
                                            const char *vu_id) {
    bool skip = false;
    if (disabled.command_buffer_state) return skip;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
        (cb_state->submitCount + current_submit_count > 1)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
                        "Command buffer %s was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been "
                        "submitted 0x%" PRIxLEAST64 " times.",
                        report_data->FormatHandle(cb_state->commandBuffer).c_str(), cb_state->submitCount + current_submit_count);
    }
    // Validate that cmd buffers have been updated
    switch (cb_state->state) {
        case CB_INVALID_INCOMPLETE:
        case CB_INVALID_COMPLETE:
            skip |= ReportInvalidCommandBuffer(cb_state, call_source);
            break;

        case CB_NEW:
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(cb_state->commandBuffer), vu_id,
                            "Command buffer %s used in the call to %s is unrecorded and contains no commands.",
                            report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
            break;

        case CB_RECORDING:
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_NoEndCommandBuffer,
                            "You must call vkEndCommandBuffer() on command buffer %s before this call to %s!",
                            report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
            break;

        default: /* recorded */
            break;
    }
    return skip;
}

bool CoreChecks::ValidateResources(GLOBAL_CB_NODE *cb_node) {
    bool skip = false;

    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    //  should then be flagged prior to calling this function
    for (const auto &draw_data_element : cb_node->draw_data) {
        for (const auto &vertex_buffer_binding : draw_data_element.vertex_buffer_bindings) {
            auto buffer_state = GetBufferState(vertex_buffer_binding.buffer);
            if ((vertex_buffer_binding.buffer != VK_NULL_HANDLE) && (!buffer_state)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                HandleToUint64(vertex_buffer_binding.buffer), kVUID_Core_DrawState_InvalidBuffer,
                                "Cannot submit cmd buffer using deleted buffer %s.",
                                report_data->FormatHandle(vertex_buffer_binding.buffer).c_str());
            }
        }
    }
    return skip;
}

// Check that the queue family index of 'queue'
matches one of the entries in pQueueFamilyIndices bool CoreChecks::ValidImageBufferQueue(GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count, const uint32_t *indices) { bool found = false; bool skip = false; auto queue_state = GetQueueState(queue); if (queue_state) { for (uint32_t i = 0; i < count; i++) { if (indices[i] == queue_state->queueFamilyIndex) { found = true; break; } } if (!found) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type], object->handle, kVUID_Core_DrawState_InvalidQueueFamily, "vkQueueSubmit: Command buffer %s contains %s %s which was not created allowing concurrent access to " "this queue family %d.", report_data->FormatHandle(cb_node->commandBuffer).c_str(), object_string[object->type], report_data->FormatHandle(object->handle).c_str(), queue_state->queueFamilyIndex); } } return skip; } // Validate that queueFamilyIndices of primary command buffers match this queue // Secondary command buffers were previously validated in vkCmdExecuteCommands(). bool CoreChecks::ValidateQueueFamilyIndices(GLOBAL_CB_NODE *pCB, VkQueue queue) { bool skip = false; auto pPool = GetCommandPoolNode(pCB->createInfo.commandPool); auto queue_state = GetQueueState(queue); if (pPool && queue_state) { if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), "VUID-vkQueueSubmit-pCommandBuffers-00074", "vkQueueSubmit: Primary command buffer %s created in queue family %d is being submitted on queue %s " "from queue family %d.", report_data->FormatHandle(pCB->commandBuffer).c_str(), pPool->queueFamilyIndex, report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex); } // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family for (auto object : pCB->object_bindings) { if (object.type == kVulkanObjectTypeImage) { auto image_state = GetImageState(reinterpret_cast<VkImage &>(object.handle)); if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { skip |= ValidImageBufferQueue(pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount, image_state->createInfo.pQueueFamilyIndices); } } else if (object.type == kVulkanObjectTypeBuffer) { auto buffer_state = GetBufferState(reinterpret_cast<VkBuffer &>(object.handle)); if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { skip |= ValidImageBufferQueue(pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount, buffer_state->createInfo.pQueueFamilyIndices); } } } } return skip; } bool CoreChecks::ValidatePrimaryCommandBufferState(GLOBAL_CB_NODE *pCB, int current_submit_count, QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards, QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) { // Track in-use for resources off of primary and any secondary CBs bool skip = false; // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing // on device skip |= ValidateCommandBufferSimultaneousUse(pCB, current_submit_count); skip |= ValidateResources(pCB); skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards); for (auto pSubCB : pCB->linkedCommandBuffers) { skip |= ValidateResources(pSubCB); skip |= ValidateQueuedQFOTransfers(pSubCB, qfo_image_scoreboards, qfo_buffer_scoreboards); // TODO: replace with 
InvalidateCommandBuffers() at recording.
        if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
            !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            "VUID-vkQueueSubmit-pCommandBuffers-00073",
                            "Command buffer %s was submitted with secondary buffer %s but that buffer has subsequently been "
                            "bound to primary cmd buffer %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                            "set.",
                            report_data->FormatHandle(pCB->commandBuffer).c_str(),
                            report_data->FormatHandle(pSubCB->commandBuffer).c_str(),
                            report_data->FormatHandle(pSubCB->primaryCommandBuffer).c_str());
        }
    }
    skip |= ValidateCommandBufferState(pCB, "vkQueueSubmit()", current_submit_count, "VUID-vkQueueSubmit-pCommandBuffers-00072");
    return skip;
}

bool CoreChecks::ValidateFenceForSubmit(FENCE_NODE *pFence) {
    bool skip = false;

    if (pFence && pFence->scope == kSyncScopeInternal) {
        if (pFence->state == FENCE_INFLIGHT) {
            // TODO: opportunities for "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueBindSparse-fence-01114",
            // "VUID-vkAcquireNextImageKHR-fence-01287"
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(pFence->fence), kVUID_Core_DrawState_InvalidFence,
                            "Fence %s is already in use by another submission.",
                            report_data->FormatHandle(pFence->fence).c_str());
        } else if (pFence->state == FENCE_RETIRED) {
            // TODO: opportunities for "VUID-vkQueueSubmit-fence-00063", "VUID-vkQueueBindSparse-fence-01113",
            // "VUID-vkAcquireNextImageKHR-fence-01287"
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(pFence->fence), kVUID_Core_MemTrack_FenceState,
                            "Fence %s submitted in SIGNALED state. Fences must be reset before being submitted.",
                            report_data->FormatHandle(pFence->fence).c_str());
        }
    }

    return skip;
}

void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
                                           VkResult result) {
    uint64_t early_retire_seq = 0;
    auto pQueue = GetQueueState(queue);
    auto pFence = GetFenceNode(fence);

    if (pFence) {
        if (pFence->scope == kSyncScopeInternal) {
            // Mark fence in use
            SubmitFence(pQueue, pFence, std::max(1u, submitCount));
            if (!submitCount) {
                // If no submissions, but just dropping a fence on the end of the queue,
                // record an empty submission with just the fence, so we can determine
                // its completion.
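// An empty submission still carries the fence handle, so RetireWorkOnQueue() can observe and retire it in order.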
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence); } } else { // Retire work up until this fence early, we will not see the wait that corresponds to this signal early_retire_seq = pQueue->seq + pQueue->submissions.size(); if (!external_sync_warning) { external_sync_warning = true; log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence), kVUID_Core_DrawState_QueueForwardProgress, "vkQueueSubmit(): Signaling external fence %s on queue %s will disable validation of preceding command " "buffer lifecycle states and the in-use status of associated objects.", report_data->FormatHandle(fence).c_str(), report_data->FormatHandle(queue).c_str()); } } } // Now process each individual submit for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { std::vector<VkCommandBuffer> cbs; const VkSubmitInfo *submit = &pSubmits[submit_idx]; vector<SEMAPHORE_WAIT> semaphore_waits; vector<VkSemaphore> semaphore_signals; vector<VkSemaphore> semaphore_externals; for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) { VkSemaphore semaphore = submit->pWaitSemaphores[i]; auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore) { if (pSemaphore->scope == kSyncScopeInternal) { if (pSemaphore->signaler.first != VK_NULL_HANDLE) { semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second}); pSemaphore->in_use.fetch_add(1); } pSemaphore->signaler.first = VK_NULL_HANDLE; pSemaphore->signaled = false; } else { semaphore_externals.push_back(semaphore); pSemaphore->in_use.fetch_add(1); if (pSemaphore->scope == kSyncScopeExternalTemporary) { pSemaphore->scope = kSyncScopeInternal; } } } } for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) { VkSemaphore semaphore = submit->pSignalSemaphores[i]; auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore) { if (pSemaphore->scope == kSyncScopeInternal) { pSemaphore->signaler.first = queue; pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1; pSemaphore->signaled = true; pSemaphore->in_use.fetch_add(1); semaphore_signals.push_back(semaphore); } else { // Retire work up until this submit early, we will not see the wait that corresponds to this signal early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1); if (!external_sync_warning) { external_sync_warning = true; log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "vkQueueSubmit(): Signaling external semaphore %s on queue %s will disable validation of preceding " "command buffer lifecycle states and the in-use status of associated objects.", report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(queue).c_str()); } } } } for (uint32_t i = 0; i < submit->commandBufferCount; i++) { auto cb_node = GetCBNode(submit->pCommandBuffers[i]); if (cb_node) { cbs.push_back(submit->pCommandBuffers[i]); for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) { cbs.push_back(secondaryCmdBuffer->commandBuffer); UpdateCmdBufImageLayouts(secondaryCmdBuffer); IncrementResources(secondaryCmdBuffer); RecordQueuedQFOTransfers(secondaryCmdBuffer); } UpdateCmdBufImageLayouts(cb_node); IncrementResources(cb_node); RecordQueuedQFOTransfers(cb_node); } } pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, 
semaphore_externals, submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE); } if (early_retire_seq) { RetireWorkOnQueue(pQueue, early_retire_seq); } if (GetEnables()->gpu_validation) { GpuPostCallQueueSubmit(queue, submitCount, pSubmits, fence); } } bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) { auto pFence = GetFenceNode(fence); bool skip = ValidateFenceForSubmit(pFence); if (skip) { return true; } unordered_set<VkSemaphore> signaled_semaphores; unordered_set<VkSemaphore> unsignaled_semaphores; unordered_set<VkSemaphore> internal_semaphores; vector<VkCommandBuffer> current_cmds; unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap; // Now verify each individual submit for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo *submit = &pSubmits[submit_idx]; for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) { skip |= ValidateStageMaskGsTsEnables( submit->pWaitDstStageMask[i], "vkQueueSubmit()", "VUID-VkSubmitInfo-pWaitDstStageMask-00076", "VUID-VkSubmitInfo-pWaitDstStageMask-00077", "VUID-VkSubmitInfo-pWaitDstStageMask-02089", "VUID-VkSubmitInfo-pWaitDstStageMask-02090"); VkSemaphore semaphore = submit->pWaitSemaphores[i]; auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (unsignaled_semaphores.count(semaphore) || (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "Queue %s is waiting on semaphore %s that has no way to be signaled.", report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str()); } else { signaled_semaphores.erase(semaphore); unsignaled_semaphores.insert(semaphore); } } if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) { internal_semaphores.insert(semaphore); } } for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) { VkSemaphore semaphore = submit->pSignalSemaphores[i]; auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "Queue %s is signaling semaphore %s that was previously signaled by queue %s but has not since " "been waited on by any queue.", report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(pSemaphore->signaler.first).c_str()); } else { unsignaled_semaphores.erase(semaphore); signaled_semaphores.insert(semaphore); } } } QFOTransferCBScoreboards<VkImageMemoryBarrier> qfo_image_scoreboards; QFOTransferCBScoreboards<VkBufferMemoryBarrier> qfo_buffer_scoreboards; for (uint32_t i = 0; i < submit->commandBufferCount; i++) { auto cb_node = GetCBNode(submit->pCommandBuffers[i]); if (cb_node) { skip |= ValidateCmdBufImageLayouts(cb_node, imageLayoutMap, localImageLayoutMap); current_cmds.push_back(submit->pCommandBuffers[i]); skip |= ValidatePrimaryCommandBufferState( cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), 
submit->pCommandBuffers[i]), &qfo_image_scoreboards, &qfo_buffer_scoreboards);
                skip |= ValidateQueueFamilyIndices(cb_node, queue);

                // Potential early exit here as bad object state may crash in delayed function calls
                if (skip) {
                    return true;
                }

                // Call submit-time functions to validate/update state
                for (auto &function : cb_node->queue_submit_functions) {
                    skip |= function();
                }
                for (auto &function : cb_node->eventUpdates) {
                    skip |= function(queue);
                }
                for (auto &function : cb_node->queryUpdates) {
                    skip |= function(queue);
                }
            }
        }

        auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupSubmitInfo>(submit->pNext);
        if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) {
            for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) {
                skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i],
                                                                VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue),
                                                                "VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
            }
        }
    }
    return skip;
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only on Android and only for NDK versions
// that support the VK_ANDROID_external_memory_android_hardware_buffer extension.
// This chunk could move into a separate core_validation_android.cpp file... ?

// clang-format off

// Map external format and usage flags to/from equivalent Vulkan flags
// (Tables as of v1.1.92)

// AHardwareBuffer Format                       Vulkan Format
// ======================                       =============
// AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM        VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM        VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM          VK_FORMAT_R8G8B8_UNORM
// AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM          VK_FORMAT_R5G6B5_UNORM_PACK16
// AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT    VK_FORMAT_R16G16B16A16_SFLOAT
// AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM     VK_FORMAT_A2B10G10R10_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D16_UNORM             VK_FORMAT_D16_UNORM
// AHARDWAREBUFFER_FORMAT_D24_UNORM             VK_FORMAT_X8_D24_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT     VK_FORMAT_D24_UNORM_S8_UINT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT             VK_FORMAT_D32_SFLOAT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT     VK_FORMAT_D32_SFLOAT_S8_UINT
// AHARDWAREBUFFER_FORMAT_S8_UINT               VK_FORMAT_S8_UINT

// The AHARDWAREBUFFER_FORMAT_* are an enum in the NDK headers, but get passed in to Vulkan
// as uint32_t. Casting the enums here avoids scattering casts around in the code.
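// e.g. ahb_format_map_a2v[(uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM] yields VK_FORMAT_D16_UNORM. Callers below guard
// lookups with count() first, since operator[] would insert a default entry for an unrecognized format.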
std::map<uint32_t, VkFormat> ahb_format_map_a2v = { { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM, VK_FORMAT_R8G8B8A8_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM, VK_FORMAT_R5G6B5_UNORM_PACK16 }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT, VK_FORMAT_R16G16B16A16_SFLOAT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM, VK_FORMAT_A2B10G10R10_UNORM_PACK32 }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM, VK_FORMAT_D16_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM, VK_FORMAT_X8_D24_UNORM_PACK32 }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT, VK_FORMAT_D32_SFLOAT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT, VK_FORMAT_S8_UINT } }; // AHardwareBuffer Usage Vulkan Usage or Creation Flag (Intermixed - Aargh!) // ===================== =================================================== // None VK_IMAGE_USAGE_TRANSFER_SRC_BIT // None VK_IMAGE_USAGE_TRANSFER_DST_BIT // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_SAMPLED_BIT // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT // AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT // AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT // AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE None // AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT VK_IMAGE_CREATE_PROTECTED_BIT // None VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT // None VK_IMAGE_CREATE_EXTENDED_USAGE_BIT // Same casting rationale. De-mixing the table to prevent type confusion and aliasing std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = { { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE, (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) }, { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT }, { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent }; std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = { { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT }, { (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT, VK_IMAGE_CREATE_PROTECTED_BIT }, { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent }; std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = { { VK_IMAGE_USAGE_SAMPLED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE }, { VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE }, { VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT }, }; std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = { { VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP }, { VK_IMAGE_CREATE_PROTECTED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT }, }; // clang-format on // // AHB-extension new APIs // bool CoreChecks::PreCallValidateGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) { bool skip = false; // buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags. 
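// AHardwareBuffer_describe() (NDK) fills an AHardwareBuffer_Desc from the opaque buffer handle; only the usage mask is
// needed for this check.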
AHardwareBuffer_Desc ahb_desc; AHardwareBuffer_describe(buffer, &ahb_desc); uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE | AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER; if (0 == (ahb_desc.usage & required_flags)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884", "vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64 ") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.", ahb_desc.usage); } return skip; } void CoreChecks::PostCallRecordGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties, VkResult result) { if (VK_SUCCESS != result) return; auto ahb_format_props = lvl_find_in_chain<VkAndroidHardwareBufferFormatPropertiesANDROID>(pProperties->pNext); if (ahb_format_props) { auto ext_formats = GetAHBExternalFormatsSet(); ext_formats->insert(ahb_format_props->externalFormat); } } bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBuffer(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo, struct AHardwareBuffer **pBuffer) { bool skip = false; DEVICE_MEM_INFO *mem_info = GetMemObjInfo(pInfo->memory); // VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in // VkExportMemoryAllocateInfoKHR::handleTypes when memory was created. if (!mem_info->is_export || (0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882", "vkGetMemoryAndroidHardwareBufferANDROID: The VkDeviceMemory (%s) was not allocated for export, or the " "export handleTypes (0x%" PRIx32 ") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.", report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags); } // If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo // with non-NULL image member, then that image must already be bound to memory. 
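// GetBoundMemory() is the set of VkDeviceMemory objects bound to the image, so a zero count for pInfo->memory means
// the dedicated image was never actually bound to this allocation.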
if (mem_info->is_dedicated && (VK_NULL_HANDLE != mem_info->dedicated_image)) { auto image_state = GetImageState(mem_info->dedicated_image); if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count(pInfo->memory)))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883", "vkGetMemoryAndroidHardwareBufferANDROID: The VkDeviceMemory (%s) was allocated using a dedicated " "image (%s), but that image is not bound to the VkDeviceMemory object.", report_data->FormatHandle(pInfo->memory).c_str(), report_data->FormatHandle(mem_info->dedicated_image).c_str()); } } return skip; } // // AHB-specific validation within non-AHB APIs // bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) { bool skip = false; auto import_ahb_info = lvl_find_in_chain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext); auto exp_mem_alloc_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(alloc_info->pNext); auto mem_ded_alloc_info = lvl_find_in_chain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext); if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) { // This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID AHardwareBuffer_Desc ahb_desc = {}; AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc); // If buffer is not NULL, it must be a valid Android hardware buffer object with AHardwareBuffer_Desc::format and // AHardwareBuffer_Desc::usage compatible with Vulkan as described in Android Hardware Buffers. // // BLOB & GPU_DATA_BUFFER combo specifically allowed if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) { // Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables // Usage must have at least one bit from the table. 
It may have additional bits not in the table uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE | AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT; if ((0 == (ahb_desc.usage & ahb_equiv_usage_bits)) || (0 == ahb_format_map_a2v.count(ahb_desc.format))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881", "vkAllocateMemory: The AHardwareBuffer_Desc's format ( %u ) and/or usage ( 0x%" PRIx64 " ) are not compatible with Vulkan.", ahb_desc.format, ahb_desc.usage); } } // Collect external buffer info VkPhysicalDeviceExternalBufferInfo pdebi = {}; pdebi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO; pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) { pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE]; } if (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT & ahb_desc.usage) { pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT]; } VkExternalBufferProperties ext_buf_props = {}; ext_buf_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES; DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props); // Collect external format info VkPhysicalDeviceExternalImageFormatInfo pdeifi = {}; pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO; pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; VkPhysicalDeviceImageFormatInfo2 pdifi2 = {}; pdifi2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2; pdifi2.pNext = &pdeifi; if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format]; pdifi2.type = VK_IMAGE_TYPE_2D; // Seems likely pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL; // Ditto if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) { pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE]; } if (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT & ahb_desc.usage) { pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT]; } if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) { pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP]; } if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) { pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT]; } VkExternalImageFormatProperties ext_img_fmt_props = {}; ext_img_fmt_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES; VkImageFormatProperties2 ifp2 = {}; ifp2.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2; ifp2.pNext = &ext_img_fmt_props; VkResult fmt_lookup_result = GetPDImageFormatProperties2(&pdifi2, &ifp2); // If buffer is not NULL, Android hardware buffers must be supported for import, as reported by // VkExternalImageFormatProperties or VkExternalBufferProperties. 
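// The import is rejected below only when neither the external-buffer query nor the external-image-format query
// reports VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT.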
if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) { if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880", "vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties " "structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag."); } } // Retrieve buffer and format properties of the provided AHardwareBuffer VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {}; ahb_format_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID; VkAndroidHardwareBufferPropertiesANDROID ahb_props = {}; ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID; ahb_props.pNext = &ahb_format_props; DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props); // allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer if (alloc_info->allocationSize != ahb_props.allocationSize) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-allocationSize-02383", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct, allocationSize (%" PRId64 ") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").", alloc_info->allocationSize, ahb_props.allocationSize); } // memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer // Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex; if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct, memoryTypeIndex (%" PRId32 ") does not correspond to a bit set in AHardwareBuffer's reported " "memoryTypeBits bitmask (0x%" PRIx32 ").", alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits); } // Checks for allocations without a dedicated allocation requirement if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) { // the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes // AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_format_props.externalFormat) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02384", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct without a dedicated allocation requirement, while the AHardwareBuffer's external format (0x%" PRIx64 ") is not AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.", 
ahb_format_props.externalFormat, ahb_desc.usage);
            }
        } else {  // Checks specific to import with a dedicated allocation requirement
            VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo);

            // The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT or
            // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE
            if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) {
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                    "VUID-VkMemoryAllocateInfo-pNext-02386",
                    "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a "
                    "dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64
                    ") contains neither AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.",
                    ahb_desc.usage);
            }

            // The format of the image must be VK_FORMAT_UNDEFINED or the format returned by
            // vkGetAndroidHardwareBufferPropertiesANDROID
            if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02387",
                                "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
                                "format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).",
                                string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format));
            }

            // The width, height, and array layer dimensions of the image and the Android hardware buffer must be identical
            if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) ||
                (ici->arrayLayers != ahb_desc.layers)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02388",
                                "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
                                "width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32
                                ") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").",
                                ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height,
                                ahb_desc.layers);
            }

            // If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must
            // have either a full mipmap chain or exactly 1 mip level.
            //
            // NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The
            // AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag.
            // Instead, its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence
            // indicates that the Android hardware buffer contains only a single mip level."
            //
            // TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct.
            // Clarification requested.
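// FullMipChainLevels(extent) is effectively floor(log2(max dimension)) + 1; e.g. a 1024x512 AHB-backed image passes
// with mipLevels of either 1 or 11.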
if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) && (ici->mipLevels != FullMipChainLevels(ici->extent))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02389", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, " "usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32 ") is neither 1 nor full mip " "chain levels (%" PRId32 ").", ici->mipLevels, FullMipChainLevels(ici->extent)); } // each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a // corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's // AHardwareBuffer_Desc::usage if (ici->usage & ~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02390", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, " "dedicated image usage bits include one or more with no AHardwareBuffer equivalent."); } bool illegal_usage = false; std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT}; for (VkImageUsageFlags ubit : usages) { if (ici->usage & ubit) { uint64_t ahb_usage = ahb_usage_map_v2a[ubit]; if (0 == (ahb_usage & ahb_desc.usage)) illegal_usage = true; } } if (illegal_usage) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02390", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained " "VkImportAndroidHardwareBufferInfoANDROID, one or more AHardwareBuffer usage bits equivalent to " "the provided image's usage bits are missing from AHardwareBuffer_Desc.usage."); } } } else { // Not an import if ((exp_mem_alloc_info) && (mem_ded_alloc_info) && (0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) && (VK_NULL_HANDLE != mem_ded_alloc_info->image)) { // This is an Android HW Buffer export if (0 != alloc_info->allocationSize) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-01874", "vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, " "but allocationSize is non-zero."); } } else { if (0 == alloc_info->allocationSize) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-01874", "vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0."); }; } } return skip; } bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(const VkImage image) { bool skip = false; IMAGE_STATE *image_state = GetImageState(image); if (image_state->imported_ahb && (0 == image_state->GetBoundMemory().size())) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), 
"VUID-VkImageMemoryRequirementsInfo2-image-01897", "vkGetImageMemoryRequirements2: Attempt to query layout from an image created with " "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been " "bound to memory."); } return skip; } static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) { bool skip = false; const VkAndroidHardwareBufferUsageANDROID *ahb_usage = lvl_find_in_chain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext); if (nullptr != ahb_usage) { const VkPhysicalDeviceExternalImageFormatInfo *pdeifi = lvl_find_in_chain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext); if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868", "vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained " "VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained " "VkPhysicalDeviceExternalImageFormatInfo struct with handleType " "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID."); } } return skip; } bool CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info) { const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext); if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) { if (VK_FORMAT_UNDEFINED != create_info->format) { return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904", "vkCreateSamplerYcbcrConversion[KHR]: CreateInfo format is not VK_FORMAT_UNDEFINED while " "there is a chained VkExternalFormatANDROID struct."); } } else if (VK_FORMAT_UNDEFINED == create_info->format) { return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904", "vkCreateSamplerYcbcrConversion[KHR]: CreateInfo format is VK_FORMAT_UNDEFINED with no chained " "VkExternalFormatANDROID struct."); } return false; } void CoreChecks::RecordCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info, VkSamplerYcbcrConversion ycbcr_conversion) { const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext); if (ext_format_android && (0 != ext_format_android->externalFormat)) { ycbcr_conversion_ahb_fmt_map.emplace(ycbcr_conversion, ext_format_android->externalFormat); } }; void CoreChecks::RecordDestroySamplerYcbcrConversionANDROID(VkSamplerYcbcrConversion ycbcr_conversion) { ycbcr_conversion_ahb_fmt_map.erase(ycbcr_conversion); }; #else // !VK_USE_PLATFORM_ANDROID_KHR bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) { return false; } static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) { return false; } bool 
CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info) { return false; }

bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(const VkImage image) { return false; }

void CoreChecks::RecordCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info,
                                                           VkSamplerYcbcrConversion ycbcr_conversion){};

void CoreChecks::RecordDestroySamplerYcbcrConversionANDROID(VkSamplerYcbcrConversion ycbcr_conversion){};

#endif  // VK_USE_PLATFORM_ANDROID_KHR

bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    bool skip = false;
    if (memObjMap.size() >= phys_dev_props.limits.maxMemoryAllocationCount) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                        kVUIDUndefined, "Number of currently valid memory objects is not less than the maximum allowed (%u).",
                        phys_dev_props.limits.maxMemoryAllocationCount);
    }

    if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) {
        skip |= ValidateAllocateMemoryANDROID(pAllocateInfo);
    } else {
        if (0 == pAllocateInfo->allocationSize) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkMemoryAllocateInfo-allocationSize-00638",
                            "vkAllocateMemory: allocationSize is 0.");
        }
    }

    auto chained_flags_struct = lvl_find_in_chain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext);
    if (chained_flags_struct && chained_flags_struct->flags == VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT) {
        skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                                        HandleToUint64(device), "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675");
        skip |= ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                         HandleToUint64(device), "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676");
    }
    // TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744
    return skip;
}

void CoreChecks::PostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory, VkResult result) {
    if (VK_SUCCESS == result) {
        AddMemObjInfo(device, *pMemory, pAllocateInfo);
    }
    return;
}

// For the given obj node, if it is in use, flag a validation error and return callback result, else return false
bool CoreChecks::ValidateObjectNotInUse(BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
                                        const char *error_code) {
    if (disabled.object_in_use) return false;
    bool skip = false;
    if (obj_node->in_use.load()) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
                        error_code, "Cannot call %s on %s %s that is currently in use by a command buffer.", caller_name,
                        object_string[obj_struct.type], report_data->FormatHandle(obj_struct.handle).c_str());
    }
    return skip;
}

bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem);
    VK_OBJECT obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
    bool skip = false;
    if (mem_info) {
        skip |= ValidateObjectNotInUse(mem_info, obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677");
    }
    return skip;
}

void
CoreChecks::PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) { if (!mem) return; DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem); VK_OBJECT obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory}; // Clear mem binding for any bound objects for (auto obj : mem_info->obj_bindings) { log_msg(report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle, kVUID_Core_MemTrack_FreedMemRef, "VK Object %s still has a reference to mem obj %s.", report_data->FormatHandle(obj.handle).c_str(), report_data->FormatHandle(mem_info->mem).c_str()); BINDABLE *bindable_state = nullptr; switch (obj.type) { case kVulkanObjectTypeImage: bindable_state = GetImageState(reinterpret_cast<VkImage &>(obj.handle)); break; case kVulkanObjectTypeBuffer: bindable_state = GetBufferState(reinterpret_cast<VkBuffer &>(obj.handle)); break; default: // Should only have buffer or image objects bound to memory assert(0); } assert(bindable_state); bindable_state->binding.mem = MEMORY_UNBOUND; bindable_state->UpdateBoundMemorySet(); } // Any bound cmd buffers are now invalid InvalidateCommandBuffers(mem_info->cb_bindings, obj_struct); memObjMap.erase(mem); } // Validate that given Map memory range is valid. This means that the memory should not already be mapped, // and that the size of the map range should be: // 1. Not zero // 2. Within the size of the memory allocation bool CoreChecks::ValidateMapMemRange(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) { bool skip = false; if (size == 0) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap, "VkMapMemory: Attempting to map memory range of size zero"); } auto mem_element = memObjMap.find(mem); if (mem_element != memObjMap.end()) { auto mem_info = mem_element->second.get(); // It is an application error to call VkMapMemory on an object that is already mapped if (mem_info->mem_range.size != 0) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap, "VkMapMemory: Attempting to map memory on an already-mapped object %s.", report_data->FormatHandle(mem).c_str()); } // Validate that offset + size is within object's allocationSize if (size == VK_WHOLE_SIZE) { if (offset >= mem_info->alloc_info.allocationSize) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap, "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64, offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize); } } else { if ((offset + size) > mem_info->alloc_info.allocationSize) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), "VUID-vkMapMemory-size-00681", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".", offset, size + offset, mem_info->alloc_info.allocationSize); } } } return skip; } void CoreChecks::StoreMemRanges(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) { auto mem_info = GetMemObjInfo(mem); if (mem_info) { mem_info->mem_range.offset = offset; mem_info->mem_range.size = size; } } // Guard value for pad data static char NoncoherentMemoryFillValue = 0xb; void 
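// Sketch of the shadow allocation built below for non-coherent mappings (boundaries
// illustrative; the actual sizes come from the code that follows):
//
//   shadow_copy_base
//   |-- alignment slack --|-- pad (shadow_pad_size) --|-- user data (size) --|-- pad (shadow_pad_size) --|
//                         ^ shadow_copy (+ start_offset)
//
// The pointer returned to the application is shadow_copy + shadow_pad_size; the whole
// 2 * shadow_pad_size + size region is seeded with NoncoherentMemoryFillValue so that
// later consistency checks can detect writes that stray outside the mapped range.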
CoreChecks::InitializeAndTrackMemory(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, void **ppData) { auto mem_info = GetMemObjInfo(mem); if (mem_info) { mem_info->p_driver_data = *ppData; uint32_t index = mem_info->alloc_info.memoryTypeIndex; if (phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) { mem_info->shadow_copy = 0; } else { if (size == VK_WHOLE_SIZE) { size = mem_info->alloc_info.allocationSize - offset; } mem_info->shadow_pad_size = phys_dev_props.limits.minMemoryMapAlignment; assert(SafeModulo(mem_info->shadow_pad_size, phys_dev_props.limits.minMemoryMapAlignment) == 0); // Ensure start of mapped region reflects hardware alignment constraints uint64_t map_alignment = phys_dev_props.limits.minMemoryMapAlignment; // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment. uint64_t start_offset = offset % map_alignment; // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes. mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset)); mem_info->shadow_copy = reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) & ~(map_alignment - 1)) + start_offset; assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset, map_alignment) == 0); memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size)); *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size; } } } // Verify that state for fence being waited on is appropriate. That is, // a fence being waited on should not already be signaled and // it should have been submitted on a queue or during acquire next image bool CoreChecks::VerifyWaitFenceState(VkFence fence, const char *apiCall) { bool skip = false; auto pFence = GetFenceNode(fence); if (pFence && pFence->scope == kSyncScopeInternal) { if (pFence->state == FENCE_UNSIGNALED) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence), kVUID_Core_MemTrack_FenceState, "%s called for fence %s which has not been submitted on a Queue or during acquire next image.", apiCall, report_data->FormatHandle(fence).c_str()); } } return skip; } void CoreChecks::RetireFence(VkFence fence) { auto pFence = GetFenceNode(fence); if (pFence && pFence->scope == kSyncScopeInternal) { if (pFence->signaler.first != VK_NULL_HANDLE) { // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed. RetireWorkOnQueue(GetQueueState(pFence->signaler.first), pFence->signaler.second); } else { // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark // the fence as retired. 
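// For example, a fence passed to vkAcquireNextImageKHR is signaled by the WSI, so
// signaler.first stays VK_NULL_HANDLE and there is no queue work to retire; updating
// the fence state below is all the tracking we can do.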
pFence->state = FENCE_RETIRED; } } } bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) { // Verify fence status of submitted fences bool skip = false; for (uint32_t i = 0; i < fenceCount; i++) { skip |= VerifyWaitFenceState(pFences[i], "vkWaitForFences"); skip |= VerifyQueueStateToFence(pFences[i]); } return skip; } void CoreChecks::PostCallRecordWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout, VkResult result) { if (VK_SUCCESS != result) return; // When we know that all fences are complete we can clean/remove their CBs if ((VK_TRUE == waitAll) || (1 == fenceCount)) { for (uint32_t i = 0; i < fenceCount; i++) { RetireFence(pFences[i]); } } // NOTE : Alternate case not handled here is when some fences have completed. In // this case for app to guarantee which fences completed it will have to call // vkGetFenceStatus() at which point we'll clean/remove their CBs if complete. } bool CoreChecks::PreCallValidateGetFenceStatus(VkDevice device, VkFence fence) { return VerifyWaitFenceState(fence, "vkGetFenceStatus()"); } void CoreChecks::PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result) { if (VK_SUCCESS != result) return; RetireFence(fence); } void CoreChecks::RecordGetDeviceQueueState(uint32_t queue_family_index, VkQueue queue) { // Add queue to tracking set only if it is new auto queue_is_new = queues.emplace(queue); if (queue_is_new.second == true) { QUEUE_STATE *queue_state = &queueMap[queue]; queue_state->queue = queue; queue_state->queueFamilyIndex = queue_family_index; queue_state->seq = 0; } } bool CoreChecks::ValidateGetDeviceQueue(uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue, const char *valid_qfi_vuid, const char *qfi_in_range_vuid) { bool skip = false; skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex", valid_qfi_vuid); const auto &queue_data = queue_family_index_map.find(queueFamilyIndex); if (queue_data != queue_family_index_map.end() && queue_data->second <= queueIndex) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), qfi_in_range_vuid, "vkGetDeviceQueue: queueIndex (=%" PRIu32 ") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32 ") when the device was created (i.e. 
is not less than %" PRIu32 ").", queueIndex, queueFamilyIndex, queue_data->second); } return skip; } bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) { return ValidateGetDeviceQueue(queueFamilyIndex, queueIndex, pQueue, "VUID-vkGetDeviceQueue-queueFamilyIndex-00384", "VUID-vkGetDeviceQueue-queueIndex-00385"); } void CoreChecks::PostCallRecordGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) { RecordGetDeviceQueueState(queueFamilyIndex, *pQueue); } void CoreChecks::PostCallRecordGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) { RecordGetDeviceQueueState(pQueueInfo->queueFamilyIndex, *pQueue); } bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) { QUEUE_STATE *queue_state = GetQueueState(queue); return VerifyQueueStateToSeq(queue_state, queue_state->seq + queue_state->submissions.size()); } void CoreChecks::PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result) { if (VK_SUCCESS != result) return; QUEUE_STATE *queue_state = GetQueueState(queue); RetireWorkOnQueue(queue_state, queue_state->seq + queue_state->submissions.size()); } bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) { bool skip = false; for (auto &queue : queueMap) { skip |= VerifyQueueStateToSeq(&queue.second, queue.second.seq + queue.second.submissions.size()); } return skip; } void CoreChecks::PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result) { if (VK_SUCCESS != result) return; for (auto &queue : queueMap) { RetireWorkOnQueue(&queue.second, queue.second.seq + queue.second.submissions.size()); } } bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) { FENCE_NODE *fence_node = GetFenceNode(fence); bool skip = false; if (fence_node) { if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence), "VUID-vkDestroyFence-fence-01120", "Fence %s is in use.", report_data->FormatHandle(fence).c_str()); } } return skip; } void CoreChecks::PreCallRecordDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) { if (!fence) return; fenceMap.erase(fence); } bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) { SEMAPHORE_NODE *sema_node = GetSemaphoreNode(semaphore); VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore}; bool skip = false; if (sema_node) { skip |= ValidateObjectNotInUse(sema_node, obj_struct, "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137"); } return skip; } void CoreChecks::PreCallRecordDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) { if (!semaphore) return; semaphoreMap.erase(semaphore); } bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) { EVENT_STATE *event_state = GetEventNode(event); VK_OBJECT obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent}; bool skip = false; if (event_state) { skip |= ValidateObjectNotInUse(event_state, obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145"); } return skip; } void CoreChecks::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) { if (!event) 
return; EVENT_STATE *event_state = GetEventNode(event); VK_OBJECT obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent}; InvalidateCommandBuffers(event_state->cb_bindings, obj_struct); eventMap.erase(event); } bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) { if (disabled.query_validation) return false; QUERY_POOL_NODE *qp_state = GetQueryPoolNode(queryPool); VK_OBJECT obj_struct = {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}; bool skip = false; if (qp_state) { skip |= ValidateObjectNotInUse(qp_state, obj_struct, "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793"); } return skip; } void CoreChecks::PreCallRecordDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) { if (!queryPool) return; QUERY_POOL_NODE *qp_state = GetQueryPoolNode(queryPool); VK_OBJECT obj_struct = {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}; InvalidateCommandBuffers(qp_state->cb_bindings, obj_struct); queryPoolMap.erase(queryPool); } bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) { if (disabled.query_validation) return false; bool skip = false; auto query_pool_state = queryPoolMap.find(queryPool); if (query_pool_state != queryPoolMap.end()) { if ((query_pool_state->second.createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, "VUID-vkGetQueryPoolResults-queryType-00818", "QueryPool %s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains " "VK_QUERY_RESULT_PARTIAL_BIT.", report_data->FormatHandle(queryPool).c_str()); } } return skip; } void CoreChecks::PostCallRecordGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags, VkResult result) { if ((VK_SUCCESS != result) && (VK_NOT_READY != result)) return; // TODO: clean this up, it's insanely wasteful. unordered_map<QueryObject, std::vector<VkCommandBuffer>> queries_in_flight; for (auto cmd_buffer : commandBufferMap) { if (cmd_buffer.second->in_use.load()) { for (auto query_state_pair : cmd_buffer.second->queryToStateMap) { queries_in_flight[query_state_pair.first].push_back(cmd_buffer.first); } } } for (uint32_t i = 0; i < queryCount; ++i) { QueryObject query = {queryPool, firstQuery + i}; auto qif_pair = queries_in_flight.find(query); auto query_state_pair = queryToStateMap.find(query); if (query_state_pair != queryToStateMap.end()) { // Available and in flight if (qif_pair != queries_in_flight.end() && query_state_pair != queryToStateMap.end() && query_state_pair->second) { for (auto cmd_buffer : qif_pair->second) { auto cb = GetCBNode(cmd_buffer); auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query); if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) { for (auto event : query_event_pair->second) { eventMap[event].needsSignaled = true; } } } } } } } // Return true if given ranges intersect, else false // Prereq : For both ranges, range->end - range->start > 0. 
This case should have already resulted // in an error so not checking that here // pad_ranges bool indicates a linear and non-linear comparison which requires padding // In the case where padding is required, if an alias is encountered then a validation error is reported and skip // may be set by the callback function so caller should merge in skip value if padding case is possible. // This check can be skipped by passing skip_checks=true, for call sites outside the validation path. bool CoreChecks::RangesIntersect(MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip, bool skip_checks) { *skip = false; auto r1_start = range1->start; auto r1_end = range1->end; auto r2_start = range2->start; auto r2_end = range2->end; VkDeviceSize pad_align = 1; if (range1->linear != range2->linear) { pad_align = phys_dev_props.limits.bufferImageGranularity; } if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false; if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false; if (!skip_checks && (range1->linear != range2->linear)) { // In linear vs. non-linear case, warn of aliasing const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear"; const char *r1_type_str = range1->image ? "image" : "buffer"; const char *r2_linear_str = range2->linear ? "linear" : "non-linear"; const char *r2_type_str = range2->image ? "image" : "buffer"; auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT; *skip |= log_msg( report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, kVUID_Core_MemTrack_InvalidAliasing, "%s %s %s is aliased with %s %s %s which may indicate a bug. For further info refer to the Buffer-Image Granularity " "section of the Vulkan specification. " "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html#resources-bufferimagegranularity)", r1_linear_str, r1_type_str, report_data->FormatHandle(range1->handle).c_str(), r2_linear_str, r2_type_str, report_data->FormatHandle(range2->handle).c_str()); } // Ranges intersect return true; } // Simplified RangesIntersect that calls above function to check range1 for intersection with offset & end addresses bool CoreChecks::RangesIntersect(MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) { // Create a local MEMORY_RANGE struct to wrap offset/size MEMORY_RANGE range_wrap; // Synch linear with range1 to avoid padding and potential validation error case range_wrap.linear = range1->linear; range_wrap.start = offset; range_wrap.end = end; bool tmp_bool; return RangesIntersect(range1, &range_wrap, &tmp_bool, true); } bool CoreChecks::ValidateInsertMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image, bool is_linear, const char *api_name) { bool skip = false; MEMORY_RANGE range; range.image = is_image; range.handle = handle; range.linear = is_linear; range.memory = mem_info->mem; range.start = memoryOffset; range.size = memRequirements.size; range.end = memoryOffset + memRequirements.size - 1; range.aliases.clear(); // Check for aliasing problems. for (auto &obj_range_pair : mem_info->bound_ranges) { auto check_range = &obj_range_pair.second; bool intersection_error = false; if (RangesIntersect(&range, check_range, &intersection_error, false)) { skip |= intersection_error; range.aliases.insert(check_range); } } if (memoryOffset >= mem_info->alloc_info.allocationSize) { const char *error_code = is_image ? 
"VUID-vkBindImageMemory-memoryOffset-01046" : "VUID-vkBindBufferMemory-memoryOffset-01031"; skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_info->mem), error_code, "In %s, attempting to bind memory (%s) to object (%s), memoryOffset=0x%" PRIxLEAST64 " must be less than the memory allocation size 0x%" PRIxLEAST64 ".", api_name, report_data->FormatHandle(mem_info->mem).c_str(), report_data->FormatHandle(handle).c_str(), memoryOffset, mem_info->alloc_info.allocationSize); } return skip; } // Object with given handle is being bound to memory w/ given mem_info struct. // Track the newly bound memory range with given memoryOffset // Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear // and non-linear range incorrectly overlap. // Return true if an error is flagged and the user callback returns "true", otherwise false // is_image indicates an image object, otherwise handle is for a buffer // is_linear indicates a buffer or linear image void CoreChecks::InsertMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image, bool is_linear) { MEMORY_RANGE range; range.image = is_image; range.handle = handle; range.linear = is_linear; range.memory = mem_info->mem; range.start = memoryOffset; range.size = memRequirements.size; range.end = memoryOffset + memRequirements.size - 1; range.aliases.clear(); // Update Memory aliasing // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges; for (auto &obj_range_pair : mem_info->bound_ranges) { auto check_range = &obj_range_pair.second; bool intersection_error = false; if (RangesIntersect(&range, check_range, &intersection_error, true)) { range.aliases.insert(check_range); tmp_alias_ranges.insert(check_range); } } mem_info->bound_ranges[handle] = std::move(range); for (auto tmp_range : tmp_alias_ranges) { tmp_range->aliases.insert(&mem_info->bound_ranges[handle]); } if (is_image) mem_info->bound_images.insert(handle); else mem_info->bound_buffers.insert(handle); } bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear, const char *api_name) { return ValidateInsertMemoryRange(HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name); } void CoreChecks::InsertImageMemoryRange(VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear) { InsertMemoryRange(HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear); } bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) { return ValidateInsertMemoryRange(HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name); } void CoreChecks::InsertBufferMemoryRange(VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs) { InsertMemoryRange(HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true); } // Remove MEMORY_RANGE struct for give handle from bound_ranges of mem_info // is_image indicates if handle is 
for image or buffer // This function will also remove the handle-to-index mapping from the appropriate // map and clean up any aliases for range being removed. static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) { auto erase_range = &mem_info->bound_ranges[handle]; for (auto alias_range : erase_range->aliases) { alias_range->aliases.erase(erase_range); } erase_range->aliases.clear(); mem_info->bound_ranges.erase(handle); if (is_image) { mem_info->bound_images.erase(handle); } else { mem_info->bound_buffers.erase(handle); } } void CoreChecks::RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); } void CoreChecks::RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); } bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits, const char *funcName, const char *msgCode) { bool skip = false; if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_info->mem), msgCode, "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory " "type (0x%X) of this memory object %s.", funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, report_data->FormatHandle(mem_info->mem).c_str()); } return skip; } bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset, const char *api_name) { BUFFER_STATE *buffer_state = GetBufferState(buffer); bool skip = false; if (buffer_state) { // Track objects tied to memory uint64_t buffer_handle = HandleToUint64(buffer); skip = ValidateSetMemBinding(mem, buffer_handle, kVulkanObjectTypeBuffer, api_name); if (!buffer_state->memory_requirements_checked) { // There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling // BindBufferMemory, but it's implied in that memory being bound must conform with VkMemoryRequirements from // vkGetBufferMemoryRequirements() skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle, kVUID_Core_DrawState_InvalidBuffer, "%s: Binding memory to buffer %s but vkGetBufferMemoryRequirements() has not been called on that buffer.", api_name, report_data->FormatHandle(buffer_handle).c_str()); // Make the call for them so we can verify the state DispatchGetBufferMemoryRequirements(device, buffer, &buffer_state->requirements); } // Validate bound memory range information const auto mem_info = GetMemObjInfo(mem); if (mem_info) { skip |= ValidateInsertBufferMemoryRange(buffer, mem_info, memoryOffset, buffer_state->requirements, api_name); skip |= ValidateMemoryTypes(mem_info, buffer_state->requirements.memoryTypeBits, api_name, "VUID-vkBindBufferMemory-memory-01035"); } // Validate memory requirements alignment if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle, "VUID-vkBindBufferMemory-memoryOffset-01036", "%s: memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64 ", returned from a call to vkGetBufferMemoryRequirements with buffer.", api_name, memoryOffset, buffer_state->requirements.alignment); } if 
(mem_info) { // Validate memory requirements size if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle, "VUID-vkBindBufferMemory-size-01037", "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64 ", returned from a call to vkGetBufferMemoryRequirements with buffer.", api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size); } // Validate dedicated allocation if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) { // TODO: Add vkBindBufferMemory2KHR error message when added to spec. auto validation_error = kVUIDUndefined; if (strcmp(api_name, "vkBindBufferMemory()") == 0) { validation_error = "VUID-vkBindBufferMemory-memory-01508"; } skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle, validation_error, "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfoKHR::buffer %s must be equal " "to buffer %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(mem_info->dedicated_buffer).c_str(), report_data->FormatHandle(buffer_handle).c_str(), memoryOffset); } } } return skip; } bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) { const char *api_name = "vkBindBufferMemory()"; return ValidateBindBufferMemory(buffer, mem, memoryOffset, api_name); } void CoreChecks::UpdateBindBufferMemoryState(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) { BUFFER_STATE *buffer_state = GetBufferState(buffer); if (buffer_state) { // Track bound memory range information auto mem_info = GetMemObjInfo(mem); if (mem_info) { InsertBufferMemoryRange(buffer, mem_info, memoryOffset, buffer_state->requirements); } // Track objects tied to memory uint64_t buffer_handle = HandleToUint64(buffer); SetMemBinding(mem, buffer_state, memoryOffset, buffer_handle, kVulkanObjectTypeBuffer); } } void CoreChecks::PostCallRecordBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset, VkResult result) { if (VK_SUCCESS != result) return; UpdateBindBufferMemoryState(buffer, mem, memoryOffset); } bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i); skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindBufferMemory2KHR() pBindInfos[%u]", i); skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } void CoreChecks::PostCallRecordBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) { for (uint32_t i = 0; i < bindInfoCount; i++) { 
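// Each pBindInfos element is tracked exactly as a standalone vkBindBufferMemory call
// would be. Hypothetical app-side sketch of the batched path recorded here:
//   VkBindBufferMemoryInfo infos[2] = {
//       {VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, nullptr, bufA, mem, 0},
//       {VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, nullptr, bufB, mem, 256},
//   };
//   vkBindBufferMemory2(device, 2, infos);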
UpdateBindBufferMemoryState(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset); } } void CoreChecks::PostCallRecordBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) { for (uint32_t i = 0; i < bindInfoCount; i++) { UpdateBindBufferMemoryState(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset); } } void CoreChecks::RecordGetBufferMemoryRequirementsState(VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) { BUFFER_STATE *buffer_state = GetBufferState(buffer); if (buffer_state) { buffer_state->requirements = *pMemoryRequirements; buffer_state->memory_requirements_checked = true; } } void CoreChecks::PostCallRecordGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) { RecordGetBufferMemoryRequirementsState(buffer, pMemoryRequirements); } void CoreChecks::PostCallRecordGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo, VkMemoryRequirements2KHR *pMemoryRequirements) { RecordGetBufferMemoryRequirementsState(pInfo->buffer, &pMemoryRequirements->memoryRequirements); } void CoreChecks::PostCallRecordGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo, VkMemoryRequirements2KHR *pMemoryRequirements) { RecordGetBufferMemoryRequirementsState(pInfo->buffer, &pMemoryRequirements->memoryRequirements); } bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo) { bool skip = false; if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) { skip |= ValidateGetImageMemoryRequirements2ANDROID(pInfo->image); } return skip; } bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements) { return ValidateGetImageMemoryRequirements2(pInfo); } bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements) { return ValidateGetImageMemoryRequirements2(pInfo); } void CoreChecks::RecordGetImageMemoryRequiementsState(VkImage image, VkMemoryRequirements *pMemoryRequirements) { IMAGE_STATE *image_state = GetImageState(image); if (image_state) { image_state->requirements = *pMemoryRequirements; image_state->memory_requirements_checked = true; } } void CoreChecks::PostCallRecordGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) { RecordGetImageMemoryRequiementsState(image, pMemoryRequirements); } void CoreChecks::PostCallRecordGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements) { RecordGetImageMemoryRequiementsState(pInfo->image, &pMemoryRequirements->memoryRequirements); } void CoreChecks::PostCallRecordGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements) { RecordGetImageMemoryRequiementsState(pInfo->image, &pMemoryRequirements->memoryRequirements); } static void RecordGetImageSparseMemoryRequirementsState(IMAGE_STATE *image_state, VkSparseImageMemoryRequirements *sparse_image_memory_requirements) { image_state->sparse_requirements.emplace_back(*sparse_image_memory_requirements); if (sparse_image_memory_requirements->formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) { 
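// The metadata aspect covers an implementation-private region of a sparse image that
// must be bound (via vkQueueBindSparse) before the image is used, so latch the
// requirement here for later bind/submit-time validation.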
image_state->sparse_metadata_required = true; } } void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements *pSparseMemoryRequirements) { auto image_state = GetImageState(image); image_state->get_sparse_reqs_called = true; if (!pSparseMemoryRequirements) return; for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) { RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i]); } } void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo, uint32_t *pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) { auto image_state = GetImageState(pInfo->image); image_state->get_sparse_reqs_called = true; if (!pSparseMemoryRequirements) return; for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) { assert(!pSparseMemoryRequirements[i].pNext); // TODO: If an extension is ever added here we need to handle it RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i].memoryRequirements); } } void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements2KHR( VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo, uint32_t *pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) { auto image_state = GetImageState(pInfo->image); image_state->get_sparse_reqs_called = true; if (!pSparseMemoryRequirements) return; for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) { assert(!pSparseMemoryRequirements[i].pNext); // TODO: If an extension is ever added here we need to handle it RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i].memoryRequirements); } } bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties) { // Can't wrap AHB-specific validation in a device extension check here, but no harm bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(report_data, pImageFormatInfo, pImageFormatProperties); return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties) { // Can't wrap AHB-specific validation in a device extension check here, but no harm bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(report_data, pImageFormatInfo, pImageFormatProperties); return skip; } void CoreChecks::PreCallRecordDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) { if (!shaderModule) return; shaderModuleMap.erase(shaderModule); } bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) { PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline); VK_OBJECT obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline}; bool skip = false; if (pipeline_state) { skip |= ValidateObjectNotInUse(pipeline_state, obj_struct, "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765"); } return skip; } void CoreChecks::PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) { if (!pipeline) return; PIPELINE_STATE 
*pipeline_state = GetPipelineState(pipeline); VK_OBJECT obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline}; // Any bound cmd buffers are now invalid InvalidateCommandBuffers(pipeline_state->cb_bindings, obj_struct); if (GetEnables()->gpu_validation) { GpuPreCallRecordDestroyPipeline(pipeline); } pipelineMap.erase(pipeline); } void CoreChecks::PreCallRecordDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) { if (!pipelineLayout) return; pipelineLayoutMap.erase(pipelineLayout); } bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) { SAMPLER_STATE *sampler_state = GetSamplerState(sampler); VK_OBJECT obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler}; bool skip = false; if (sampler_state) { skip |= ValidateObjectNotInUse(sampler_state, obj_struct, "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082"); } return skip; } void CoreChecks::PreCallRecordDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) { if (!sampler) return; SAMPLER_STATE *sampler_state = GetSamplerState(sampler); VK_OBJECT obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler}; // Any bound cmd buffers are now invalid if (sampler_state) { InvalidateCommandBuffers(sampler_state->cb_bindings, obj_struct); } samplerMap.erase(sampler); } void CoreChecks::PreCallRecordDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) { if (!descriptorSetLayout) return; auto layout_it = descriptorSetLayoutMap.find(descriptorSetLayout); if (layout_it != descriptorSetLayoutMap.end()) { layout_it->second.get()->MarkDestroyed(); descriptorSetLayoutMap.erase(layout_it); } } bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) { DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool); VK_OBJECT obj_struct = {HandleToUint64(descriptorPool), kVulkanObjectTypeDescriptorPool}; bool skip = false; if (desc_pool_state) { skip |= ValidateObjectNotInUse(desc_pool_state, obj_struct, "vkDestroyDescriptorPool", "VUID-vkDestroyDescriptorPool-descriptorPool-00303"); } return skip; } void CoreChecks::PreCallRecordDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) { if (!descriptorPool) return; DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool); VK_OBJECT obj_struct = {HandleToUint64(descriptorPool), kVulkanObjectTypeDescriptorPool}; if (desc_pool_state) { // Any bound cmd buffers are now invalid InvalidateCommandBuffers(desc_pool_state->cb_bindings, obj_struct); // Free sets that were in this pool for (auto ds : desc_pool_state->sets) { FreeDescriptorSet(ds); } descriptorPoolMap.erase(descriptorPool); delete desc_pool_state; } } // Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result // If this is a secondary command buffer, then make sure its primary is also in-flight // If primary is not in-flight, then remove secondary from global in-flight set // This function is only valid at a point when cmdBuffer is being reset or freed bool CoreChecks::CheckCommandBufferInFlight(const GLOBAL_CB_NODE *cb_node, const char *action, const char *error_code) { bool skip = false; if (cb_node->in_use.load()) { skip |= log_msg(report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), error_code, "Attempt to %s command buffer (%s) which is in use.", action, report_data->FormatHandle(cb_node->commandBuffer).c_str()); } return skip; } // Iterate over all cmdBuffers in given commandPool and verify that each is not in use bool CoreChecks::CheckCommandBuffersInFlight(COMMAND_POOL_NODE *pPool, const char *action, const char *error_code) { bool skip = false; for (auto cmd_buffer : pPool->commandBuffers) { skip |= CheckCommandBufferInFlight(GetCBNode(cmd_buffer), action, error_code); } return skip; } // Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState void CoreChecks::FreeCommandBufferStates(COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count, const VkCommandBuffer *command_buffers) { if (GetEnables()->gpu_validation) { GpuPreCallRecordFreeCommandBuffers(command_buffer_count, command_buffers); } for (uint32_t i = 0; i < command_buffer_count; i++) { auto cb_state = GetCBNode(command_buffers[i]); // Remove references to command buffer's state and delete if (cb_state) { // reset prior to delete, removing various references to it. // TODO: fix this, it's insane. ResetCommandBufferState(cb_state->commandBuffer); // Remove the cb_state's references from COMMAND_POOL_NODEs commandBufferMap.erase(cb_state->commandBuffer); pool_state->commandBuffers.erase(command_buffers[i]); // Remove the cb debug labels EraseCmdDebugUtilsLabel(report_data, cb_state->commandBuffer); delete cb_state; } } } bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) { bool skip = false; for (uint32_t i = 0; i < commandBufferCount; i++) { auto cb_node = GetCBNode(pCommandBuffers[i]); // Delete CB information structure, and remove from commandBufferMap if (cb_node) { skip |= CheckCommandBufferInFlight(cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047"); } } return skip; } void CoreChecks::PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) { auto pPool = GetCommandPoolNode(commandPool); FreeCommandBufferStates(pPool, commandBufferCount, pCommandBuffers); } bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) { return ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex", "VUID-vkCreateCommandPool-queueFamilyIndex-01937"); } void CoreChecks::PostCallRecordCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool, VkResult result) { if (VK_SUCCESS != result) return; commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags; commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex; } bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) { if (disabled.query_validation) return false; bool skip = false; if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) { if (!enabled_features.core.pipelineStatisticsQuery) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 
VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, "VUID-VkQueryPoolCreateInfo-queryType-00791", "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with " "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE."); } } return skip; } void CoreChecks::PostCallRecordCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool, VkResult result) { if (VK_SUCCESS != result) return; QUERY_POOL_NODE *qp_node = &queryPoolMap[*pQueryPool]; qp_node->createInfo = *pCreateInfo; } bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) { COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(commandPool); bool skip = false; if (cp_state) { // Verify that command buffers in pool are complete (not in-flight) skip |= CheckCommandBuffersInFlight(cp_state, "destroy command pool with", "VUID-vkDestroyCommandPool-commandPool-00041"); } return skip; } void CoreChecks::PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) { if (!commandPool) return; COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(commandPool); // Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers // "When a pool is destroyed, all command buffers allocated from the pool are freed." if (cp_state) { // Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration. std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()}; FreeCommandBufferStates(cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data()); commandPoolMap.erase(commandPool); } } bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) { auto command_pool_state = GetCommandPoolNode(commandPool); return CheckCommandBuffersInFlight(command_pool_state, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040"); } void CoreChecks::PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags, VkResult result) { if (VK_SUCCESS != result) return; // Reset all of the CBs allocated from this pool auto command_pool_state = GetCommandPoolNode(commandPool); for (auto cmdBuffer : command_pool_state->commandBuffers) { ResetCommandBufferState(cmdBuffer); } } bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) { bool skip = false; for (uint32_t i = 0; i < fenceCount; ++i) { auto pFence = GetFenceNode(pFences[i]); if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(pFences[i]), "VUID-vkResetFences-pFences-01123", "Fence %s is in use.", report_data->FormatHandle(pFences[i]).c_str()); } } return skip; } void CoreChecks::PostCallRecordResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkResult result) { for (uint32_t i = 0; i < fenceCount; ++i) { auto pFence = GetFenceNode(pFences[i]); if (pFence) { if (pFence->scope == kSyncScopeInternal) { pFence->state = FENCE_UNSIGNALED; } else if (pFence->scope == kSyncScopeExternalTemporary) { pFence->scope = kSyncScopeInternal; } } } } // For given cb_nodes, invalidate them and track object causing invalidation void 
CoreChecks::InvalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) { for (auto cb_node : cb_nodes) { if (cb_node->state == CB_RECORDING) { log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer, "Invalidating a command buffer that's currently being recorded: %s.", report_data->FormatHandle(cb_node->commandBuffer).c_str()); cb_node->state = CB_INVALID_INCOMPLETE; } else if (cb_node->state == CB_RECORDED) { cb_node->state = CB_INVALID_COMPLETE; } cb_node->broken_bindings.push_back(obj); // if secondary, then propagate the invalidation to the primaries that will call us. if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) { InvalidateCommandBuffers(cb_node->linkedCommandBuffers, obj); } } } bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) { FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer); VK_OBJECT obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer}; bool skip = false; if (framebuffer_state) { skip |= ValidateObjectNotInUse(framebuffer_state, obj_struct, "vkDestroyFramebuffer", "VUID-vkDestroyFramebuffer-framebuffer-00892"); } return skip; } void CoreChecks::PreCallRecordDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) { if (!framebuffer) return; FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer); VK_OBJECT obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer}; InvalidateCommandBuffers(framebuffer_state->cb_bindings, obj_struct); frameBufferMap.erase(framebuffer); } bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) { RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass); VK_OBJECT obj_struct = {HandleToUint64(renderPass), kVulkanObjectTypeRenderPass}; bool skip = false; if (rp_state) { skip |= ValidateObjectNotInUse(rp_state, obj_struct, "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873"); } return skip; } void CoreChecks::PreCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) { if (!renderPass) return; RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass); VK_OBJECT obj_struct = {HandleToUint64(renderPass), kVulkanObjectTypeRenderPass}; InvalidateCommandBuffers(rp_state->cb_bindings, obj_struct); renderPassMap.erase(renderPass); } // Access helper functions for external modules VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) { VkFormatProperties format_properties; DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties); return format_properties; } VkResult CoreChecks::GetPDImageFormatProperties(const VkImageCreateInfo *image_ci, VkImageFormatProperties *pImageFormatProperties) { return DispatchGetPhysicalDeviceImageFormatProperties(physical_device, image_ci->format, image_ci->imageType, image_ci->tiling, image_ci->usage, image_ci->flags, pImageFormatProperties); } VkResult CoreChecks::GetPDImageFormatProperties2(const VkPhysicalDeviceImageFormatInfo2 *phys_dev_image_fmt_info, VkImageFormatProperties2 *pImageFormatProperties) { if (!instance_extensions.vk_khr_get_physical_device_properties_2) return VK_ERROR_EXTENSION_NOT_PRESENT; return 
DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, phys_dev_image_fmt_info, pImageFormatProperties); } const debug_report_data *CoreChecks::GetReportData() { return report_data; } const VkPhysicalDeviceProperties *CoreChecks::GetPDProperties() { return &phys_dev_props; } const VkPhysicalDeviceMemoryProperties *CoreChecks::GetPhysicalDeviceMemoryProperties() { return &phys_dev_mem_props; } const CHECK_DISABLED *CoreChecks::GetDisables() { return &instance_state->disabled; } const CHECK_ENABLED *CoreChecks::GetEnables() { return &instance_state->enabled; } std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *CoreChecks::GetImageMap() { return &imageMap; } std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *CoreChecks::GetImageSubresourceMap() { return &imageSubresourceMap; } std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *CoreChecks::GetImageLayoutMap() { return &imageLayoutMap; } std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *CoreChecks::GetBufferMap() { return &bufferMap; } std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *CoreChecks::GetBufferViewMap() { return &bufferViewMap; } std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *CoreChecks::GetImageViewMap() { return &imageViewMap; } const DeviceFeatures *CoreChecks::GetEnabledFeatures() { return &enabled_features; } const DeviceExtensions *CoreChecks::GetDeviceExtensions() { return &device_extensions; } uint32_t CoreChecks::GetApiVersion() { return api_version; } void CoreChecks::PostCallRecordCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence, VkResult result) { if (VK_SUCCESS != result) return; auto &fence_node = fenceMap[*pFence]; fence_node.fence = *pFence; fence_node.createInfo = *pCreateInfo; fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED; } // Validation cache: // CV is the bottommost implementor of this extension. Don't pass calls down. 
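// The range compares in the helper below rely on the four constant-based blend factors
// being contiguous in the VkBlendFactor enum:
//   VK_BLEND_FACTOR_CONSTANT_COLOR           = 10
//   VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11
//   VK_BLEND_FACTOR_CONSTANT_ALPHA           = 12
//   VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13
// so a single [CONSTANT_COLOR, ONE_MINUS_CONSTANT_ALPHA] test per factor decides
// whether the pipeline consumes blend constants.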
// utility function to set collective state for pipeline void SetPipelineState(PIPELINE_STATE *pPipe) { // If any attachment used by this pipeline has blendEnable, set top-level blendEnable if (pPipe->graphicsPipelineCI.pColorBlendState) { for (size_t i = 0; i < pPipe->attachments.size(); ++i) { if (VK_TRUE == pPipe->attachments[i].blendEnable) { if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) { pPipe->blendConstantsEnabled = true; } } } } } bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pipe_state_vec, const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) { bool skip = false; const VkPhysicalDeviceLimits *device_limits = &(GetPDProperties()->limits); for (uint32_t i = 0; i < count; i++) { auto pvids_ci = lvl_find_in_chain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext); if (nullptr == pvids_ci) continue; const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get(); for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) { const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]); if (vibdd->binding >= device_limits->maxVertexInputBindings) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).", i, j, vibdd->binding, device_limits->maxVertexInputBindings); } if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).", i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor); } if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not " "enabled.", i, j); } if ((1 != vibdd->divisor) && 
!enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not " "enabled.", i, j, vibdd->divisor); } // Find the corresponding binding description and validate input rate setting bool failed_01871 = true; for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) { if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) && (VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) { failed_01871 = false; break; } } if (failed_01871) { // Description not found, or has incorrect inputRate value skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's " "VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.", i, j, vibdd->binding); } } } return skip; } bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *cgpl_state_data) { bool skip = false; create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data); cgpl_state->pipe_state.reserve(count); for (uint32_t i = 0; i < count; i++) { cgpl_state->pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE)); (cgpl_state->pipe_state)[i]->initGraphicsPipeline(&pCreateInfos[i], GetRenderPassStateSharedPtr(pCreateInfos[i].renderPass)); (cgpl_state->pipe_state)[i]->pipeline_layout = *GetPipelineLayout(pCreateInfos[i].layout); } for (uint32_t i = 0; i < count; i++) { skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i); } for (uint32_t i = 0; i < count; i++) { skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state, i); } if (device_extensions.vk_ext_vertex_attribute_divisor) { skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos); } return skip; } // GPU validation may replace pCreateInfos for the down-chain call void CoreChecks::PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *cgpl_state_data) { create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data); cgpl_state->pCreateInfos = pCreateInfos; // GPU Validation may replace instrumented shaders with non-instrumented ones, so allow it to modify the createinfos. 
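// When enabled, GPU-assisted validation may swap in its own shader modules, so
// cgpl_state->gpu_create_infos takes ownership of the modified create-info array to
// keep it alive across the down-chain vkCreateGraphicsPipelines call; it is cleared
// again in PostCallRecordCreateGraphicsPipelines.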
if (GetEnables()->gpu_validation) { cgpl_state->gpu_create_infos = GpuPreCallRecordCreateGraphicsPipelines(pipelineCache, count, pCreateInfos, pAllocator, pPipelines, cgpl_state->pipe_state); cgpl_state->pCreateInfos = reinterpret_cast<VkGraphicsPipelineCreateInfo *>(cgpl_state->gpu_create_infos.data()); } } void CoreChecks::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *cgpl_state_data) { create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data); // This API may create pipelines regardless of the return value for (uint32_t i = 0; i < count; i++) { if (pPipelines[i] != VK_NULL_HANDLE) { (cgpl_state->pipe_state)[i]->pipeline = pPipelines[i]; pipelineMap[pPipelines[i]] = std::move((cgpl_state->pipe_state)[i]); } } // GPU val needs clean up regardless of result if (GetEnables()->gpu_validation) { GpuPostCallRecordCreateGraphicsPipelines(count, pCreateInfos, pAllocator, pPipelines); cgpl_state->gpu_create_infos.clear(); } cgpl_state->pipe_state.clear(); } bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *pipe_state_data) { bool skip = false; std::vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state = reinterpret_cast<std::vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data); pipe_state->reserve(count); for (uint32_t i = 0; i < count; i++) { // Create and initialize internal tracking data structure pipe_state->push_back(unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE)); (*pipe_state)[i]->initComputePipeline(&pCreateInfos[i]); (*pipe_state)[i]->pipeline_layout = *GetPipelineLayout(pCreateInfos[i].layout); // TODO: Add Compute Pipeline Verification skip |= ValidateComputePipeline((*pipe_state)[i].get()); } return skip; } void CoreChecks::PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *pipe_state_data) { std::vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state = reinterpret_cast<std::vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data); // This API may create pipelines regardless of the return value for (uint32_t i = 0; i < count; i++) { if (pPipelines[i] != VK_NULL_HANDLE) { (*pipe_state)[i]->pipeline = pPipelines[i]; pipelineMap[pPipelines[i]] = std::move((*pipe_state)[i]); } } } bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoNV *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *pipe_state_data) { bool skip = false; // The order of operations here is a little convoluted but gets the job done // 1. Pipeline create state is first shadowed into PIPELINE_STATE struct // 2. Create state is then validated (which uses flags setup during shadowing) // 3. 
If everything looks good, we'll then create the pipeline and add NODE to pipelineMap uint32_t i = 0; vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state = reinterpret_cast<vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data); pipe_state->reserve(count); for (i = 0; i < count; i++) { pipe_state->push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE)); (*pipe_state)[i]->initRayTracingPipelineNV(&pCreateInfos[i]); (*pipe_state)[i]->pipeline_layout = *GetPipelineLayout(pCreateInfos[i].layout); } for (i = 0; i < count; i++) { skip |= ValidateRayTracingPipelineNV((*pipe_state)[i].get()); } return skip; } void CoreChecks::PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoNV *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *pipe_state_data) { vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state = reinterpret_cast<vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data); // This API may create pipelines regardless of the return value for (uint32_t i = 0; i < count; i++) { if (pPipelines[i] != VK_NULL_HANDLE) { (*pipe_state)[i]->pipeline = pPipelines[i]; pipelineMap[pPipelines[i]] = std::move((*pipe_state)[i]); } } } void CoreChecks::PostCallRecordCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSampler *pSampler, VkResult result) { samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo)); } bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) { return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo( report_data, pCreateInfo, device_extensions.vk_khr_push_descriptor, phys_dev_ext_props.max_push_descriptors, device_extensions.vk_ext_descriptor_indexing, &enabled_features.descriptor_indexing, &enabled_features.inline_uniform_block, &phys_dev_ext_props.inline_uniform_block_props); } void CoreChecks::PostCallRecordCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout, VkResult result) { if (VK_SUCCESS != result) return; descriptorSetLayoutMap[*pSetLayout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(pCreateInfo, *pSetLayout); } // Used by CreatePipelineLayout and CmdPushConstants. // Note that the index argument is optional and only used by CreatePipelineLayout. bool CoreChecks::ValidatePushConstantRange(const uint32_t offset, const uint32_t size, const char *caller_name, uint32_t index = 0) { if (disabled.push_constant_range) return false; uint32_t const maxPushConstantsSize = phys_dev_props.limits.maxPushConstantsSize; bool skip = false; // Check that offset + size doesn't exceed the max. // Prevent arithmetic overflow here by avoiding addition and testing in this order. if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) { // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
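// Worked example of the overflow-safe test above (values are illustrative): with maxPushConstantsSize = 128, offset = 120 and size = 16, the first clause passes (120 < 128) but the second fails (16 > 128 - 120 = 8), so the range is rejected without ever computing offset + size, which could wrap for values near UINT32_MAX.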
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { if (offset >= maxPushConstantsSize) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-offset-00294", "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.", caller_name, index, offset, maxPushConstantsSize); } if (size > maxPushConstantsSize - offset) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-size-00298", "%s call has push constants index %u with offset %u and size %u that exceeds this device's " "maxPushConstantSize of %u.", caller_name, index, offset, size, maxPushConstantsSize); } } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { if (offset >= maxPushConstantsSize) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkCmdPushConstants-offset-00370", "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.", caller_name, index, offset, maxPushConstantsSize); } if (size > maxPushConstantsSize - offset) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkCmdPushConstants-size-00371", "%s call has push constants index %u with offset %u and size %u that exceeds this device's " "maxPushConstantSize of %u.", caller_name, index, offset, size, maxPushConstantsSize); } } else { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name); } } // size needs to be non-zero and a multiple of 4. if ((size == 0) || ((size & 0x3) != 0)) { if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { if (size == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-size-00296", "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name, index, size); } if (size & 0x3) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-size-00297", "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name, index, size); } } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { if (size == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkCmdPushConstants-size-arraylength", "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name, index, size); } if (size & 0x3) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkCmdPushConstants-size-00369", "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name, index, size); } } else { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name); } } // offset needs to be a multiple of 4. 
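// The bit test below mirrors the size check above: (offset & 0x3) equals offset % 4, so e.g. offset = 6 fails (6 & 0x3 == 2) while offset = 8 passes (8 & 0x3 == 0).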
if ((offset & 0x3) != 0) { if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-offset-00295", "%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name, index, offset); } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkCmdPushConstants-offset-00368", "%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset); } else { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name); } } return skip; } enum DSL_DESCRIPTOR_GROUPS { DSL_TYPE_SAMPLERS = 0, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK, DSL_NUM_DESCRIPTOR_GROUPS }; // Used by PreCallValidateCreatePipelineLayout. // Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage std::valarray<uint32_t> GetDescriptorCountMaxPerStage( const DeviceFeatures *enabled_features, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts, bool skip_update_after_bind) { // Identify active pipeline stages std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT, VK_SHADER_STAGE_COMPUTE_BIT}; if (enabled_features->core.geometryShader) { stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT); } if (enabled_features->core.tessellationShader) { stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT); } // Allow iteration over enum values std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = { DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK}; // Sum by layouts per stage, then pick max of stages per type std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages for (auto stage : stage_flags) { std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums for (auto dsl : set_layouts) { if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) { switch (binding->descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case 
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount; stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: // count one block per binding. descriptorCount is number of bytes stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++; break; default: break; } } } } for (auto type : dsl_groups) { max_sum[type] = std::max(stage_sum[type], max_sum[type]); } } return max_sum; } // Used by PreCallValidateCreatePipelineLayout. // Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type. // Note: descriptors only count against the limit once even if used by multiple stages. std::map<uint32_t, uint32_t> GetDescriptorSum( const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) { std::map<uint32_t, uint32_t> sum_by_type; for (auto dsl : set_layouts) { if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (binding->descriptorCount > 0) { if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { // count one block per binding. descriptorCount is number of bytes sum_by_type[binding->descriptorType]++; } else { sum_by_type[binding->descriptorType] += binding->descriptorCount; } } } } return sum_by_type; } bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) { bool skip = false; // Validate layout count against device physical limit if (pCreateInfo->setLayoutCount > phys_dev_props.limits.maxBoundDescriptorSets) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286", "vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).", pCreateInfo->setLayoutCount, phys_dev_props.limits.maxBoundDescriptorSets); } // Validate Push Constant ranges uint32_t i, j; for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { skip |= ValidatePushConstantRange(pCreateInfo->pPushConstantRanges[i].offset, pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i); if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-stageFlags-requiredbitmask", "vkCreatePipelineLayout() call has no stageFlags set."); } } // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges. 
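// The pairwise scan below flags any two ranges whose stageFlags masks intersect; e.g. (illustrative input) a range with VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT followed by a range with VK_SHADER_STAGE_FRAGMENT_BIT yields (flags_i & flags_j) != 0 and triggers VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292.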
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) { if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292", "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j); } } } // Early-out if (skip) return skip; std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr); unsigned int push_descriptor_set_count = 0; { for (i = 0; i < pCreateInfo->setLayoutCount; ++i) { set_layouts[i] = GetDescriptorSetLayout(this, pCreateInfo->pSetLayouts[i]); if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count; } } if (push_descriptor_set_count > 1) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293", "vkCreatePipelineLayout() Multiple push descriptor sets found."); } // Max descriptors by type, within a single pipeline stage std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true); // Samplers if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287", "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device " "maxPerStageDescriptorSamplers limit (%d).", max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers); } // Uniform buffers if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288", "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUniformBuffers limit (%d).", max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorUniformBuffers); } // Storage buffers if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289", "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device " "maxPerStageDescriptorStorageBuffers limit (%d).", max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorStorageBuffers); } // Sampled images if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290", "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device " "maxPerStageDescriptorSampledImages limit (%d).", max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages); } // Storage images if 
(max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291", "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device " "maxPerStageDescriptorStorageImages limit (%d).", max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages); } // Input attachments if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676", "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device " "maxPerStageDescriptorInputAttachments limit (%d).", max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS], phys_dev_props.limits.maxPerStageDescriptorInputAttachments); } // Inline uniform blocks if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] > phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214", "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device " "maxPerStageDescriptorInlineUniformBlocks limit (%d).", max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks); } // Total descriptors by type // std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true); // Samplers uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]; if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677", "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device " "maxDescriptorSetSamplers limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetSamplers); } // Uniform buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678", "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUniformBuffers limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers); } // Dynamic uniform buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679", "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUniformBuffersDynamic limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC], phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic); } // Storage buffers if 
(sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680", "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageBuffers limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers); } // Dynamic storage buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681", "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageBuffersDynamic limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC], phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic); } // Sampled images sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]; if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682", "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device " "maxDescriptorSetSampledImages limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetSampledImages); } // Storage images sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]; if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683", "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageImages limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetStorageImages); } // Input attachments if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684", "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device " "maxDescriptorSetInputAttachments limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments); } // Inline uniform blocks if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] > phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02216", "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device " "maxDescriptorSetInlineUniformBlocks limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT], phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks); } if (device_extensions.vk_ext_descriptor_indexing) { // XXX TODO: replace with 
correct VU messages // Max descriptors by type, within a single pipeline stage std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false); // Samplers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022", "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS], phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers); } // Uniform buffers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023", "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers); } // Storage buffers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024", "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS], phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers); } // Sampled images if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025", "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES], phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages); } // Storage images if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026", "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES], 
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages); } // Input attachments if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027", "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS], phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments); } // Inline uniform blocks if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] > phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215", "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks); } // Total descriptors by type, summed across all pipeline stages // std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false); // Samplers sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]; if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036", "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindSamplers limit (%d).", sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers); } // Uniform buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037", "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers); } // Dynamic uniform buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038", "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).", 
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC], phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic); } // Storage buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039", "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers); } // Dynamic storage buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040", "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC], phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic); } // Sampled images sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]; if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041", "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindSampledImages limit (%d).", sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages); } // Storage images sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]; if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042", "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageImages limit (%d).", sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages); } // Input attachments if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043", "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).", 
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments); } // Inline uniform blocks if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] > phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217", "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT], phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks); } } return skip; } // For repeatable sorting, not very useful for "memory in range" search struct PushConstantRangeCompare { bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const { if (lhs->offset == rhs->offset) { if (lhs->size == rhs->size) { // The comparison is arbitrary, but avoids false aliasing by comparing all fields. return lhs->stageFlags < rhs->stageFlags; } // If the offsets are the same then sorting by the end of range is useful for validation return lhs->size < rhs->size; } return lhs->offset < rhs->offset; } }; static PushConstantRangesDict push_constant_ranges_dict; PushConstantRangesId GetCanonicalId(const VkPipelineLayoutCreateInfo *info) { if (!info->pPushConstantRanges) { // Hand back the empty entry (creating as needed)... return push_constant_ranges_dict.look_up(PushConstantRanges()); } // Sort the input ranges to ensure equivalent ranges map to the same id std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted; for (uint32_t i = 0; i < info->pushConstantRangeCount; i++) { sorted.insert(info->pPushConstantRanges + i); } PushConstantRanges ranges; ranges.reserve(sorted.size()); for (const auto range : sorted) { ranges.emplace_back(*range); } return push_constant_ranges_dict.look_up(std::move(ranges)); } // Dictionary of canonical form of the pipeline set layouts (lists of descriptor set layouts) static PipelineLayoutSetLayoutsDict pipeline_layout_set_layouts_dict; // Dictionary of canonical form of the "compatible for set" records static PipelineLayoutCompatDict pipeline_layout_compat_dict; static PipelineLayoutCompatId GetCanonicalId(const uint32_t set_index, const PushConstantRangesId pcr_id, const PipelineLayoutSetLayoutsId set_layouts_id) { return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id)); } void CoreChecks::PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout, void *cpl_state_data) { create_pipeline_layout_api_state *cpl_state = reinterpret_cast<create_pipeline_layout_api_state *>(cpl_state_data); if (GetEnables()->gpu_validation) { GpuPreCallCreatePipelineLayout(pCreateInfo, pAllocator, pPipelineLayout, &cpl_state->new_layouts, &cpl_state->modified_create_info); } } void CoreChecks::PostCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout, VkResult result) { // Clean up GPU validation if (GetEnables()->gpu_validation) { GpuPostCallCreatePipelineLayout(result); } if (VK_SUCCESS != result)
return; PIPELINE_LAYOUT_NODE &plNode = pipelineLayoutMap[*pPipelineLayout]; plNode.layout = *pPipelineLayout; plNode.set_layouts.resize(pCreateInfo->setLayoutCount); PipelineLayoutSetLayoutsDef set_layouts(pCreateInfo->setLayoutCount); for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) { plNode.set_layouts[i] = GetDescriptorSetLayout(this, pCreateInfo->pSetLayouts[i]); set_layouts[i] = plNode.set_layouts[i]->GetLayoutId(); } // Get canonical form IDs for the "compatible for set" contents plNode.push_constant_ranges = GetCanonicalId(pCreateInfo); auto set_layouts_id = pipeline_layout_set_layouts_dict.look_up(set_layouts); plNode.compat_for_set.reserve(pCreateInfo->setLayoutCount); // Create table of "compatible for set N" canonical forms for trivial accept validation for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) { plNode.compat_for_set.emplace_back(GetCanonicalId(i, plNode.push_constant_ranges, set_layouts_id)); } } void CoreChecks::PostCallRecordCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool, VkResult result) { if (VK_SUCCESS != result) return; DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo); assert(pNewNode); descriptorPoolMap[*pDescriptorPool] = pNewNode; } bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) { // Make sure sets being destroyed are not currently in-use if (disabled.idle_descriptor_set) return false; bool skip = false; DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(descriptorPool); if (pPool != nullptr) { for (auto ds : pPool->sets) { if (ds && ds->in_use.load()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(descriptorPool), "VUID-vkResetDescriptorPool-descriptorPool-00313", "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer."); if (skip) break; } } } return skip; } void CoreChecks::PostCallRecordResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags, VkResult result) { if (VK_SUCCESS != result) return; DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(descriptorPool); // TODO: validate flags // For every set from this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet for (auto ds : pPool->sets) { FreeDescriptorSet(ds); } pPool->sets.clear(); // Reset available count for each type and available sets for this pool for (auto it = pPool->availableDescriptorTypeCount.begin(); it != pPool->availableDescriptorTypeCount.end(); ++it) { pPool->availableDescriptorTypeCount[it->first] = pPool->maxDescriptorTypeCount[it->first]; } pPool->availableSets = pPool->maxSets; } // Ensure the pool contains enough descriptors and descriptor sets to satisfy // an allocation request. Fills common_data with the total number of descriptors of each type required, // as well as DescriptorSetLayout ptrs used for later update.
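// Illustrative example (hypothetical numbers): allocating 3 sets whose layouts together contain 12 UNIFORM_BUFFER descriptors requires 3 available sets and 12 available UNIFORM_BUFFER descriptors in the pool; the precise conditions under which exhaustion is reported (including interactions with extensions such as VK_KHR_maintenance1) are handled in ValidateAllocateDescriptorSets.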
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets, void *ads_state_data) { // Always update common data cvdescriptorset::AllocateDescriptorSetsData *ads_state = reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data); UpdateAllocateDescriptorSetsData(pAllocateInfo, ads_state); // All state checks for AllocateDescriptorSets are done in a single function return ValidateAllocateDescriptorSets(pAllocateInfo, ads_state); } // Allocation state was good and the down-chain call was made, so update state based on the allocated descriptor sets void CoreChecks::PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets, VkResult result, void *ads_state_data) { if (VK_SUCCESS != result) return; // All the updates are contained in a single cvdescriptorset function cvdescriptorset::AllocateDescriptorSetsData *ads_state = reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data); PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, ads_state, &descriptorPoolMap, &setMap); } bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) { // Make sure that no sets being destroyed are in-flight bool skip = false; // First make sure sets being destroyed are not currently in-use for (uint32_t i = 0; i < count; ++i) { if (pDescriptorSets[i] != VK_NULL_HANDLE) { skip |= ValidateIdleDescriptorSet(pDescriptorSets[i], "vkFreeDescriptorSets"); } } DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool); if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) { // Can't Free from a NON_FREE pool skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(descriptorPool), "VUID-vkFreeDescriptorSets-descriptorPool-00312", "It is invalid to call vkFreeDescriptorSets() with a pool created without setting " "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT."); } return skip; } void CoreChecks::PreCallRecordFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) { DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool); // Update available descriptor sets in pool pool_state->availableSets += count; // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap for (uint32_t i = 0; i < count; ++i) { if (pDescriptorSets[i] != VK_NULL_HANDLE) { auto descriptor_set = setMap[pDescriptorSets[i]]; uint32_t type_index = 0, descriptor_count = 0; for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) { type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j)); descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j); pool_state->availableDescriptorTypeCount[type_index] += descriptor_count; } FreeDescriptorSet(descriptor_set); pool_state->sets.erase(descriptor_set); } } } bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) { // First thing to do is perform map look-ups.
// NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets // so we can't just do a single map look-up up-front, but do them individually in functions below // Now make call(s) that validate state, but don't perform state updates in this function // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the // namespace which will parse params and make calls into specific class instances return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies, "vkUpdateDescriptorSets()"); } void CoreChecks::PreCallRecordUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) { cvdescriptorset::PerformUpdateDescriptorSets(this, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies); } void CoreChecks::PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer, VkResult result) { if (VK_SUCCESS != result) return; auto pPool = GetCommandPoolNode(pCreateInfo->commandPool); if (pPool) { for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) { // Add command buffer to its commandPool map pPool->commandBuffers.insert(pCommandBuffer[i]); GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE; // Add command buffer to map commandBufferMap[pCommandBuffer[i]] = pCB; ResetCommandBufferState(pCommandBuffer[i]); pCB->createInfo = *pCreateInfo; pCB->device = device; } } } // Add bindings between the given cmd buffer & framebuffer and the framebuffer's children void CoreChecks::AddFramebufferBinding(GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) { AddCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer}, cb_state); const uint32_t attachmentCount = fb_state->createInfo.attachmentCount; for (uint32_t attachment = 0; attachment < attachmentCount; ++attachment) { auto view_state = GetAttachmentImageViewState(fb_state, attachment); if (view_state) { AddCommandBufferBindingImageView(cb_state, view_state); } } } bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); if (!cb_state) return false; bool skip = false; if (cb_state->in_use.load()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049", "Calling vkBeginCommandBuffer() on active command buffer %s before it has completed. 
You must check " "command buffer fence before this call.", report_data->FormatHandle(commandBuffer).c_str()); } if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { // Secondary Command Buffer const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo; if (!pInfo) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00051", "vkBeginCommandBuffer(): Secondary Command Buffer (%s) must have inheritance info.", report_data->FormatHandle(commandBuffer).c_str()); } else { if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { assert(pInfo->renderPass); auto framebuffer = GetFramebufferState(pInfo->framebuffer); if (framebuffer) { if (framebuffer->createInfo.renderPass != pInfo->renderPass) { // renderPass that framebuffer was created with must be compatible with local renderPass skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer", GetRenderPassState(pInfo->renderPass), "vkBeginCommandBuffer()", "VUID-VkCommandBufferBeginInfo-flags-00055"); } } } if ((pInfo->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) && (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00052", "vkBeginCommandBuffer(): Secondary Command Buffer (%s) must not have VK_QUERY_CONTROL_PRECISE_BIT if " "occlusionQuery is disabled or the device does not support precise occlusion queries.", report_data->FormatHandle(commandBuffer).c_str()); } } if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) { auto renderPass = GetRenderPassState(pInfo->renderPass); if (renderPass) { if (pInfo->subpass >= renderPass->createInfo.subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkCommandBufferBeginInfo-flags-00054", "vkBeginCommandBuffer(): Secondary Command Buffer (%s) must have a subpass index (%d) that is " "less than the number of subpasses (%d).", report_data->FormatHandle(commandBuffer).c_str(), pInfo->subpass, renderPass->createInfo.subpassCount); } } } } if (CB_RECORDING == cb_state->state) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049", "vkBeginCommandBuffer(): Cannot call Begin on command buffer (%s) in the RECORDING state.
Must first call " "vkEndCommandBuffer().", report_data->FormatHandle(commandBuffer).c_str()); } else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) { VkCommandPool cmdPool = cb_state->createInfo.commandPool; auto pPool = GetCommandPoolNode(cmdPool); if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00050", "Call to vkBeginCommandBuffer() on command buffer (%s) attempts to implicitly reset cmdBuffer created from " "command pool (%s) that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmdPool).c_str()); } } auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext); if (chained_device_group_struct) { skip |= ValidateDeviceMaskToPhysicalDeviceCount( chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106"); skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107"); } return skip; } void CoreChecks::PreCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); if (!cb_state) return; // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references ClearCmdBufAndMemReferences(cb_state); if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { // Secondary Command Buffer const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo; if (pInfo) { if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { assert(pInfo->renderPass); auto framebuffer = GetFramebufferState(pInfo->framebuffer); if (framebuffer) { // Connect this framebuffer and its children to this cmdBuffer AddFramebufferBinding(cb_state, framebuffer); } } } } if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) { ResetCommandBufferState(commandBuffer); } // Set updated state here in case implicit reset occurs above cb_state->state = CB_RECORDING; cb_state->beginInfo = *pBeginInfo; if (cb_state->beginInfo.pInheritanceInfo) { cb_state->inheritanceInfo = *(cb_state->beginInfo.pInheritanceInfo); cb_state->beginInfo.pInheritanceInfo = &cb_state->inheritanceInfo; // If we are a secondary command-buffer and inheriting. Update the items we should inherit. 
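// For reference, an application-side sketch of the inheritance chain consumed here (names are illustrative, not layer state):
//   VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//   inherit.renderPass = render_pass; inherit.subpass = 0; inherit.framebuffer = framebuffer;
//   VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//   begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//   begin.pInheritanceInfo = &inherit;
//   vkBeginCommandBuffer(secondary_cb, &begin);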
if ((cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { cb_state->activeRenderPass = GetRenderPassState(cb_state->beginInfo.pInheritanceInfo->renderPass); cb_state->activeSubpass = cb_state->beginInfo.pInheritanceInfo->subpass; cb_state->activeFramebuffer = cb_state->beginInfo.pInheritanceInfo->framebuffer; cb_state->framebuffers.insert(cb_state->beginInfo.pInheritanceInfo->framebuffer); } } auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext); if (chained_device_group_struct) { cb_state->initial_device_mask = chained_device_group_struct->deviceMask; } else { cb_state->initial_device_mask = (1 << physical_device_count) - 1; } } bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); if (!cb_state) return false; bool skip = false; if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) || !(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { // This needs spec clarification to update valid usage, see comments in PR: // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165 skip |= InsideRenderPass(cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060"); } skip |= ValidateCmd(cb_state, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()"); for (auto query : cb_state->activeQueries) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkEndCommandBuffer-commandBuffer-00061", "Ending command buffer with in progress query: queryPool %s, index %d.", report_data->FormatHandle(query.pool).c_str(), query.index); } return skip; } void CoreChecks::PostCallRecordEndCommandBuffer(VkCommandBuffer commandBuffer, VkResult result) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); if (!cb_state) return; // Cached validation is specific to a specific recording of a specific command buffer. 
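// Descriptor sets memoize per-recording validation results (tracked in validated_descriptor_sets); once this recording ends those results are stale, so they are dropped before the transition to CB_RECORDED below.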
for (auto descriptor_set : cb_state->validated_descriptor_sets) { descriptor_set->ClearCachedValidation(cb_state); } cb_state->validated_descriptor_sets.clear(); if (VK_SUCCESS == result) { cb_state->state = CB_RECORDED; } } bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) { bool skip = false; GLOBAL_CB_NODE *pCB = GetCBNode(commandBuffer); if (!pCB) return false; VkCommandPool cmdPool = pCB->createInfo.commandPool; auto pPool = GetCommandPoolNode(cmdPool); if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkResetCommandBuffer-commandBuffer-00046", "Attempt to reset command buffer (%s) created from command pool (%s) that does NOT have the " "VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmdPool).c_str()); } skip |= CheckCommandBufferInFlight(pCB, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045"); return skip; } void CoreChecks::PostCallRecordResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags, VkResult result) { if (VK_SUCCESS == result) { ResetCommandBufferState(commandBuffer); } } static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) { switch (pipelineBindPoint) { case VK_PIPELINE_BIND_POINT_GRAPHICS: return "graphics"; case VK_PIPELINE_BIND_POINT_COMPUTE: return "compute"; case VK_PIPELINE_BIND_POINT_RAY_TRACING_NV: return "ray-tracing"; default: return "unknown"; } } bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdBindPipeline-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()"); static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")}; skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors); auto pipeline_state = GetPipelineState(pipeline); assert(pipeline_state); const auto &pipeline_state_bind_point = pipeline_state->getPipelineType(); if (pipelineBindPoint != pipeline_state_bind_point) { if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-00779", "Cannot bind a pipeline of type %s to the graphics pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-00780", "Cannot bind a pipeline of type %s to the compute pipeline bind point", 
GetPipelineTypeName(pipeline_state_bind_point)); } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-02392", "Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } } return skip; } void CoreChecks::PreCallRecordCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); auto pipe_state = GetPipelineState(pipeline); if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) { cb_state->status &= ~cb_state->static_status; cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState); cb_state->status |= cb_state->static_status; } cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state; SetPipelineState(pipe_state); AddCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state); } bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetViewport-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()"); if (cb_state->static_status & CBSTATUS_VIEWPORT_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetViewport-None-01221", "vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag."); } return skip; } void CoreChecks::PreCallRecordCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport; cb_state->status |= CBSTATUS_VIEWPORT_SET; } bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetScissor-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()"); if (cb_state->static_status & CBSTATUS_SCISSOR_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-None-00590", "vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag."); } return skip; } void CoreChecks::PreCallRecordCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor; cb_state->status |= CBSTATUS_SCISSOR_SET; } bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetExclusiveScissorNV()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetExclusiveScissorNV-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETEXCLUSIVESCISSOR, "vkCmdSetExclusiveScissorNV()"); if (cb_state->static_status & CBSTATUS_EXCLUSIVE_SCISSOR_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02032", "vkCmdSetExclusiveScissorNV(): pipeline was created without VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV flag."); } if (!GetEnabledFeatures()->exclusive_scissor.exclusiveScissor) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02031", "vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled."); } return skip; } void CoreChecks::PreCallRecordCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); // TODO: We don't have VUIDs for validating that all exclusive scissors have been set. // cb_state->exclusiveScissorMask |= ((1u << exclusiveScissorCount) - 1u) << firstExclusiveScissor; cb_state->status |= CBSTATUS_EXCLUSIVE_SCISSOR_SET; } bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindShadingRateImageNV()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBindShadingRateImageNV-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDSHADINGRATEIMAGE, "vkCmdBindShadingRateImageNV()"); if (!GetEnabledFeatures()->shading_rate_image.shadingRateImage) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdBindShadingRateImageNV-None-02058", "vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled."); } if (imageView != VK_NULL_HANDLE) { auto view_state = GetImageViewState(imageView); if (!view_state || (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D && view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02059", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid " "VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY."); } if (view_state && view_state->create_info.format != VK_FORMAT_R8_UINT) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02060", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT."); } const VkImageCreateInfo *ici = view_state ?
&GetImageState(view_state->create_info.image)->createInfo : nullptr; if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02061", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been " "created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set."); } if (view_state) { auto image_state = GetImageState(view_state->create_info.image); bool hit_error = false; // XXX TODO: While the VUID says "each subresource", only the base mip level is // actually used. Since we don't have an existing convenience function to iterate // over all mip levels, just don't bother with non-base levels. VkImageSubresourceRange &range = view_state->create_info.subresourceRange; VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount}; if (image_state) { skip |= VerifyImageLayout(cb_state, image_state, subresource, imageLayout, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, "vkCmdBindShadingRateImageNV()", "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063", "VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error); } } } return skip; } void CoreChecks::PreCallRecordCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); if (imageView != VK_NULL_HANDLE) { auto view_state = GetImageViewState(imageView); AddCommandBufferBindingImageView(cb_state, view_state); } } bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV *pShadingRatePalettes) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportShadingRatePaletteNV()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTE, "vkCmdSetViewportShadingRatePaletteNV()"); if (!GetEnabledFeatures()->shading_rate_image.shadingRateImage) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064", "vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled."); } if (cb_state->static_status & CBSTATUS_SHADING_RATE_PALETTE_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02065", "vkCmdSetViewportShadingRatePaletteNV(): pipeline was created without " "VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV flag."); } for (uint32_t i = 0; i < viewportCount; ++i) { auto *palette = &pShadingRatePalettes[i]; if (palette->shadingRatePaletteEntryCount == 0 || palette->shadingRatePaletteEntryCount > phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071", "vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize."); } } return skip; } void
CoreChecks::PreCallRecordCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV *pShadingRatePalettes) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); // TODO: We don't have VUIDs for validating that all shading rate palettes have been set. // cb_state->shadingRatePaletteMask |= ((1u << viewportCount) - 1u) << firstViewport; cb_state->status |= CBSTATUS_SHADING_RATE_PALETTE_SET; } bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetLineWidth-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()"); if (cb_state->static_status & CBSTATUS_LINE_WIDTH_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetLineWidth-None-00787", "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag."); } return skip; } void CoreChecks::PreCallRecordCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->status |= CBSTATUS_LINE_WIDTH_SET; } bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetDepthBias-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()"); if (cb_state->static_status & CBSTATUS_DEPTH_BIAS_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-None-00789", "vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag."); } if ((depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-depthBiasClamp-00790", "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must " "be set to 0.0."); } return skip; } void CoreChecks::PreCallRecordCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->status |= CBSTATUS_DEPTH_BIAS_SET; } bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetBlendConstants-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()"); if (cb_state->static_status & CBSTATUS_BLEND_CONSTANTS_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetBlendConstants-None-00612", "vkCmdSetBlendConstants(): pipeline was created without
VK_DYNAMIC_STATE_BLEND_CONSTANTS flag."); } return skip; } void CoreChecks::PreCallRecordCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->status |= CBSTATUS_BLEND_CONSTANTS_SET; } bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetDepthBounds-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()"); if (cb_state->static_status & CBSTATUS_DEPTH_BOUNDS_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBounds-None-00599", "vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag."); } return skip; } void CoreChecks::PreCallRecordCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->status |= CBSTATUS_DEPTH_BOUNDS_SET; } bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetStencilCompareMask-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()"); if (cb_state->static_status & CBSTATUS_STENCIL_READ_MASK_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilCompareMask-None-00602", "vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag."); } return skip; } void CoreChecks::PreCallRecordCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->status |= CBSTATUS_STENCIL_READ_MASK_SET; } bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetStencilWriteMask-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()"); if (cb_state->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilWriteMask-None-00603", "vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag."); } return skip; } void CoreChecks::PreCallRecordCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->status |= CBSTATUS_STENCIL_WRITE_MASK_SET; } bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetStencilReference-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()"); if (cb_state->static_status & CBSTATUS_STENCIL_REFERENCE_SET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilReference-None-00604", "vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag."); } return skip; } void CoreChecks::PreCallRecordCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->status |= CBSTATUS_STENCIL_REFERENCE_SET; } // Update pipeline_layout bind points applying the "Pipeline Layout Compatibility" rules void CoreChecks::UpdateLastBoundDescriptorSets(GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint pipeline_bind_point, const PIPELINE_LAYOUT_NODE *pipeline_layout, uint32_t first_set, uint32_t set_count, const std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets, uint32_t dynamic_offset_count, const uint32_t *p_dynamic_offsets) { // Defensive assert(set_count); if (0 == set_count) return; assert(pipeline_layout); if (!pipeline_layout) return; uint32_t required_size = first_set + set_count; const uint32_t last_binding_index = required_size - 1; assert(last_binding_index < pipeline_layout->compat_for_set.size()); // Some useful shorthand auto &last_bound = cb_state->lastBound[pipeline_bind_point]; auto &bound_sets = last_bound.boundDescriptorSets; auto &dynamic_offsets = last_bound.dynamicOffsets; auto &bound_compat_ids = last_bound.compat_id_for_set; auto &pipe_compat_ids = pipeline_layout->compat_for_set; const uint32_t current_size = static_cast<uint32_t>(bound_sets.size()); assert(current_size == dynamic_offsets.size()); assert(current_size == bound_compat_ids.size()); // We need this three times in this function, but nowhere else auto push_descriptor_cleanup = [&last_bound](const cvdescriptorset::DescriptorSet *ds) -> bool { if (ds && ds->IsPushDescriptor()) { assert(ds == last_bound.push_descriptor_set.get()); last_bound.push_descriptor_set = nullptr; return true; } return false; }; // Clean up the "disturbed" sets before and after the range to be set if (required_size < current_size) { if (bound_compat_ids[last_binding_index] != pipe_compat_ids[last_binding_index]) { // We're disturbing those after last, we'll shrink below, but first need to check for and cleanup the push_descriptor for (auto set_idx = required_size; set_idx < current_size; ++set_idx) { if (push_descriptor_cleanup(bound_sets[set_idx])) break; } } else { // We're not disturbing past last, so leave the upper binding data alone.
required_size = current_size; } } // We resize if we need more set entries or if those past "last" are disturbed if (required_size != current_size) { // TODO: put these size tied things in a struct (touches many lines) bound_sets.resize(required_size); dynamic_offsets.resize(required_size); bound_compat_ids.resize(required_size); } // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update for (uint32_t set_idx = 0; set_idx < first_set; ++set_idx) { if (bound_compat_ids[set_idx] != pipe_compat_ids[set_idx]) { push_descriptor_cleanup(bound_sets[set_idx]); bound_sets[set_idx] = nullptr; dynamic_offsets[set_idx].clear(); bound_compat_ids[set_idx] = pipe_compat_ids[set_idx]; } } // Now update the bound sets with the input sets const uint32_t *input_dynamic_offsets = p_dynamic_offsets; // "read" pointer for dynamic offset data for (uint32_t input_idx = 0; input_idx < set_count; input_idx++) { auto set_idx = input_idx + first_set; // set_idx is index within layout, input_idx is index within input descriptor sets cvdescriptorset::DescriptorSet *descriptor_set = descriptor_sets[input_idx]; // Record binding (or push) if (descriptor_set != last_bound.push_descriptor_set.get()) { // Only cleanup the push descriptors if they aren't the currently used set. push_descriptor_cleanup(bound_sets[set_idx]); } bound_sets[set_idx] = descriptor_set; bound_compat_ids[set_idx] = pipe_compat_ids[set_idx]; // compat ids are canonical *per* set index if (descriptor_set) { auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount(); // TODO: Add logic for tracking push_descriptor offsets (here or in caller) if (set_dynamic_descriptor_count && input_dynamic_offsets) { const uint32_t *end_offset = input_dynamic_offsets + set_dynamic_descriptor_count; dynamic_offsets[set_idx] = std::vector<uint32_t>(input_dynamic_offsets, end_offset); input_dynamic_offsets = end_offset; assert(input_dynamic_offsets <= (p_dynamic_offsets + dynamic_offset_count)); } else { dynamic_offsets[set_idx].clear(); } if (!descriptor_set->IsPushDescriptor()) { // Can't cache validation of push_descriptors cb_state->validated_descriptor_sets.insert(descriptor_set); } } } } // Update the bound state for the bind point, including the effects of incompatible pipeline layouts void CoreChecks::PreCallRecordCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); auto pipeline_layout = GetPipelineLayout(layout); std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets; descriptor_sets.reserve(setCount); // Construct a list of the descriptors bool found_non_null = false; for (uint32_t i = 0; i < setCount; i++) { cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[i]); descriptor_sets.emplace_back(descriptor_set); found_non_null |= descriptor_set != nullptr; } if (found_non_null) { // which implies setCount > 0 UpdateLastBoundDescriptorSets(cb_state, pipelineBindPoint, pipeline_layout, firstSet, setCount, descriptor_sets, dynamicOffsetCount, pDynamicOffsets); cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout; } } static bool ValidateDynamicOffsetAlignment(const debug_report_data *report_data, const VkDescriptorSetLayoutBinding *binding, VkDescriptorType test_type, VkDeviceSize alignment, const uint32_t 
*pDynamicOffsets, const char *err_msg, const char *limit_name, uint32_t *offset_idx) { bool skip = false; if (binding->descriptorType == test_type) { const auto end_idx = *offset_idx + binding->descriptorCount; for (uint32_t current_idx = *offset_idx; current_idx < end_idx; current_idx++) { if (SafeModulo(pDynamicOffsets[current_idx], alignment) != 0) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, err_msg, "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of device limit %s 0x%" PRIxLEAST64 ".", current_idx, pDynamicOffsets[current_idx], limit_name, alignment); } } *offset_idx = end_idx; } return skip; } bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmdQueueFlags(cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdBindDescriptorSets-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()"); // Track total count of dynamic descriptor types to make sure we have an offset for each one uint32_t total_dynamic_descriptors = 0; string error_string = ""; uint32_t last_set_index = firstSet + setCount - 1; if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) { cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1); cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1); cb_state->lastBound[pipelineBindPoint].compat_id_for_set.resize(last_set_index + 1); } auto pipeline_layout = GetPipelineLayout(layout); for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) { cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]); if (descriptor_set) { // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout if (!VerifySetLayoutCompatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358", "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of " "pipelineLayout %s due to: %s.", set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str()); } auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount(); if (set_dynamic_descriptor_count) { // First make sure we won't overstep bounds of pDynamicOffsets array if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) { // Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359", "descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u dynamicOffsets are left in " "pDynamicOffsets array. 
There must be one dynamic offset for each dynamic descriptor being bound.", set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(), descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors)); // Set the number found to the maximum to prevent duplicate messages, or subsequent descriptor sets from // testing against the "short tail" we're skipping below. total_dynamic_descriptors = dynamicOffsetCount; } else { // Validate dynamic offsets and Dynamic Offset Minimums uint32_t cur_dyn_offset = total_dynamic_descriptors; const auto dsl = descriptor_set->GetLayout(); const auto binding_count = dsl->GetBindingCount(); const auto &limits = phys_dev_props.limits; for (uint32_t binding_idx = 0; binding_idx < binding_count; binding_idx++) { const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); skip |= ValidateDynamicOffsetAlignment(report_data, binding, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, limits.minUniformBufferOffsetAlignment, pDynamicOffsets, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971", "minUniformBufferOffsetAlignment", &cur_dyn_offset); skip |= ValidateDynamicOffsetAlignment(report_data, binding, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, limits.minStorageBufferOffsetAlignment, pDynamicOffsets, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972", "minStorageBufferOffsetAlignment", &cur_dyn_offset); } // Keep running total of dynamic descriptor count to verify at the end total_dynamic_descriptors += set_dynamic_descriptor_count; } } } else { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]), kVUID_Core_DrawState_InvalidSet, "Attempt to bind descriptor set %s that doesn't exist!", report_data->FormatHandle(pDescriptorSets[set_idx]).c_str()); } } // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound if (total_dynamic_descriptors != dynamicOffsetCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359", "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should " "exactly match the number of dynamic descriptors.", setCount, total_dynamic_descriptors, dynamicOffsetCount); } return skip; } // Validates that the supplied bind point is supported for the command buffer (viz. the command pool) // Takes array of error codes as some of the VUIDs (e.g. vkCmdBindPipeline) are written per bindpoint // TODO add vkCmdBindPipeline bind_point validation using this call.
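// For illustration only (a sketch, not part of the layer): the queue-family check below
// corresponds to application-side usage such as the following, where the command pool
// backing `cmd` was created on a compute-only queue family. The `cmd`,
// `compute_pipeline`, and `graphics_pipeline` handles are hypothetical:
//
//   vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline);   // allowed
//   vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, graphics_pipeline); // flagged below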
bool CoreChecks::ValidatePipelineBindPoint(GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point, const char *func_name, const std::map<VkPipelineBindPoint, std::string> &bind_errors) { bool skip = false; auto pool = GetCommandPoolNode(cb_state->createInfo.commandPool); if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)), }; const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex]; if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) { const std::string &error = bind_errors.at(bind_point); auto cb_u64 = HandleToUint64(cb_state->commandBuffer); auto cp_u64 = HandleToUint64(cb_state->createInfo.commandPool); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_u64, error, "%s: CommandBuffer %s was allocated from VkCommandPool %s that does not support bindpoint %s.", func_name, report_data->FormatHandle(cb_u64).c_str(), report_data->FormatHandle(cp_u64).c_str(), string_VkPipelineBindPoint(bind_point)); } } return skip; } bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); const char *func_name = "vkCmdPushDescriptorSetKHR()"; bool skip = false; skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name); skip |= ValidateCmdQueueFlags(cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT), "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool"); static const std::map<VkPipelineBindPoint, std::string> bind_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")}; skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, func_name, bind_errors); auto layout_data = GetPipelineLayout(layout); // Validate the set index points to a push descriptor set and is in range if (layout_data) { const auto &set_layouts = layout_data->set_layouts; const auto layout_u64 = HandleToUint64(layout); if (set < set_layouts.size()) { const auto dsl = set_layouts[set]; if (dsl) { if (!dsl->IsPushDescriptor()) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00365", "%s: Set index %" PRIu32 " does not match push descriptor set layout index for VkPipelineLayout %s.", func_name, set, report_data->FormatHandle(layout_u64).c_str()); } else { // Create an empty proxy in order to use the existing descriptor set update validation // TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we // don't have to do this. 
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, VK_NULL_HANDLE, dsl, 0, this); skip |= proxy_ds.ValidatePushDescriptorsUpdate(report_data, descriptorWriteCount, pDescriptorWrites, func_name); } } } else { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00364", "%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout %s (set < %" PRIu32 ").", func_name, set, report_data->FormatHandle(layout_u64).c_str(), static_cast<uint32_t>(set_layouts.size())); } } return skip; } void CoreChecks::RecordCmdPushDescriptorSetState(GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) { const auto &pipeline_layout = GetPipelineLayout(layout); // Short circuit invalid updates if (!pipeline_layout || (set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[set] || !pipeline_layout->set_layouts[set]->IsPushDescriptor()) return; // We need a descriptor set to update the bindings with, compatible with the passed layout const auto dsl = pipeline_layout->set_layouts[set]; auto &last_bound = cb_state->lastBound[pipelineBindPoint]; auto &push_descriptor_set = last_bound.push_descriptor_set; // If we are disturbing the current push_descriptor_set, clear it if (!push_descriptor_set || !CompatForSet(set, last_bound.compat_id_for_set, pipeline_layout->compat_for_set)) { push_descriptor_set.reset(new cvdescriptorset::DescriptorSet(0, 0, dsl, 0, this)); } std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets = {push_descriptor_set.get()}; UpdateLastBoundDescriptorSets(cb_state, pipelineBindPoint, pipeline_layout, set, 1, descriptor_sets, 0, nullptr); last_bound.pipeline_layout = layout; // Now that we have either the new or extant push_descriptor set ... do the write updates against it push_descriptor_set->PerformPushDescriptorsUpdate(descriptorWriteCount, pDescriptorWrites); } void CoreChecks::PreCallRecordCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); RecordCmdPushDescriptorSetState(cb_state, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites); } static VkDeviceSize GetIndexAlignment(VkIndexType indexType) { switch (indexType) { case VK_INDEX_TYPE_UINT16: return 2; case VK_INDEX_TYPE_UINT32: return 4; default: // Not a real index type. Express no alignment requirement here; we expect upper layer // to have already picked up on the enum being nonsense.
return 1; } } bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) { auto buffer_state = GetBufferState(buffer); auto cb_node = GetCBNode(commandBuffer); assert(buffer_state); assert(cb_node); bool skip = ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433", "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT"); skip |= ValidateCmdQueueFlags(cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBindIndexBuffer-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434"); auto offset_align = GetIndexAlignment(indexType); if (offset % offset_align) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdBindIndexBuffer-offset-00432", "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset, string_VkIndexType(indexType)); } return skip; } void CoreChecks::PreCallRecordCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) { auto buffer_state = GetBufferState(buffer); auto cb_node = GetCBNode(commandBuffer); cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND; cb_node->index_buffer_binding.buffer = buffer; cb_node->index_buffer_binding.size = buffer_state->createInfo.size; cb_node->index_buffer_binding.offset = offset; cb_node->index_buffer_binding.index_type = indexType; } bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) { auto cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBindVertexBuffers-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()"); for (uint32_t i = 0; i < bindingCount; ++i) { auto buffer_state = GetBufferState(pBuffers[i]); assert(buffer_state); skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, "VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers()", "VUID-vkCmdBindVertexBuffers-pBuffers-00628"); if (pOffsets[i] >= buffer_state->createInfo.size) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(buffer_state->buffer), "VUID-vkCmdBindVertexBuffers-pOffsets-00626", "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]); } } return skip; } void CoreChecks::PreCallRecordCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) { auto cb_state = GetCBNode(commandBuffer); uint32_t end = firstBinding + bindingCount; if (cb_state->current_draw_data.vertex_buffer_bindings.size() < end) { cb_state->current_draw_data.vertex_buffer_bindings.resize(end); } for (uint32_t i = 0; i < bindingCount; ++i) { auto &vertex_buffer_binding = cb_state->current_draw_data.vertex_buffer_bindings[i + 
firstBinding]; vertex_buffer_binding.buffer = pBuffers[i]; vertex_buffer_binding.offset = pOffsets[i]; } } // Validate that an image's sampleCount matches the requirement for a specific API call bool CoreChecks::ValidateImageSampleCount(IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location, const std::string &msgCode) { bool skip = false; if (image_state->createInfo.samples != sample_count) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), msgCode, "%s for image %s was created with a sample count of %s but must be %s.", location, report_data->FormatHandle(image_state->image).c_str(), string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count)); } return skip; } bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) { auto cb_state = GetCBNode(commandBuffer); assert(cb_state); auto dst_buffer_state = GetBufferState(dstBuffer); assert(dst_buffer_state); bool skip = false; skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035"); // Validate that DST buffer has correct usage flags set skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdUpdateBuffer-dstBuffer-00034", "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdUpdateBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdUpdateBuffer-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()"); skip |= InsideRenderPass(cb_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-renderpass"); return skip; } void CoreChecks::PostCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) { auto cb_state = GetCBNode(commandBuffer); auto dst_buffer_state = GetBufferState(dstBuffer); // Update bindings between buffer and cmd buffer AddCommandBufferBindingBuffer(cb_state, dst_buffer_state); } bool CoreChecks::SetEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { GLOBAL_CB_NODE *pCB = GetCBNode(commandBuffer); if (pCB) { pCB->eventToStageMap[event] = stageMask; } auto queue_data = queueMap.find(queue); if (queue_data != queueMap.end()) { queue_data->second.eventToStageMap[event] = stageMask; } return false; } bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdSetEvent-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETEVENT, "vkCmdSetEvent()"); skip |= InsideRenderPass(cb_state, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-renderpass"); skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-stageMask-01150", "VUID-vkCmdSetEvent-stageMask-01151", "VUID-vkCmdSetEvent-stageMask-02107", "VUID-vkCmdSetEvent-stageMask-02108"); return skip; } void CoreChecks::PreCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); 
auto event_state = GetEventNode(event); if (event_state) { AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, cb_state); event_state->cb_bindings.insert(cb_state); } cb_state->events.push_back(event); if (!cb_state->waitedEvents.count(event)) { cb_state->writeEventsBeforeWait.push_back(event); } cb_state->eventUpdates.emplace_back([=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, stageMask); }); } bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdResetEvent-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_RESETEVENT, "vkCmdResetEvent()"); skip |= InsideRenderPass(cb_state, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-renderpass"); skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-stageMask-01154", "VUID-vkCmdResetEvent-stageMask-01155", "VUID-vkCmdResetEvent-stageMask-02109", "VUID-vkCmdResetEvent-stageMask-02110"); return skip; } void CoreChecks::PreCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); auto event_state = GetEventNode(event); if (event_state) { AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, cb_state); event_state->cb_bindings.insert(cb_state); } cb_state->events.push_back(event); if (!cb_state->waitedEvents.count(event)) { cb_state->writeEventsBeforeWait.push_back(event); } // TODO : Add check for "VUID-vkResetEvent-event-01148" cb_state->eventUpdates.emplace_back( [=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); }); } // Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is set static VkPipelineStageFlags ExpandPipelineStageFlags(const DeviceExtensions &extensions, VkPipelineStageFlags inflags) { if (~inflags & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) return inflags; return (inflags & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) | (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | (extensions.vk_nv_mesh_shader ? (VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV) : 0) | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT | (extensions.vk_ext_conditional_rendering ? VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT : 0) | (extensions.vk_ext_transform_feedback ? VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT : 0) | (extensions.vk_nv_shading_rate_image ? VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV : 0) | (extensions.vk_ext_fragment_density_map ? 
VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT : 0)); } static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags inflags) { return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0; } static int GetGraphicsPipelineStageLogicalOrdinal(VkPipelineStageFlagBits flag) { // Note that the list (and lookup) ignores the invalid-for-enabled-extension condition. This should be checked elsewhere // and would greatly complicate this intentionally simple implementation // clang-format off const VkPipelineStageFlagBits ordered_array[] = { VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT, // Including the task/mesh shaders here is not technically correct, as they are in a // separate logical pipeline - but it works for the case this is currently used, and // fixing it would require significant rework and end up with the code being far more // verbose for no practical gain. // However, worth paying attention to this if using this function in a new way. VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV, VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT }; // clang-format on const int ordered_array_length = sizeof(ordered_array) / sizeof(VkPipelineStageFlagBits); for (int i = 0; i < ordered_array_length; ++i) { if (ordered_array[i] == flag) { return i; } } return -1; } // The following two functions technically have O(N^2) complexity, but it's for a value of N that's largely // stable and also rather tiny - this could definitely be rejigged to work more efficiently, but the impact // on runtime is currently negligible, so it wouldn't gain very much. // If we add a lot more graphics pipeline stages, this set of functions should be rewritten to accommodate.
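// For illustration only (a sketch, assuming exactly these two bits are set in the
// input mask), the ordered list above yields:
//
//   GetLogicallyEarliestGraphicsPipelineStage(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
//                                             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT)
//       == VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
//   GetLogicallyLatestGraphicsPipelineStage(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
//                                           VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT)
//       == VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
//
// since vertex shading logically precedes fragment shading in the graphics pipeline.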
static VkPipelineStageFlagBits GetLogicallyEarliestGraphicsPipelineStage(VkPipelineStageFlags inflags) { VkPipelineStageFlagBits earliest_bit = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; int earliest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(earliest_bit); for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) { VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i); if (current_flag) { int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag); if (new_order != -1 && new_order < earliest_bit_order) { earliest_bit_order = new_order; earliest_bit = current_flag; } } inflags = inflags >> 1; } return earliest_bit; } static VkPipelineStageFlagBits GetLogicallyLatestGraphicsPipelineStage(VkPipelineStageFlags inflags) { VkPipelineStageFlagBits latest_bit = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; int latest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(latest_bit); for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) { VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i); if (current_flag) { int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag); if (new_order != -1 && new_order > latest_bit_order) { latest_bit_order = new_order; latest_bit = current_flag; } } inflags = inflags >> 1; } return latest_bit; } // Verify image barrier image state and that the image is consistent with FB image bool CoreChecks::ValidateImageBarrierImage(const char *funcName, GLOBAL_CB_NODE const *cb_state, VkFramebuffer framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription2KHR &sub_desc, uint64_t rp_handle, uint32_t img_index, const VkImageMemoryBarrier &img_barrier) { bool skip = false; const auto &fb_state = GetFramebufferState(framebuffer); assert(fb_state); const auto img_bar_image = img_barrier.image; bool image_match = false; bool sub_image_found = false; // Do we find a corresponding subpass description VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED; uint32_t attach_index = 0; // Verify that a framebuffer image matches barrier image const auto attachmentCount = fb_state->createInfo.attachmentCount; for (uint32_t attachment = 0; attachment < attachmentCount; ++attachment) { auto view_state = GetAttachmentImageViewState(fb_state, attachment); if (view_state && (img_bar_image == view_state->create_info.image)) { image_match = true; attach_index = attachment; break; } } if (image_match) { // Make sure subpass is referring to matching attachment if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) { sub_image_layout = sub_desc.pDepthStencilAttachment->layout; sub_image_found = true; } else if (GetDeviceExtensions()->vk_khr_depth_stencil_resolve) { const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(sub_desc.pNext); if (resolve && resolve->pDepthStencilResolveAttachment && resolve->pDepthStencilResolveAttachment->attachment == attach_index) { sub_image_layout = resolve->pDepthStencilResolveAttachment->layout; sub_image_found = true; } } else { for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) { if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) { sub_image_layout = sub_desc.pColorAttachments[j].layout; sub_image_found = true; break; } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) { sub_image_layout = sub_desc.pResolveAttachments[j].layout; sub_image_found = true; break; } } } if (!sub_image_found) { skip
|= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-image-02635", "%s: Barrier pImageMemoryBarriers[%d].image (%s) is not referenced by the VkSubpassDescription for " "active subpass (%d) of current renderPass (%s).", funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass, report_data->FormatHandle(rp_handle).c_str()); } } else { // !image_match auto const fb_handle = HandleToUint64(fb_state->framebuffer); skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, fb_handle, "VUID-vkCmdPipelineBarrier-image-02635", "%s: Barrier pImageMemoryBarriers[%d].image (%s) does not match an image from the current framebuffer (%s).", funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), report_data->FormatHandle(fb_handle).c_str()); } if (img_barrier.oldLayout != img_barrier.newLayout) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-oldLayout-01181", "%s: As the Image Barrier for image %s is being executed within a render pass instance, oldLayout must " "equal newLayout yet they are %s and %s.", funcName, report_data->FormatHandle(img_barrier.image).c_str(), string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(img_barrier.newLayout)); } else { if (sub_image_found && sub_image_layout != img_barrier.oldLayout) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-oldLayout-02636", "%s: Barrier pImageMemoryBarriers[%d].image (%s) is referenced by the VkSubpassDescription for active " "subpass (%d) of current renderPass (%s) as having layout %s, but image barrier has layout %s.", funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass, report_data->FormatHandle(rp_handle).c_str(), string_VkImageLayout(sub_image_layout), string_VkImageLayout(img_barrier.oldLayout)); } } return skip; } // Validate image barriers within a renderPass bool CoreChecks::ValidateRenderPassImageBarriers(const char *funcName, GLOBAL_CB_NODE *cb_state, uint32_t active_subpass, const safe_VkSubpassDescription2KHR &sub_desc, uint64_t rp_handle, const safe_VkSubpassDependency2KHR *dependencies, const std::vector<uint32_t> &self_dependencies, uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) { bool skip = false; for (uint32_t i = 0; i < image_mem_barrier_count; ++i) { const auto &img_barrier = image_barriers[i]; const auto &img_src_access_mask = img_barrier.srcAccessMask; const auto &img_dst_access_mask = img_barrier.dstAccessMask; bool access_mask_match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; access_mask_match = (img_src_access_mask == (sub_dep.srcAccessMask & img_src_access_mask)) && (img_dst_access_mask == (sub_dep.dstAccessMask & img_dst_access_mask)); if (access_mask_match) break; } if (!access_mask_match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency " "srcAccessMask of subpass %d of renderPass 
%s. Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, i, img_src_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency " "dstAccessMask of subpass %d of renderPass %s. Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, i, img_dst_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex || VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182", "%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and " "pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.", funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex); } // Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known if (VK_NULL_HANDLE == cb_state->activeFramebuffer) { assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level); // Secondary CB case w/o FB specified delay validation cb_state->cmd_execute_commands_functions.emplace_back([=](GLOBAL_CB_NODE *primary_cb, VkFramebuffer fb) { return ValidateImageBarrierImage(funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i, img_barrier); }); } else { skip |= ValidateImageBarrierImage(funcName, cb_state, cb_state->activeFramebuffer, active_subpass, sub_desc, rp_handle, i, img_barrier); } } return skip; } // Validate VUs for Pipeline Barriers that are within a renderPass // Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state bool CoreChecks::ValidateRenderPassPipelineBarriers(const char *funcName, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, VkDependencyFlags dependency_flags, uint32_t mem_barrier_count, const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count, const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) { bool skip = false; const auto rp_state = cb_state->activeRenderPass; const auto active_subpass = cb_state->activeSubpass; auto rp_handle = HandleToUint64(rp_state->renderPass); const auto &self_dependencies = rp_state->self_dependencies[active_subpass]; const auto &dependencies = rp_state->createInfo.pDependencies; if (self_dependencies.size() == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barriers cannot be set during subpass %d of renderPass %s with no self-dependency specified.", funcName, active_subpass, report_data->FormatHandle(rp_handle).c_str()); } else { // Grab ref to current subpassDescription up-front for use below const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass]; // Look for matching mask in any self-dependency bool stage_mask_match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; const auto &sub_src_stage_mask = 
ExpandPipelineStageFlags(device_extensions, sub_dep.srcStageMask); const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(device_extensions, sub_dep.dstStageMask); stage_mask_match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (src_stage_mask == (sub_src_stage_mask & src_stage_mask))) && ((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask))); if (stage_mask_match) break; } if (!stage_mask_match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask of any " "self-dependency of subpass %d of renderPass %s for which dstStageMask is also a subset. " "Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, src_stage_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask of any " "self-dependency of subpass %d of renderPass %s for which srcStageMask is also a subset. " "Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, dst_stage_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } if (0 != buffer_mem_barrier_count) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178", "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of renderPass %s.", funcName, buffer_mem_barrier_count, active_subpass, report_data->FormatHandle(rp_handle).c_str()); } for (uint32_t i = 0; i < mem_barrier_count; ++i) { const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask; const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask; bool access_mask_match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; access_mask_match = (mb_src_access_mask == (sub_dep.srcAccessMask & mb_src_access_mask)) && (mb_dst_access_mask == (sub_dep.dstAccessMask & mb_dst_access_mask)); if (access_mask_match) break; } if (!access_mask_match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency srcAccessMask " "for any self-dependency of subpass %d of renderPass %s for which dstAccessMask is also a subset. 
" "Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, i, mb_src_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency dstAccessMask " "for any self-dependency of subpass %d of renderPass %s for which srcAccessMask is also a subset. " "Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, i, mb_dst_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } } skip |= ValidateRenderPassImageBarriers(funcName, cb_state, active_subpass, sub_desc, rp_handle, dependencies, self_dependencies, image_mem_barrier_count, image_barriers); bool flag_match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; flag_match = sub_dep.dependencyFlags == dependency_flags; if (flag_match) break; } if (!flag_match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any " "self-dependency of subpass %d of renderPass %s. Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, dependency_flags, cb_state->activeSubpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } } return skip; } // Array to mask individual accessMask to corresponding stageMask // accessMask active bit position (0-31) maps to index const static VkPipelineStageFlags AccessMaskToPipeStage[28] = { // VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, // VK_ACCESS_INDEX_READ_BIT = 1 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, // VK_ACCESS_UNIFORM_READ_BIT = 3 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV, // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // VK_ACCESS_SHADER_READ_BIT = 5 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV, // VK_ACCESS_SHADER_WRITE_BIT = 6 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV, // 
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, // VK_ACCESS_TRANSFER_READ_BIT = 11 VK_PIPELINE_STAGE_TRANSFER_BIT, // VK_ACCESS_TRANSFER_WRITE_BIT = 12 VK_PIPELINE_STAGE_TRANSFER_BIT, // VK_ACCESS_HOST_READ_BIT = 13 VK_PIPELINE_STAGE_HOST_BIT, // VK_ACCESS_HOST_WRITE_BIT = 14 VK_PIPELINE_STAGE_HOST_BIT, // VK_ACCESS_MEMORY_READ_BIT = 15 VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match // VK_ACCESS_MEMORY_WRITE_BIT = 16 VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match // VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17 VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, // VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18 VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, // VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 19 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 20 VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, // VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 21 VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV | VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, // VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 22 VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, // VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 23 VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV, // VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 24 VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT, // VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 25 VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT, // VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 26 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, // VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 27 VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT, }; // Verify that all bits of access_mask are supported by the src_stage_mask static bool ValidateAccessMaskPipelineStage(const DeviceExtensions &extensions, VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) { // Early out if all commands set, or access_mask NULL if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true; stage_mask = ExpandPipelineStageFlags(extensions, stage_mask); int index = 0; // for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set while (access_mask) { index = (u_ffs(access_mask) - 1); assert(index >= 0); // Must have "!= 0" compare to prevent warning from MSVC if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false; // early out access_mask &= ~(1 << index); // Mask off bit that's been checked } return true; } namespace barrier_queue_families { enum VuIndex { kSrcOrDstMustBeIgnore, kSpecialOrIgnoreOnly, kSrcIgnoreRequiresDstIgnore, kDstValidOrSpecialIfNotIgnore, kSrcValidOrSpecialIfNotIgnore, kSrcAndDestMustBeIgnore, kBothIgnoreOrBothValid, kSubmitQueueMustMatchSrcOrDst }; static const char *vu_summary[] = {"Source or destination queue family must be ignored.", "Source or destination queue family must be special or ignored.", "Destination queue family must be ignored if source queue family is.", "Destination queue family must be valid, ignored, or special.", "Source queue family must be valid, ignored, or special.", "Source and destination queue family must both be ignored.", "Source and 
destination queue family must both be ignored or both valid.",
                                     "Source or destination queue family must match submit queue family, if not ignored."};

static const std::string image_error_codes[] = {
    "VUID-VkImageMemoryBarrier-image-01381",  // kSrcOrDstMustBeIgnore
    "VUID-VkImageMemoryBarrier-image-01766",  // kSpecialOrIgnoreOnly
    "VUID-VkImageMemoryBarrier-image-01201",  // kSrcIgnoreRequiresDstIgnore
    "VUID-VkImageMemoryBarrier-image-01768",  // kDstValidOrSpecialIfNotIgnore
    "VUID-VkImageMemoryBarrier-image-01767",  // kSrcValidOrSpecialIfNotIgnore
    "VUID-VkImageMemoryBarrier-image-01199",  // kSrcAndDestMustBeIgnore
    "VUID-VkImageMemoryBarrier-image-01200",  // kBothIgnoreOrBothValid
    "VUID-VkImageMemoryBarrier-image-01205",  // kSubmitQueueMustMatchSrcOrDst
};

static const std::string buffer_error_codes[] = {
    "VUID-VkBufferMemoryBarrier-buffer-01191",  // kSrcOrDstMustBeIgnore
    "VUID-VkBufferMemoryBarrier-buffer-01763",  // kSpecialOrIgnoreOnly
    "VUID-VkBufferMemoryBarrier-buffer-01193",  // kSrcIgnoreRequiresDstIgnore
    "VUID-VkBufferMemoryBarrier-buffer-01765",  // kDstValidOrSpecialIfNotIgnore
    "VUID-VkBufferMemoryBarrier-buffer-01764",  // kSrcValidOrSpecialIfNotIgnore
    "VUID-VkBufferMemoryBarrier-buffer-01190",  // kSrcAndDestMustBeIgnore
    "VUID-VkBufferMemoryBarrier-buffer-01192",  // kBothIgnoreOrBothValid
    "VUID-VkBufferMemoryBarrier-buffer-01196",  // kSubmitQueueMustMatchSrcOrDst
};

class ValidatorState {
  public:
    ValidatorState(const CoreChecks *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const uint64_t barrier_handle64, const VkSharingMode sharing_mode, const VulkanObjectType object_type,
                   const std::string *val_codes)
        : report_data_(device_data->report_data),
          func_name_(func_name),
          cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
          barrier_handle64_(barrier_handle64),
          sharing_mode_(sharing_mode),
          object_type_(object_type),
          val_codes_(val_codes),
          limit_(static_cast<uint32_t>(device_data->physical_device_state->queue_family_properties.size())),
          mem_ext_(device_data->device_extensions.vk_khr_external_memory) {}

    // Create a validator state from an image state... reducing the image specific to the generic version.
    ValidatorState(const CoreChecks *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state)
        : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->image), state->createInfo.sharingMode,
                         kVulkanObjectTypeImage, image_error_codes) {}

    // Create a validator state from a buffer state... reducing the buffer specific to the generic version.
    ValidatorState(const CoreChecks *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state)
        : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->buffer), state->createInfo.sharingMode,
                         kVulkanObjectTypeBuffer,  // buffer barriers report as buffer objects, not images
                         buffer_error_codes) {}

    // Log the messages using boilerplate from object state, and Vu specific information from the template arg
    // One and two family versions, in the single family version, Vu holds the name of the passed parameter
    bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
        const std::string &val_code = val_codes_[vu_index];
        const char *annotation = GetFamilyAnnotation(family);
        return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
                       val_code, "%s: Barrier using %s %s created with sharingMode %s, has %s %u%s. 
%s", func_name_, GetTypeString(), report_data_->FormatHandle(barrier_handle64_).c_str(), GetModeString(), param_name, family, annotation, vu_summary[vu_index]); } bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const { const std::string &val_code = val_codes_[vu_index]; const char *src_annotation = GetFamilyAnnotation(src_family); const char *dst_annotation = GetFamilyAnnotation(dst_family); return log_msg( report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_, val_code, "%s: Barrier using %s %s created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s", func_name_, GetTypeString(), report_data_->FormatHandle(barrier_handle64_).c_str(), GetModeString(), src_family, src_annotation, dst_family, dst_annotation, vu_summary[vu_index]); } // This abstract Vu can only be tested at submit time, thus we need a callback from the closure containing the needed // data. Note that the mem_barrier is copied to the closure as the lambda lifespan exceed the guarantees of validity for // application input. static bool ValidateAtQueueSubmit(const VkQueue queue, const CoreChecks *device_data, uint32_t src_family, uint32_t dst_family, const ValidatorState &val) { auto queue_data_it = device_data->queueMap.find(queue); if (queue_data_it == device_data->queueMap.end()) return false; uint32_t queue_family = queue_data_it->second.queueFamilyIndex; if ((src_family != queue_family) && (dst_family != queue_family)) { const std::string &val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst]; const char *src_annotation = val.GetFamilyAnnotation(src_family); const char *dst_annotation = val.GetFamilyAnnotation(dst_family); return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue), val_code, "%s: Barrier submitted to queue with family index %u, using %s %s created with sharingMode %s, has " "srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. 
%s", "vkQueueSubmit", queue_family, val.GetTypeString(), device_data->report_data->FormatHandle(val.barrier_handle64_).c_str(), val.GetModeString(), src_family, src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]); } return false; } // Logical helpers for semantic clarity inline bool KhrExternalMem() const { return mem_ext_; } inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); } inline bool IsValidOrSpecial(uint32_t queue_family) const { return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family)); } inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; } // Helpers for LogMsg (and log_msg) const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); } // Descriptive text for the various types of queue family index const char *GetFamilyAnnotation(uint32_t family) const { const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)"; const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)"; const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)"; const char *valid = " (VALID)"; const char *invalid = " (INVALID)"; switch (family) { case VK_QUEUE_FAMILY_EXTERNAL_KHR: return external; case VK_QUEUE_FAMILY_FOREIGN_EXT: return foreign; case VK_QUEUE_FAMILY_IGNORED: return ignored; default: if (IsValid(family)) { return valid; } return invalid; }; } const char *GetTypeString() const { return object_string[object_type_]; } VkSharingMode GetSharingMode() const { return sharing_mode_; } protected: const debug_report_data *const report_data_; const char *const func_name_; const uint64_t cb_handle64_; const uint64_t barrier_handle64_; const VkSharingMode sharing_mode_; const VulkanObjectType object_type_; const std::string *val_codes_; const uint32_t limit_; const bool mem_ext_; }; bool Validate(const CoreChecks *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state, const ValidatorState &val, const uint32_t src_queue_family, const uint32_t dst_queue_family) { bool skip = false; const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT; const bool src_ignored = val.IsIgnored(src_queue_family); const bool dst_ignored = val.IsIgnored(dst_queue_family); if (val.KhrExternalMem()) { if (mode_concurrent) { if (!(src_ignored || dst_ignored)) { skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family); } if ((src_ignored && !(dst_ignored || IsSpecial(dst_queue_family))) || (dst_ignored && !(src_ignored || IsSpecial(src_queue_family)))) { skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family); } } else { // VK_SHARING_MODE_EXCLUSIVE if (src_ignored && !dst_ignored) { skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family); } if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) { skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex"); } if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) { skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex"); } } } else { // No memory extension if (mode_concurrent) { if (!src_ignored || !dst_ignored) { skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family); } } else { // VK_SHARING_MODE_EXCLUSIVE if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) { skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family); } } } if (!mode_concurrent && !src_ignored && !dst_ignored) { 
// Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria // TODO create a better named list, or rename the submit time lists to something that matches the broader usage... // Note: if we want to create a semantic that separates state lookup, validation, and state update this should go // to a local queue of update_state_actions or something. cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) { return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val); }); } return skip; } } // namespace barrier_queue_families // Type specific wrapper for image barriers bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, GLOBAL_CB_NODE *cb_state, const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state_data) { // State data is required if (!state_data) { return false; } // Create the validator state from the image state barrier_queue_families::ValidatorState val(this, func_name, cb_state, barrier, state_data); const uint32_t src_queue_family = barrier->srcQueueFamilyIndex; const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex; return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family); } // Type specific wrapper for buffer barriers bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, GLOBAL_CB_NODE *cb_state, const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state_data) { // State data is required if (!state_data) { return false; } // Create the validator state from the buffer state barrier_queue_families::ValidatorState val(this, func_name, cb_state, barrier, state_data); const uint32_t src_queue_family = barrier->srcQueueFamilyIndex; const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex; return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family); } bool CoreChecks::ValidateBarriers(const char *funcName, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount, const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers) { bool skip = false; for (uint32_t i = 0; i < memBarrierCount; ++i) { const auto &mem_barrier = pMemBarriers[i]; if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184", "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i, mem_barrier.srcAccessMask, src_stage_mask); } if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185", "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i, mem_barrier.dstAccessMask, dst_stage_mask); } } for (uint32_t i = 0; i < imageMemBarrierCount; ++i) { auto mem_barrier = &pImageMemBarriers[i]; if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier->srcAccessMask, 
src_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184", "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i, mem_barrier->srcAccessMask, src_stage_mask); } if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier->dstAccessMask, dst_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185", "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i, mem_barrier->dstAccessMask, dst_stage_mask); } auto image_data = GetImageState(mem_barrier->image); skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, image_data); if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-newLayout-01198", "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName); } if (image_data) { // There is no VUID for this, but there is blanket text: // "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before // recording commands in a command buffer." // TODO: Update this when VUID is defined skip |= ValidateMemoryIsBoundToImage(image_data, funcName, kVUIDUndefined); auto aspect_mask = mem_barrier->subresourceRange.aspectMask; skip |= ValidateImageAspectMask(image_data->image, image_data->createInfo.format, aspect_mask, funcName); std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange"; skip |= ValidateImageBarrierSubresourceRange(image_data, mem_barrier->subresourceRange, funcName, param_name.c_str()); } } for (uint32_t i = 0; i < bufferBarrierCount; ++i) { auto mem_barrier = &pBufferMemBarriers[i]; if (!mem_barrier) continue; if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier->srcAccessMask, src_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184", "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i, mem_barrier->srcAccessMask, src_stage_mask); } if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier->dstAccessMask, dst_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185", "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i, mem_barrier->dstAccessMask, dst_stage_mask); } // Validate buffer barrier queue family indices auto buffer_state = GetBufferState(mem_barrier->buffer); skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, buffer_state); if (buffer_state) { // There is no VUID for this, but there is blanket text: // "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before // recording commands in a command buffer" // TODO: 
Update this when VUID is defined skip |= ValidateMemoryIsBoundToBuffer(buffer_state, funcName, kVUIDUndefined); auto buffer_size = buffer_state->createInfo.size; if (mem_barrier->offset >= buffer_size) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-offset-01187", "%s: Buffer Barrier %s has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".", funcName, report_data->FormatHandle(mem_barrier->buffer).c_str(), HandleToUint64(mem_barrier->offset), HandleToUint64(buffer_size)); } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-size-01189", "%s: Buffer Barrier %s has offset 0x%" PRIx64 " and size 0x%" PRIx64 " whose sum is greater than total size 0x%" PRIx64 ".", funcName, report_data->FormatHandle(mem_barrier->buffer).c_str(), HandleToUint64(mem_barrier->offset), HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size)); } } } skip |= ValidateBarriersQFOTransferUniqueness(funcName, cb_state, bufferBarrierCount, pBufferMemBarriers, imageMemBarrierCount, pImageMemBarriers); return skip; } bool CoreChecks::ValidateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) { bool skip = false; VkPipelineStageFlags stageMask = 0; for (uint32_t i = 0; i < eventCount; ++i) { auto event = pCB->events[firstEventIndex + i]; auto queue_data = queueMap.find(queue); if (queue_data == queueMap.end()) return false; auto event_data = queue_data->second.eventToStageMap.find(event); if (event_data != queue_data->second.eventToStageMap.end()) { stageMask |= event_data->second; } else { auto global_event_data = GetEventNode(event); if (!global_event_data) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, HandleToUint64(event), kVUID_Core_DrawState_InvalidEvent, "Event %s cannot be waited on if it has never been set.", report_data->FormatHandle(event).c_str()); } else { stageMask |= global_event_data->stageMask; } } } // TODO: Need to validate that host_bit is only set if set event is called // but set event can be called at any time. 
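    // Worked example (hypothetical masks): if the two waited events were set with
    // VK_PIPELINE_STAGE_TRANSFER_BIT and VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT respectively,
    // stageMask accumulates to (TRANSFER | COMPUTE_SHADER) and the check below accepts exactly
    //   sourceStageMask == stageMask                                  // the plain OR, or
    //   sourceStageMask == (stageMask | VK_PIPELINE_STAGE_HOST_BIT)   // the OR plus the host bit
    // and flags anything else.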
if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), "VUID-vkCmdWaitEvents-srcStageMask-parameter", "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of " "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with " "vkSetEvent but instead is 0x%X.", sourceStageMask, stageMask); } return skip; } // Note that we only check bits that HAVE required queueflags -- don't care entries are skipped static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = { {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT}, {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT}, {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT}, {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT}, {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}}; static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT}; bool CoreChecks::CheckStageMaskQueueCompatibility(VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask, VkQueueFlags queue_flags, const char *function, const char *src_or_dest, const char *error_code) { bool skip = false; // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags for (const auto &item : stage_flag_bit_array) { if (stage_mask & item) { if ((supported_pipeline_stages_table[item] & queue_flags) == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), error_code, "%s(): %s flag %s is not compatible with the queue family properties of this command buffer.", function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item))); } } } return skip; } // Check if all barriers are of a given operation type. 
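// Usage sketch (mirrors the calls in ComputeBarrierOperationsType below; op_check is any callable
// taking (pool, barrier*), such as TempIsReleaseOp<VkBufferMemoryBarrier>):
//   if (AllTransferOp(pool, TempIsReleaseOp<VkBufferMemoryBarrier>, count, barriers)) { ... }
// Note it vacuously returns true when count == 0, so callers must guard the zero-barrier case.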
template <typename Barrier, typename OpCheck>
bool AllTransferOp(const COMMAND_POOL_NODE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
    if (!pool) return false;

    for (uint32_t b = 0; b < count; b++) {
        if (!op_check(pool, barriers + b)) return false;
    }
    return true;
}

// Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(GLOBAL_CB_NODE *cb_state, uint32_t buffer_barrier_count,
                                                               const VkBufferMemoryBarrier *buffer_barriers,
                                                               uint32_t image_barrier_count,
                                                               const VkImageMemoryBarrier *image_barriers) {
    auto pool = GetCommandPoolNode(cb_state->createInfo.commandPool);
    BarrierOperationsType op_type = kGeneral;

    // Look at the barrier details only if they exist
    // Note: AllTransferOp returns true for count == 0
    if ((buffer_barrier_count + image_barrier_count) != 0) {
        if (AllTransferOp(pool, TempIsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
            AllTransferOp(pool, TempIsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
            op_type = kAllRelease;
        } else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
                   AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
            op_type = kAllAcquire;
        }
    }
    return op_type;
}

bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(GLOBAL_CB_NODE const *cb_state,
                                                            VkPipelineStageFlags source_stage_mask,
                                                            VkPipelineStageFlags dest_stage_mask,
                                                            BarrierOperationsType barrier_op_type, const char *function,
                                                            const char *error_code) {
    bool skip = false;
    uint32_t queue_family_index = commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
    auto physical_device_state = GetPhysicalDeviceState();

    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
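    // Worked example (hypothetical queue setup): for a command pool created on a compute-only
    // family (queueFlags == VK_QUEUE_COMPUTE_BIT), a srcStageMask containing
    // VK_PIPELINE_STAGE_VERTEX_SHADER_BIT fails the check below, since that stage's table entry
    // is VK_QUEUE_GRAPHICS_BIT and (VK_QUEUE_GRAPHICS_BIT & VK_QUEUE_COMPUTE_BIT) == 0, while
    // VK_PIPELINE_STAGE_TRANSFER_BIT passes because its entry includes VK_QUEUE_COMPUTE_BIT.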
if (queue_family_index < physical_device_state->queue_family_properties.size()) { VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags; // Only check the source stage mask if any barriers aren't "acquire ownership" if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) { skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, source_stage_mask, specified_queue_flags, function, "srcStageMask", error_code); } // Only check the dest stage mask if any barriers aren't "release ownership" if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) { skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, dest_stage_mask, specified_queue_flags, function, "dstStageMask", error_code); } } return skip; } bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); bool skip = ValidateStageMasksAgainstQueueCapabilities(cb_state, sourceStageMask, dstStageMask, barrier_op_type, "vkCmdWaitEvents", "VUID-vkCmdWaitEvents-srcStageMask-01164"); skip |= ValidateStageMaskGsTsEnables(sourceStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-srcStageMask-01159", "VUID-vkCmdWaitEvents-srcStageMask-01161", "VUID-vkCmdWaitEvents-srcStageMask-02111", "VUID-vkCmdWaitEvents-srcStageMask-02112"); skip |= ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-dstStageMask-01160", "VUID-vkCmdWaitEvents-dstStageMask-01162", "VUID-vkCmdWaitEvents-dstStageMask-02113", "VUID-vkCmdWaitEvents-dstStageMask-02114"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdWaitEvents-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()"); skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()"); skip |= ValidateBarriers("vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); return skip; } void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); auto first_event_index = cb_state->events.size(); for (uint32_t i = 0; i < eventCount; ++i) { auto event_state = GetEventNode(pEvents[i]); if (event_state) { AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent}, cb_state); 
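            // The direct cb_bindings.insert below appears redundant with AddCommandBufferBinding
            // above, which already records the binding; it is harmless, as inserting an element
            // already present in the set is a no-op.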
event_state->cb_bindings.insert(cb_state); } cb_state->waitedEvents.insert(pEvents[i]); cb_state->events.push_back(pEvents[i]); } cb_state->eventUpdates.emplace_back( [=](VkQueue q) { return ValidateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); }); TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers); if (GetEnables()->gpu_validation) { GpuPreCallValidateCmdWaitEvents(sourceStageMask); } } void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); RecordBarriersQFOTransfers(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = false; auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); skip |= ValidateStageMasksAgainstQueueCapabilities(cb_state, srcStageMask, dstStageMask, barrier_op_type, "vkCmdPipelineBarrier", "VUID-vkCmdPipelineBarrier-srcStageMask-01183"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPipelineBarrier()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()"); skip |= ValidateStageMaskGsTsEnables(srcStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-srcStageMask-01168", "VUID-vkCmdPipelineBarrier-srcStageMask-01170", "VUID-vkCmdPipelineBarrier-srcStageMask-02115", "VUID-vkCmdPipelineBarrier-srcStageMask-02116"); skip |= ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-dstStageMask-01169", "VUID-vkCmdPipelineBarrier-dstStageMask-01171", "VUID-vkCmdPipelineBarrier-dstStageMask-02117", "VUID-vkCmdPipelineBarrier-dstStageMask-02118"); if (cb_state->activeRenderPass) { skip |= ValidateRenderPassPipelineBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); if (skip) return true; // Early return to avoid redundant errors from below calls } skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()"); skip |= ValidateBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); return skip; } void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, 
VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); RecordBarriersQFOTransfers(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers); } bool CoreChecks::SetQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) { GLOBAL_CB_NODE *pCB = GetCBNode(commandBuffer); if (pCB) { pCB->queryToStateMap[object] = value; } auto queue_data = queueMap.find(queue); if (queue_data != queueMap.end()) { queue_data->second.queryToStateMap[object] = value; } return false; } bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) { if (disabled.query_validation) return false; GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdBeginQuery-commandBuffer-cmdpool"); auto queryType = GetQueryPoolNode(queryPool)->createInfo.queryType; if (flags & VK_QUERY_CONTROL_PRECISE_BIT) { if (!enabled_features.core.occlusionQueryPrecise) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQuery-queryType-00800", "VK_QUERY_CONTROL_PRECISE_BIT provided to vkCmdBeginQuery, but precise occlusion queries not enabled " "on the device."); } if (queryType != VK_QUERY_TYPE_OCCLUSION) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQuery-queryType-00800", "VK_QUERY_CONTROL_PRECISE_BIT provided to vkCmdBeginQuery, but pool query type is not VK_QUERY_TYPE_OCCLUSION"); } } skip |= ValidateCmd(cb_state, CMD_BEGINQUERY, "vkCmdBeginQuery()"); return skip; } void CoreChecks::PostCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); QueryObject query = {queryPool, slot}; cb_state->activeQueries.insert(query); cb_state->startedQueries.insert(query); AddCommandBufferBinding(&GetQueryPoolNode(queryPool)->cb_bindings, {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state); } bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) { if (disabled.query_validation) return false; QueryObject query = {queryPool, slot}; GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = false; if (!cb_state->activeQueries.count(query)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdEndQuery-None-01923", "Ending a query before it was started: queryPool %s, index %d.", report_data->FormatHandle(queryPool).c_str(), slot); } skip |= ValidateCmdQueueFlags(cb_state, "VkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdEndQuery-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_ENDQUERY, 
"VkCmdEndQuery()"); return skip; } void CoreChecks::PostCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) { QueryObject query = {queryPool, slot}; GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->activeQueries.erase(query); cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, true); }); AddCommandBufferBinding(&GetQueryPoolNode(queryPool)->cb_bindings, {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state); } bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { if (disabled.query_validation) return false; GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); bool skip = InsideRenderPass(cb_state, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-renderpass"); skip |= ValidateCmd(cb_state, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()"); skip |= ValidateCmdQueueFlags(cb_state, "VkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdResetQueryPool-commandBuffer-cmdpool"); return skip; } void CoreChecks::PostCallRecordCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); for (uint32_t i = 0; i < queryCount; i++) { QueryObject query = {queryPool, firstQuery + i}; cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents; cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, false); }); } AddCommandBufferBinding(&GetQueryPoolNode(queryPool)->cb_bindings, {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state); } bool CoreChecks::IsQueryInvalid(QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) { QueryObject query = {queryPool, queryIndex}; auto query_data = queue_data->queryToStateMap.find(query); if (query_data != queue_data->queryToStateMap.end()) { if (!query_data->second) return true; } else { auto it = queryToStateMap.find(query); if (it == queryToStateMap.end() || !it->second) return true; } return false; } bool CoreChecks::ValidateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { bool skip = false; auto queue_data = GetQueueState(queue); if (!queue_data) return false; for (uint32_t i = 0; i < queryCount; i++) { if (IsQueryInvalid(queue_data, queryPool, firstQuery + i)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidQuery, "Requesting a copy from query to buffer with invalid query: queryPool %s, index %d", report_data->FormatHandle(queryPool).c_str(), firstQuery + i); } } return skip; } bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) { if (disabled.query_validation) return false; auto cb_state = GetCBNode(commandBuffer); auto dst_buff_state = GetBufferState(dstBuffer); assert(cb_state); assert(dst_buff_state); bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826"); // Validate that DST buffer has correct usage flags set skip |= ValidateBufferUsageFlags(dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, 
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdCopyQueryPoolResults()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdCopyQueryPoolResults-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()"); skip |= InsideRenderPass(cb_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-renderpass"); return skip; } void CoreChecks::PostCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) { auto cb_state = GetCBNode(commandBuffer); auto dst_buff_state = GetBufferState(dstBuffer); AddCommandBufferBindingBuffer(cb_state, dst_buff_state); cb_state->queryUpdates.emplace_back([=](VkQueue q) { return ValidateQuery(q, cb_state, queryPool, firstQuery, queryCount); }); AddCommandBufferBinding(&GetQueryPoolNode(queryPool)->cb_bindings, {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state); } bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void *pValues) { bool skip = false; GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdPushConstants-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()"); skip |= ValidatePushConstantRange(offset, size, "vkCmdPushConstants()"); if (0 == stageFlags) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-stageFlags-requiredbitmask", "vkCmdPushConstants() call has no stageFlags set."); } // Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command // stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range. 
    if (!skip) {
        const auto &ranges = *GetPipelineLayout(layout)->push_constant_ranges;
        VkShaderStageFlags found_stages = 0;
        for (const auto &range : ranges) {
            if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
                VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
                if (matching_stages != range.stageFlags) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                    HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01796",
                                    "vkCmdPushConstants(): stageFlags (0x%" PRIx32 "), offset (%" PRIu32 "), and size (%" PRIu32
                                    ") must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
                                    "), offset (%" PRIu32 "), and size (%" PRIu32 ") in pipeline layout %s.",
                                    (uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
                                    report_data->FormatHandle(layout).c_str());
                }

                // Accumulate all stages we've found
                found_stages = matching_stages | found_stages;
            }
        }
        if (found_stages != stageFlags) {
            uint32_t missing_stages = ~found_stages & stageFlags;
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01795",
                            "vkCmdPushConstants(): stageFlags = 0x%" PRIx32
                            ", VkPushConstantRange in pipeline layout %s overlapping offset = %d and size = %d, do not contain "
                            "stageFlags 0x%" PRIx32 ".",
                            (uint32_t)stageFlags, report_data->FormatHandle(layout).c_str(), offset, size, missing_stages);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                  VkQueryPool queryPool, uint32_t slot) {
    if (disabled.query_validation) return false;
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdWriteTimestamp()",
                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
                                      "VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
    return skip;
}

void CoreChecks::PostCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                 VkQueryPool queryPool, uint32_t slot) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    QueryObject query = {queryPool, slot};
    cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, true); });
}

bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2KHR *attachments, const VkFramebufferCreateInfo *fbci,
                            VkImageUsageFlagBits usage_flag, const char *error_code) {
    bool skip = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = GetImageViewState(*image_view);
                if (view_state) {
                    const VkImageCreateInfo *ici = &GetImageState(view_state->create_info.image)->createInfo;
                    if (ici != nullptr) {
                        if ((ici->usage & usage_flag) == 0) {
                            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                            error_code,
                                            "vkCreateFramebuffer: Framebuffer 
                                            "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
                                            "IMAGE_USAGE flags (%s).",
                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                        }
                    }
                }
            }
        }
    }
    return skip;
}

// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
bool CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip = false;

    auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo2KHR *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
                            "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
                            "of %u of renderPass (%s) being used to create Framebuffer.",
                            pCreateInfo->attachmentCount, rpci->attachmentCount,
                            report_data->FormatHandle(pCreateInfo->renderPass).c_str());
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = GetImageViewState(image_views[i]);
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                                    HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00880",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
                                    "match the format of %s used by the corresponding attachment for renderPass (%s).",
                                    i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                                    report_data->FormatHandle(pCreateInfo->renderPass).c_str());
                }
                const VkImageCreateInfo *ici = &GetImageState(ivci.image)->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip |= log_msg(
                        report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00881",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match the %s "
                        "samples used by the corresponding attachment for renderPass (%s).",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        report_data->FormatHandle(pCreateInfo->renderPass).c_str());
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                    "VUID-VkFramebufferCreateInfo-pAttachments-00883",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
                                    "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
                                    i, ivci.subresourceRange.levelCount);
                }
                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                    "VUID-VkFramebufferCreateInfo-pAttachments-00882",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
                                    "smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
                                    "attachment #%u, framebuffer:\n"
                                    "width: %u, %u\n"
                                    "height: %u, %u\n"
                                    "layerCount: %u, %u\n",
                                    i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                                    pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
                }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                    "VUID-VkFramebufferCreateInfo-pAttachments-00884",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All "
                                    "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
                                    "swizzle values:\n"
                                    "r swizzle = %s\n"
                                    "g swizzle = %s\n"
                                    "b swizzle = %s\n"
                                    "a swizzle = %s\n",
                                    i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                                    string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip |= MatchUsage(rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                               pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879");
            // Verify color attachments:
            skip |= MatchUsage(rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                               pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877");
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip |= MatchUsage(1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633");
            }
        }
    }
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-width-00886",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. 
Requested " "width: %u, device max: %u\n", pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth); } if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkFramebufferCreateInfo-height-00888", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested " "height: %u, device max: %u\n", pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight); } if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkFramebufferCreateInfo-layers-00890", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested " "layers: %u, device max: %u\n", pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers); } // Verify FB dimensions are greater than zero if (pCreateInfo->width <= 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkFramebufferCreateInfo-width-00885", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero."); } if (pCreateInfo->height <= 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkFramebufferCreateInfo-height-00887", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero."); } if (pCreateInfo->layers <= 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkFramebufferCreateInfo-layers-00889", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero."); } return skip; } bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) { // TODO : Verify that renderPass FB is created with is compatible with FB bool skip = false; skip |= ValidateFramebufferCreateInfo(pCreateInfo); return skip; } void CoreChecks::PostCallRecordCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer, VkResult result) { if (VK_SUCCESS != result) return; // Shadow create info and store in map std::unique_ptr<FRAMEBUFFER_STATE> fb_state( new FRAMEBUFFER_STATE(*pFramebuffer, pCreateInfo, GetRenderPassStateSharedPtr(pCreateInfo->renderPass))); for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { VkImageView view = pCreateInfo->pAttachments[i]; auto view_state = GetImageViewState(view); if (!view_state) { continue; } } frameBufferMap[*pFramebuffer] = std::move(fb_state); } static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node, std::unordered_set<uint32_t> &processed_nodes) { // If we have already checked this node we have not found a dependency path so return false. if (processed_nodes.count(index)) return false; processed_nodes.insert(index); const DAGNode &node = subpass_to_node[index]; // Look for a dependency path. If one exists return true else recurse on the previous nodes. 
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
        }
    } else {
        return true;
    }
    return false;
}

bool CoreChecks::CheckDependencyExists(const uint32_t subpass, const std::vector<uint32_t> &dependent_subpasses,
                                       const std::vector<DAGNode> &subpass_to_node, bool &skip) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If not, throw an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                kVUID_Core_DrawState_InvalidRenderpass,
                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

bool CoreChecks::CheckPreserved(const VkRenderPassCreateInfo2KHR *pCreateInfo, const int index, const uint32_t attachment,
                                const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
    const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment) return true;
    }
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (attachment == subpass.pInputAttachments[j].attachment) return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
    }
    // If the attachment was written to by a previous node then this node needs to preserve it.
if (result && depth > 0) { bool has_preserved = false; for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { if (subpass.pPreserveAttachments[j] == attachment) { has_preserved = true; break; } } if (!has_preserved) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidRenderpass, "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index); } } return result; } template <class T> bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) { return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) || ((offset1 > offset2) && (offset1 < (offset2 + size2))); } bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) { return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) && IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount)); } bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) { bool skip = false; auto const pFramebufferInfo = framebuffer->createInfo.ptr(); auto const pCreateInfo = renderPass->createInfo.ptr(); auto const &subpass_to_node = renderPass->subpassToNode; std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount); std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount); std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount); // Find overlapping attachments for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) { VkImageView viewi = pFramebufferInfo->pAttachments[i]; VkImageView viewj = pFramebufferInfo->pAttachments[j]; if (viewi == viewj) { overlapping_attachments[i].push_back(j); overlapping_attachments[j].push_back(i); continue; } auto view_state_i = GetImageViewState(viewi); auto view_state_j = GetImageViewState(viewj); if (!view_state_i || !view_state_j) { continue; } auto view_ci_i = view_state_i->create_info; auto view_ci_j = view_state_j->create_info; if (view_ci_i.image == view_ci_j.image && IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) { overlapping_attachments[i].push_back(j); overlapping_attachments[j].push_back(i); continue; } auto image_data_i = GetImageState(view_ci_i.image); auto image_data_j = GetImageState(view_ci_j.image); if (!image_data_i || !image_data_j) { continue; } if (image_data_i->binding.mem == image_data_j->binding.mem && IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset, image_data_j->binding.size)) { overlapping_attachments[i].push_back(j); overlapping_attachments[j].push_back(i); } } } // Find for each attachment the subpasses that use them. 
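    // Illustrative note (not part of the layer): the overlap scan above treats two framebuffer attachments as aliased when
    // they are the same VkImageView, views of the same VkImage with overlapping subresource ranges, or views of images bound
    // to overlapping ranges of the same VkDeviceMemory. For example, on the application side (hypothetical handles):
    //
    //     VkImageViewCreateInfo ivci{VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO};
    //     ivci.image = image;  // same VkImage backs both views
    //     ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    //     ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
    //     ivci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    //     vkCreateImageView(device, &ivci, nullptr, &viewA);
    //     vkCreateImageView(device, &ivci, nullptr, &viewB);  // viewB aliases viewA in a framebuffer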
unordered_set<uint32_t> attachmentIndices; for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; attachmentIndices.clear(); for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { uint32_t attachment = subpass.pInputAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; input_attachment_to_subpass[attachment].push_back(i); for (auto overlapping_attachment : overlapping_attachments[attachment]) { input_attachment_to_subpass[overlapping_attachment].push_back(i); } } for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { uint32_t attachment = subpass.pColorAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; output_attachment_to_subpass[attachment].push_back(i); for (auto overlapping_attachment : overlapping_attachments[attachment]) { output_attachment_to_subpass[overlapping_attachment].push_back(i); } attachmentIndices.insert(attachment); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { uint32_t attachment = subpass.pDepthStencilAttachment->attachment; output_attachment_to_subpass[attachment].push_back(i); for (auto overlapping_attachment : overlapping_attachments[attachment]) { output_attachment_to_subpass[overlapping_attachment].push_back(i); } if (attachmentIndices.count(attachment)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidRenderpass, "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i); } } } // If there is a dependency needed make sure one exists for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; // If the attachment is an input then all subpasses that output must have a dependency relationship for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { uint32_t attachment = subpass.pInputAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; CheckDependencyExists(i, output_attachment_to_subpass[attachment], subpass_to_node, skip); } // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { uint32_t attachment = subpass.pColorAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; CheckDependencyExists(i, output_attachment_to_subpass[attachment], subpass_to_node, skip); CheckDependencyExists(i, input_attachment_to_subpass[attachment], subpass_to_node, skip); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment; CheckDependencyExists(i, output_attachment_to_subpass[attachment], subpass_to_node, skip); CheckDependencyExists(i, input_attachment_to_subpass[attachment], subpass_to_node, skip); } } // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was // written. 
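    // Illustrative sketch (application-side, not part of the layer): if subpass 0 writes attachment 1 and subpass 2 reads it,
    // the intervening subpass 1 must carry it in pPreserveAttachments or its contents may be discarded:
    //
    //     uint32_t preserved = 1;
    //     VkSubpassDescription middle{};  // subpass 1, which does not itself use attachment 1
    //     middle.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    //     middle.preserveAttachmentCount = 1;
    //     middle.pPreserveAttachments = &preserved;  // keeps attachment 1 alive across this subpass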
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { CheckPreserved(pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip); } } return skip; } void CoreChecks::RecordRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) { auto &subpass_to_node = render_pass->subpassToNode; subpass_to_node.resize(pCreateInfo->subpassCount); auto &self_dependencies = render_pass->self_dependencies; self_dependencies.resize(pCreateInfo->subpassCount); for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { subpass_to_node[i].pass = i; self_dependencies[i].clear(); } for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i]; if ((dependency.srcSubpass != VK_SUBPASS_EXTERNAL) && (dependency.dstSubpass != VK_SUBPASS_EXTERNAL)) { if (dependency.srcSubpass == dependency.dstSubpass) { self_dependencies[dependency.srcSubpass].push_back(i); } else { subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass); subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass); } } } } bool CoreChecks::ValidateRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) { // Shorthand... auto &subpass_to_node = render_pass->subpassToNode; subpass_to_node.resize(pCreateInfo->subpassCount); auto &self_dependencies = render_pass->self_dependencies; self_dependencies.resize(pCreateInfo->subpassCount); bool skip = false; const char *vuid; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { subpass_to_node[i].pass = i; self_dependencies[i].clear(); } for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i]; VkPipelineStageFlags exclude_graphics_pipeline_stages = ~(VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | ExpandPipelineStageFlags(device_extensions, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)); VkPipelineStageFlagBits latest_src_stage = GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask); VkPipelineStageFlagBits earliest_dst_stage = GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask); // This VU is actually generalised to *any* pipeline - not just graphics - but only graphics render passes are // currently supported by the spec - so only that pipeline is checked here. // If that is ever relaxed, this check should be extended to cover those pipelines. if (dependency.srcSubpass == dependency.dstSubpass && (dependency.srcStageMask & exclude_graphics_pipeline_stages) != 0u && (dependency.dstStageMask & exclude_graphics_pipeline_stages) != 0u) { vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-02244" : "VUID-VkSubpassDependency-srcSubpass-01989"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u is a self-dependency, but specifies stage masks that contain stages not in the GRAPHICS pipeline.", i); } else if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL && (dependency.srcStageMask & VK_PIPELINE_STAGE_HOST_BIT)) { vuid = use_rp2 ? 
"VUID-VkSubpassDependency2KHR-srcSubpass-03078" : "VUID-VkSubpassDependency-srcSubpass-00858"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a dependency from subpass %u, but includes HOST_BIT in the source stage mask.", i, dependency.srcSubpass); } else if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL && (dependency.dstStageMask & VK_PIPELINE_STAGE_HOST_BIT)) { vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-dstSubpass-03079" : "VUID-VkSubpassDependency-dstSubpass-00859"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a dependency to subpass %u, but includes HOST_BIT in the destination stage mask.", i, dependency.dstSubpass); } // These next two VUs are actually generalised to *any* pipeline - not just graphics - but only graphics render passes are // currently supported by the spec - so only that pipeline is checked here. // If that is ever relaxed, these next two checks should be extended to cover those pipelines. else if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL && pCreateInfo->pSubpasses[dependency.srcSubpass].pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS && (dependency.srcStageMask & exclude_graphics_pipeline_stages) != 0u) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03054" : "VUID-VkRenderPassCreateInfo-pDependencies-00837"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a source stage mask that contains stages not in the GRAPHICS pipeline as used " "by the source subpass %u.", i, dependency.srcSubpass); } else if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL && pCreateInfo->pSubpasses[dependency.dstSubpass].pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS && (dependency.dstStageMask & exclude_graphics_pipeline_stages) != 0u) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03055" : "VUID-VkRenderPassCreateInfo-pDependencies-00838"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a destination stage mask that contains stages not in the GRAPHICS pipeline as " "used by the destination subpass %u.", i, dependency.dstSubpass); } // The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if // any are, which enables multiview. else if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && (pCreateInfo->pSubpasses[0].viewMask == 0)) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo2KHR-viewMask-03059", "Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i); } else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDependency2KHR-dependencyFlags-03092", "Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but also specifies a view offset of %u.", i, dependency.viewOffset); } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) { if (dependency.srcSubpass == dependency.dstSubpass) { vuid = use_rp2 ? 
"VUID-VkSubpassDependency2KHR-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "The src and dst subpasses in dependency %u are both external.", i); } else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) { if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) { vuid = "VUID-VkSubpassDependency-dependencyFlags-02520"; } else { // dependency.dstSubpass == VK_SUBPASS_EXTERNAL vuid = "VUID-VkSubpassDependency-dependencyFlags-02521"; } if (use_rp2) { // Create render pass 2 distinguishes between source and destination external dependencies. if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) { vuid = "VUID-VkSubpassDependency2KHR-dependencyFlags-03090"; } else { vuid = "VUID-VkSubpassDependency2KHR-dependencyFlags-03091"; } } skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i); } } else if (dependency.srcSubpass > dependency.dstSubpass) { vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is " "disallowed to prevent cyclic dependencies.", i, dependency.srcSubpass, dependency.dstSubpass); } else if (dependency.srcSubpass == dependency.dstSubpass) { if (dependency.viewOffset != 0) { vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pNext-01930"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a self-dependency but has a non-zero view offset of %u", i, dependency.viewOffset); } else if ((dependency.dependencyFlags | VK_DEPENDENCY_VIEW_LOCAL_BIT) != dependency.dependencyFlags && pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not " "specify VK_DEPENDENCY_VIEW_LOCAL_BIT.", i, dependency.srcSubpass); } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) || HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) && (GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) > GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) { vuid = use_rp2 ? 
"VUID-VkSubpassDependency2KHR-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).", i, string_VkPipelineStageFlagBits(latest_src_stage), string_VkPipelineStageFlagBits(earliest_dst_stage)); } else { self_dependencies[dependency.srcSubpass].push_back(i); } } else { subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass); subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass); } } return skip; } bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count, const char *type) { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()"; if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) { const char *vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: %s attachment %d must be less than the total number of attachments %d.", type, function_name, attachment, attachment_count); } return skip; } enum AttachmentType { ATTACHMENT_COLOR = 1, ATTACHMENT_DEPTH = 2, ATTACHMENT_INPUT = 4, ATTACHMENT_PRESERVE = 8, ATTACHMENT_RESOLVE = 16, }; char const *StringAttachmentType(uint8_t type) { switch (type) { case ATTACHMENT_COLOR: return "color"; case ATTACHMENT_DEPTH: return "depth"; case ATTACHMENT_INPUT: return "input"; case ATTACHMENT_PRESERVE: return "preserve"; case ATTACHMENT_RESOLVE: return "resolve"; default: return "(multiple)"; } } bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses, std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use, VkImageLayout new_layout) { if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */ bool skip = false; auto &uses = attachment_uses[attachment]; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()"; if (uses & new_use) { if (attachment_layouts[attachment] != new_layout) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-layout-02528" : "VUID-VkSubpassDescription-layout-02519"; log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).", function_name, subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]), string_VkImageLayout(new_layout)); } } else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) { /* Note: input attachments are assumed to be done first. */ vuid = use_rp2 ? 
"VUID-VkSubpassDescription2KHR-pPreserveAttachments-03074" : "VUID-VkSubpassDescription-pPreserveAttachments-00854"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass, attachment, StringAttachmentType(uses), StringAttachmentType(new_use)); } else { attachment_layouts[attachment] = new_layout; uses |= new_use; } return skip; } bool CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo) { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()"; for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount); std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount); if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pipelineBindPoint-03062" : "VUID-VkSubpassDescription-pipelineBindPoint-00844"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", function_name, i); } for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { auto const &attachment_ref = subpass.pInputAttachments[j]; if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Input"); if (attachment_ref.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) { vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkInputAttachmentAspectReference-aspectMask-01964"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.", function_name, i, j); } if (attachment_ref.attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment, ATTACHMENT_INPUT, attachment_ref.layout); vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pNext-01963"; skip |= ValidateImageAspectMask(VK_NULL_HANDLE, pCreateInfo->pAttachments[attachment_ref.attachment].format, attachment_ref.aspectMask, function_name, vuid); } } if (rp_version == RENDER_PASS_VERSION_2) { // These are validated automatically as part of parameter validation for create renderpass 1 // as they are in a struct that only applies to input attachments - not so for v2. 
                // Check for 0
                if (attachment_ref.aspectMask == 0) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                    "VUID-VkSubpassDescription2KHR-aspectMask-03176",
                                    "%s: Input attachment (%d) aspect mask must not be 0.", function_name, j);
                } else {
                    const VkImageAspectFlags valid_bits =
                        (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
                         VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT |
                         VK_IMAGE_ASPECT_PLANE_2_BIT);

                    // Check for valid aspect mask bits
                    if (attachment_ref.aspectMask & ~valid_bits) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkSubpassDescription2KHR-aspectMask-03175",
                                        "%s: Input attachment (%d) aspect mask (0x%" PRIx32 ") is invalid.", function_name, j,
                                        attachment_ref.aspectMask);
                    }
                }
            }
        }

        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                                "%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j);
            } else {
                skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, "Preserve");
                if (attachment < pCreateInfo->attachmentCount) {
                    skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE,
                                             VkImageLayout(0) /* preserve doesn't have any layout */);
                }
            }
        }

        bool subpass_performs_resolve = false;

        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            if (subpass.pResolveAttachments) {
                auto const &attachment_ref = subpass.pResolveAttachments[j];
                if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
                    skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Resolve");

                    if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
                        skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
                                                 ATTACHMENT_RESOLVE, attachment_ref.layout);

                        subpass_performs_resolve = true;

                        if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
                            vuid = use_rp2 ?
"VUID-VkSubpassDescription2KHR-pResolveAttachments-03067" : "VUID-VkSubpassDescription-pResolveAttachments-00849"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u requests multisample resolve into attachment %u, which must " "have VK_SAMPLE_COUNT_1_BIT but has %s.", function_name, i, attachment_ref.attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples)); } } } } } if (subpass.pDepthStencilAttachment) { if (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentIndex(rp_version, subpass.pDepthStencilAttachment->attachment, pCreateInfo->attachmentCount, "Depth"); if (subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, subpass.pDepthStencilAttachment->attachment, ATTACHMENT_DEPTH, subpass.pDepthStencilAttachment->layout); } } } uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED; for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { auto const &attachment_ref = subpass.pColorAttachments[j]; skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Color"); if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED && attachment_ref.attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment, ATTACHMENT_COLOR, attachment_ref.layout); VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_ref.attachment].samples; if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) { VkSampleCountFlagBits last_sample_count = pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples; if (current_sample_count != last_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pColorAttachments-03069" : "VUID-VkSubpassDescription-pColorAttachments-01417"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u attempts to render to color attachments with inconsistent sample counts." "Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has " "sample count %s.", function_name, i, j, string_VkSampleCountFlagBits(current_sample_count), last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count)); } } last_sample_count_attachment = j; if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03066" : "VUID-VkSubpassDescription-pResolveAttachments-00848"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u requests multisample resolve from attachment %u which has " "VK_SAMPLE_COUNT_1_BIT.", function_name, i, attachment_ref.attachment); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) { const auto depth_stencil_sample_count = pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples; if (device_extensions.vk_amd_mixed_attachment_samples) { if (pCreateInfo->pAttachments[attachment_ref.attachment].samples > depth_stencil_sample_count) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2KHR-pColorAttachments-03070" : "VUID-VkSubpassDescription-pColorAttachments-01506"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u pColorAttachments[%u] has %s which is larger than " "depth/stencil attachment %s.", function_name, i, j, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples), string_VkSampleCountFlagBits(depth_stencil_sample_count)); break; } } if (!device_extensions.vk_amd_mixed_attachment_samples && !device_extensions.vk_nv_framebuffer_mixed_samples && current_sample_count != depth_stencil_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pDepthStencilAttachment-03071" : "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u attempts to render to use a depth/stencil attachment with sample count that differs " "from color attachment %u." "The depth attachment ref has sample count %s, whereas color attachment ref %u has sample count %s.", function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j, string_VkSampleCountFlagBits(current_sample_count)); break; } } } if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED && subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) { if (attachment_ref.attachment == VK_ATTACHMENT_UNUSED) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03065" : "VUID-VkSubpassDescription-pResolveAttachments-00847"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u requests multisample resolve from attachment %u which has " "attachment=VK_ATTACHMENT_UNUSED.", function_name, i, attachment_ref.attachment); } else { const auto &color_desc = pCreateInfo->pAttachments[attachment_ref.attachment]; const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment]; if (color_desc.format != resolve_desc.format) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03068" : "VUID-VkSubpassDescription-pResolveAttachments-00850"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u pColorAttachments[%u] resolves to an attachment with a " "different format. color format: %u, resolve format: %u.", function_name, i, j, color_desc.format, resolve_desc.format); } } } } } return skip; } static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) { if (index == VK_ATTACHMENT_UNUSED) return; if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read; } bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()"; // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with // ValidateLayouts. 
skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo); render_pass->renderPass = VK_NULL_HANDLE; skip |= ValidateRenderPassDAG(rp_version, pCreateInfo, render_pass); // Validate multiview correlation and view masks bool viewMaskZero = false; bool viewMaskNonZero = false; for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; if (subpass.viewMask != 0) { viewMaskNonZero = true; } else { viewMaskZero = true; } if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 && (subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-flags-03076" : "VUID-VkSubpassDescription-flags-00856"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: The flags parameter of subpass description %u includes " "VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include " "VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.", function_name, i); } } if (rp_version == RENDER_PASS_VERSION_2) { if (viewMaskNonZero && viewMaskZero) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo2KHR-viewMask-03058", "%s: Some view masks are non-zero whilst others are zero.", function_name); } if (viewMaskZero && pCreateInfo->correlatedViewMaskCount != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo2KHR-viewMask-03057", "%s: Multiview is not enabled but correlation masks are still provided", function_name); } } uint32_t aggregated_cvms = 0; for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) { if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) { vuid = use_rp2 ? 
"VUID-VkRenderPassCreateInfo2KHR-pCorrelatedViewMasks-03056" : "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i); } aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i]; } for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { auto const &dependency = pCreateInfo->pDependencies[i]; if (rp_version == RENDER_PASS_VERSION_2) { skip |= ValidateStageMaskGsTsEnables( dependency.srcStageMask, function_name, "VUID-VkSubpassDependency2KHR-srcStageMask-03080", "VUID-VkSubpassDependency2KHR-srcStageMask-03082", "VUID-VkSubpassDependency2KHR-srcStageMask-02103", "VUID-VkSubpassDependency2KHR-srcStageMask-02104"); skip |= ValidateStageMaskGsTsEnables( dependency.dstStageMask, function_name, "VUID-VkSubpassDependency2KHR-dstStageMask-03081", "VUID-VkSubpassDependency2KHR-dstStageMask-03083", "VUID-VkSubpassDependency2KHR-dstStageMask-02105", "VUID-VkSubpassDependency2KHR-dstStageMask-02106"); } else { skip |= ValidateStageMaskGsTsEnables( dependency.srcStageMask, function_name, "VUID-VkSubpassDependency-srcStageMask-00860", "VUID-VkSubpassDependency-srcStageMask-00862", "VUID-VkSubpassDependency-srcStageMask-02099", "VUID-VkSubpassDependency-srcStageMask-02100"); skip |= ValidateStageMaskGsTsEnables( dependency.dstStageMask, function_name, "VUID-VkSubpassDependency-dstStageMask-00861", "VUID-VkSubpassDependency-dstStageMask-00863", "VUID-VkSubpassDependency-dstStageMask-02101", "VUID-VkSubpassDependency-dstStageMask-02102"); } if (!ValidateAccessMaskPipelineStage(device_extensions, dependency.srcAccessMask, dependency.srcStageMask)) { vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcAccessMask-03088" : "VUID-VkSubpassDependency-srcAccessMask-00868"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: pDependencies[%u].srcAccessMask (0x%" PRIx32 ") is not supported by srcStageMask (0x%" PRIx32 ").", function_name, i, dependency.srcAccessMask, dependency.srcStageMask); } if (!ValidateAccessMaskPipelineStage(device_extensions, dependency.dstAccessMask, dependency.dstStageMask)) { vuid = use_rp2 ? 
"VUID-VkSubpassDependency2KHR-dstAccessMask-03089" : "VUID-VkSubpassDependency-dstAccessMask-00869"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: pDependencies[%u].dstAccessMask (0x%" PRIx32 ") is not supported by dstStageMask (0x%" PRIx32 ").", function_name, i, dependency.dstAccessMask, dependency.dstStageMask); } } if (!skip) { skip |= ValidateLayouts(rp_version, device, pCreateInfo); } return skip; } bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { bool skip = false; // Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds) const VkRenderPassMultiviewCreateInfo *pMultiviewInfo = lvl_find_in_chain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext); if (pMultiviewInfo) { if (pMultiviewInfo->subpassCount && pMultiviewInfo->subpassCount != pCreateInfo->subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo-pNext-01928", "Subpass count is %u but multiview info has a subpass count of %u.", pCreateInfo->subpassCount, pMultiviewInfo->subpassCount); } else if (pMultiviewInfo->dependencyCount && pMultiviewInfo->dependencyCount != pCreateInfo->dependencyCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo-pNext-01929", "Dependency count is %u but multiview info has a dependency count of %u.", pCreateInfo->dependencyCount, pMultiviewInfo->dependencyCount); } } const VkRenderPassInputAttachmentAspectCreateInfo *pInputAttachmentAspectInfo = lvl_find_in_chain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext); if (pInputAttachmentAspectInfo) { for (uint32_t i = 0; i < pInputAttachmentAspectInfo->aspectReferenceCount; ++i) { uint32_t subpass = pInputAttachmentAspectInfo->pAspectReferences[i].subpass; uint32_t attachment = pInputAttachmentAspectInfo->pAspectReferences[i].inputAttachmentIndex; if (subpass >= pCreateInfo->subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo-pNext-01926", "Subpass index %u specified by input attachment aspect info %u is greater than the subpass " "count of %u for this render pass.", subpass, i, pCreateInfo->subpassCount); } else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo-pNext-01927", "Input attachment index %u specified by input attachment aspect info %u is greater than the " "input attachment count of %u for this subpass.", attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount); } } } if (!skip) { auto render_pass = std::unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo)); skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, render_pass->createInfo.ptr(), render_pass.get()); } return skip; } void CoreChecks::RecordCreateRenderPassState(RenderPassCreateVersion rp_version, std::shared_ptr<RENDER_PASS_STATE> &render_pass, VkRenderPass *pRenderPass) { render_pass->renderPass = *pRenderPass; auto create_info = render_pass->createInfo.ptr(); RecordRenderPassDAG(RENDER_PASS_VERSION_1, 
create_info, render_pass.get());

    for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
        const VkSubpassDescription2KHR &subpass = create_info->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);

            // resolve attachments are considered to be written
            if (subpass.pResolveAttachments) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
            }
        }
        if (subpass.pDepthStencilAttachment) {
            MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
        }
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
        }
    }

    // render_pass is passed in by non-const reference; it must still be moved from so that move assignment (rather than a
    // reference-counted copy) is invoked.
    renderPassMap[*pRenderPass] = std::move(render_pass);
}

// Style note:
// Use of rvalue reference exceeds recommended usage of rvalue refs in google style guide, but intentionally forces caller to move
// or copy. This is clearer than passing a pointer to shared_ptr and avoids the atomic increment/decrement of shared_ptr copy
// construction or assignment.
void CoreChecks::PostCallRecordCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
                                                VkResult result) {
    if (VK_SUCCESS != result) return;
    auto render_pass_state = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
    RecordCreateRenderPassState(RENDER_PASS_VERSION_1, render_pass_state, pRenderPass);
}

void CoreChecks::PostCallRecordCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
                                                    VkResult result) {
    if (VK_SUCCESS != result) return;
    auto render_pass_state = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
    RecordCreateRenderPassState(RENDER_PASS_VERSION_2, render_pass_state, pRenderPass);
}

static bool ValidateDepthStencilResolve(const debug_report_data *report_data,
                                        const VkPhysicalDeviceDepthStencilResolvePropertiesKHR &depth_stencil_resolve_props,
                                        const VkRenderPassCreateInfo2KHR *pCreateInfo) {
    bool skip = false;

    // If the pNext list of VkSubpassDescription2KHR includes a VkSubpassDescriptionDepthStencilResolveKHR structure,
    // then that structure describes depth/stencil resolve operations for the subpass.
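    // Illustrative sketch (application-side, not part of the layer): opting in by chaining the struct onto a subpass:
    //
    //     VkAttachmentReference2KHR ds_resolve_ref{VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR};
    //     ds_resolve_ref.attachment = 2;  // a single-sample depth/stencil attachment
    //     ds_resolve_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    //     VkSubpassDescriptionDepthStencilResolveKHR dsr{VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR};
    //     dsr.depthResolveMode = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
    //     dsr.stencilResolveMode = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
    //     dsr.pDepthStencilResolveAttachment = &ds_resolve_ref;
    //     subpass2khr.pNext = &dsr;  // the VkSubpassDescription2KHR of the resolving subpass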
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) { VkSubpassDescription2KHR subpass = pCreateInfo->pSubpasses[i]; const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(subpass.pNext); if (resolve == nullptr) { continue; } if (resolve->pDepthStencilResolveAttachment != nullptr && resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) { if (subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03177", "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR " "structure with resolve attachment %u, but pDepthStencilAttachment=VK_ATTACHMENT_UNUSED.", i, resolve->pDepthStencilResolveAttachment->attachment); } if (resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR && resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03178", "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR " "structure with resolve attachment %u, but both depth and stencil resolve modes are " "VK_RESOLVE_MODE_NONE_KHR.", i, resolve->pDepthStencilResolveAttachment->attachment); } } if (resolve->pDepthStencilResolveAttachment != nullptr && pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03179", "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR " "structure with resolve attachment %u. However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.", i, resolve->pDepthStencilResolveAttachment->attachment); } if (pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03180", "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR " "structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.", i, resolve->pDepthStencilResolveAttachment->attachment); } VkFormat pDepthStencilAttachmentFormat = pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format; VkFormat pDepthStencilResolveAttachmentFormat = pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format; if ((FormatDepthSize(pDepthStencilAttachmentFormat) != FormatDepthSize(pDepthStencilResolveAttachmentFormat)) || (FormatDepthNumericalType(pDepthStencilAttachmentFormat) != FormatDepthNumericalType(pDepthStencilResolveAttachmentFormat))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03181", "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR " "structure with resolve attachment %u which has a depth component (size %u). 
The depth component " "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.", i, resolve->pDepthStencilResolveAttachment->attachment, FormatDepthSize(pDepthStencilResolveAttachmentFormat), FormatDepthSize(pDepthStencilAttachmentFormat)); } if ((FormatStencilSize(pDepthStencilAttachmentFormat) != FormatStencilSize(pDepthStencilResolveAttachmentFormat)) || (FormatStencilNumericalType(pDepthStencilAttachmentFormat) != FormatStencilNumericalType(pDepthStencilResolveAttachmentFormat))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03182", "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR " "structure with resolve attachment %u which has a stencil component (size %u). The stencil component " "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.", i, resolve->pDepthStencilResolveAttachment->attachment, FormatStencilSize(pDepthStencilResolveAttachmentFormat), FormatStencilSize(pDepthStencilAttachmentFormat)); } if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR || resolve->depthResolveMode & depth_stencil_resolve_props.supportedDepthResolveModes)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDescriptionDepthStencilResolveKHR-depthResolveMode-03183", "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR " "structure with invalid depthResolveMode=%u.", i, resolve->depthResolveMode); } if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR || resolve->stencilResolveMode & depth_stencil_resolve_props.supportedStencilResolveModes)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDescriptionDepthStencilResolveKHR-stencilResolveMode-03184", "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR " "structure with invalid stencilResolveMode=%u.", i, resolve->stencilResolveMode); } if (FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) && depth_stencil_resolve_props.independentResolve == VK_FALSE && depth_stencil_resolve_props.independentResolveNone == VK_FALSE && !(resolve->depthResolveMode == resolve->stencilResolveMode)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03185", "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR " "structure. 
The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.", i, resolve->depthResolveMode, resolve->stencilResolveMode); } if (FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) && depth_stencil_resolve_props.independentResolve == VK_FALSE && depth_stencil_resolve_props.independentResolveNone == VK_TRUE && !(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR || resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03186", "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR " "structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or " "one of them must be %u.", i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE_KHR); } } return skip; } bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { bool skip = false; if (GetDeviceExtensions()->vk_khr_depth_stencil_resolve) { skip |= ValidateDepthStencilResolve(report_data, phys_dev_ext_props.depth_stencil_resolve_props, pCreateInfo); } auto render_pass = std::make_shared<RENDER_PASS_STATE>(pCreateInfo); skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, render_pass->createInfo.ptr(), render_pass.get()); return skip; } bool CoreChecks::ValidatePrimaryCommandBuffer(const GLOBAL_CB_NODE *pCB, char const *cmd_name, const char *error_code) { bool skip = false; if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.", cmd_name); } return skip; } bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin) { bool skip = false; const safe_VkFramebufferCreateInfo *pFramebufferInfo = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo; if (pRenderPassBegin->renderArea.offset.x < 0 || (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width || pRenderPassBegin->renderArea.offset.y < 0 || (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) { skip |= static_cast<bool>(log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidRenderArea, "Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width " "%d, height %d. Framebuffer: width %d, height %d.", pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width, pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height)); } return skip; } // If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the // [load|store]Op flag must be checked // TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately. 
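// Example (illustrative): for a VK_FORMAT_S8_UINT attachment only the stencil op is considered; for
// VK_FORMAT_R8G8B8A8_UNORM only the color/depth op is considered; for VK_FORMAT_D24_UNORM_S8_UINT a match
// on either op satisfies the check below.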
template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) { if (color_depth_op != op && stencil_op != op) { return false; } bool check_color_depth_load_op = !FormatIsStencilOnly(format); bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op; return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op))); } bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version, const VkRenderPassBeginInfo *pRenderPassBegin) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr; auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr; bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCmdBeginRenderPass2KHR()" : "vkCmdBeginRenderPass()"; if (render_pass_state) { uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR // Handle extension struct from EXT_sample_locations const VkRenderPassSampleLocationsBeginInfoEXT *pSampleLocationsBeginInfo = lvl_find_in_chain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext); if (pSampleLocationsBeginInfo) { for (uint32_t i = 0; i < pSampleLocationsBeginInfo->attachmentInitialSampleLocationsCount; ++i) { if (pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i].attachmentIndex >= render_pass_state->createInfo.attachmentCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531", "Attachment index %u specified by attachment sample locations %u is greater than the " "attachment count of %u for the render pass being begun.", pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i].attachmentIndex, i, render_pass_state->createInfo.attachmentCount); } } for (uint32_t i = 0; i < pSampleLocationsBeginInfo->postSubpassSampleLocationsCount; ++i) { if (pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i].subpassIndex >= render_pass_state->createInfo.subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532", "Subpass index %u specified by subpass sample locations %u is greater than the subpass count " "of %u for the render pass being begun.", pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i].subpassIndex, i, render_pass_state->createInfo.subpassCount); } } } for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) { auto pAttachment = &render_pass_state->createInfo.pAttachments[i]; if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_CLEAR)) { clear_op_size = static_cast<uint32_t>(i) + 1; } } if (clear_op_size > pRenderPassBegin->clearValueCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, HandleToUint64(render_pass_state->renderPass), "VUID-VkRenderPassBeginInfo-clearValueCount-00902", "In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u but there " "must be at least %u entries in pClearValues array to account for the highest index 
attachment in "
                            "renderPass %s that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which requires %u entries. Note that the "
                            "pClearValues array is indexed by attachment number, so even if some pClearValues entries between 0 "
                            "and %u correspond to attachments that aren't cleared, they will be ignored.",
                            function_name, pRenderPassBegin->clearValueCount, clear_op_size,
                            report_data->FormatHandle(render_pass_state->renderPass).c_str(), clear_op_size, clear_op_size - 1);
        }

        skip |= VerifyRenderAreaBounds(pRenderPassBegin);
        skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state, pRenderPassBegin,
                                                      GetFramebufferState(pRenderPassBegin->framebuffer));
        if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
            skip |= ValidateRenderPassCompatibility("render pass", render_pass_state, "framebuffer", framebuffer->rp_state.get(),
                                                    function_name, "VUID-VkRenderPassBeginInfo-renderPass-00904");
        }

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-renderpass" : "VUID-vkCmdBeginRenderPass-renderpass";
        skip |= InsideRenderPass(cb_state, function_name, vuid);
        skip |= ValidateDependencies(framebuffer, render_pass_state);

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-bufferlevel" : "VUID-vkCmdBeginRenderPass-bufferlevel";
        skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdBeginRenderPass-commandBuffer-cmdpool";
        skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);

        const CMD_TYPE cmd_type = use_rp2 ? CMD_BEGINRENDERPASS2KHR : CMD_BEGINRENDERPASS;
        skip |= ValidateCmd(cb_state, cmd_type, function_name);
    }

    auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext);
    if (chained_device_group_struct) {
        skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask,
                                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                                                        HandleToUint64(pRenderPassBegin->renderPass),
                                                        "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
        skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                                         HandleToUint64(pRenderPassBegin->renderPass),
                                         "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
        skip |= ValidateDeviceMaskToCommandBuffer(cb_state, chained_device_group_struct->deviceMask,
                                                  VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                                                  HandleToUint64(pRenderPassBegin->renderPass),
                                                  "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");
        if (chained_device_group_struct->deviceRenderAreaCount != 0 &&
            chained_device_group_struct->deviceRenderAreaCount != physical_device_count) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            HandleToUint64(pRenderPassBegin->renderPass),
                            "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908",
                            "deviceRenderAreaCount[%" PRIu32 "] is invalid. 
Physical device count is %" PRIu32 ".", chained_device_group_struct->deviceRenderAreaCount, physical_device_count); } } return skip; } bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) { bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin); return skip; } bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfoKHR *pSubpassBeginInfo) { bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin); return skip; } void CoreChecks::RecordCmdBeginRenderPassState(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassContents contents) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr; auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr; if (render_pass_state) { cb_state->activeFramebuffer = pRenderPassBegin->framebuffer; cb_state->activeRenderPass = render_pass_state; // This is a shallow copy as that is all that is needed for now cb_state->activeRenderPassBeginInfo = *pRenderPassBegin; cb_state->activeSubpass = 0; cb_state->activeSubpassContents = contents; cb_state->framebuffers.insert(pRenderPassBegin->framebuffer); // Connect this framebuffer and its children to this cmdBuffer AddFramebufferBinding(cb_state, framebuffer); // Connect this RP to cmdBuffer AddCommandBufferBinding(&render_pass_state->cb_bindings, {HandleToUint64(render_pass_state->renderPass), kVulkanObjectTypeRenderPass}, cb_state); // transition attachments to the correct layouts for beginning of renderPass and first subpass TransitionBeginRenderPassLayouts(cb_state, render_pass_state, framebuffer); auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext); if (chained_device_group_struct) { cb_state->active_render_pass_device_mask = chained_device_group_struct->deviceMask; } else { cb_state->active_render_pass_device_mask = cb_state->initial_device_mask; } } } void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) { RecordCmdBeginRenderPassState(commandBuffer, pRenderPassBegin, contents); } void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfoKHR *pSubpassBeginInfo) { RecordCmdBeginRenderPassState(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents); } bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCmdNextSubpass2KHR()" : "vkCmdNextSubpass()"; vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-bufferlevel" : "VUID-vkCmdNextSubpass-bufferlevel"; skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid); vuid = use_rp2 ? 
"VUID-vkCmdNextSubpass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdNextSubpass-commandBuffer-cmdpool"; skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid); const CMD_TYPE cmd_type = use_rp2 ? CMD_NEXTSUBPASS2KHR : CMD_NEXTSUBPASS; skip |= ValidateCmd(cb_state, cmd_type, function_name); vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-renderpass" : "VUID-vkCmdNextSubpass-renderpass"; skip |= OutsideRenderPass(cb_state, function_name, vuid); auto subpassCount = cb_state->activeRenderPass->createInfo.subpassCount; if (cb_state->activeSubpass == subpassCount - 1) { vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-None-03102" : "VUID-vkCmdNextSubpass-None-00909"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), vuid, "%s: Attempted to advance beyond final subpass.", function_name); } return skip; } bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer); } bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo, const VkSubpassEndInfoKHR *pSubpassEndInfo) { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer); } void CoreChecks::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->activeSubpass++; cb_state->activeSubpassContents = contents; TransitionSubpassLayouts(cb_state, cb_state->activeRenderPass, cb_state->activeSubpass, GetFramebufferState(cb_state->activeRenderPassBeginInfo.framebuffer)); } void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { RecordCmdNextSubpass(commandBuffer, contents); } void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo, const VkSubpassEndInfoKHR *pSubpassEndInfo) { RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents); } bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCmdEndRenderPass2KHR()" : "vkCmdEndRenderPass()"; RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass; if (rp_state) { if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) { vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-None-03103" : "VUID-vkCmdEndRenderPass-None-00910"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), vuid, "%s: Called before reaching final subpass.", function_name); } } vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-renderpass" : "VUID-vkCmdEndRenderPass-renderpass"; skip |= OutsideRenderPass(cb_state, function_name, vuid); vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-bufferlevel" : "VUID-vkCmdEndRenderPass-bufferlevel"; skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid); vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdEndRenderPass-commandBuffer-cmdpool"; skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid); const CMD_TYPE cmd_type = use_rp2 ? 
CMD_ENDRENDERPASS2KHR : CMD_ENDRENDERPASS; skip |= ValidateCmd(cb_state, cmd_type, function_name); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer); return skip; } void CoreChecks::RecordCmdEndRenderPassState(VkCommandBuffer commandBuffer) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); FRAMEBUFFER_STATE *framebuffer = GetFramebufferState(cb_state->activeFramebuffer); TransitionFinalSubpassLayouts(cb_state, &cb_state->activeRenderPassBeginInfo, framebuffer); cb_state->activeRenderPass = nullptr; cb_state->activeSubpass = 0; cb_state->activeFramebuffer = VK_NULL_HANDLE; } void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) { RecordCmdEndRenderPassState(commandBuffer); } void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) { RecordCmdEndRenderPassState(commandBuffer); } bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB, VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) { bool skip = false; if (!pSubCB->beginInfo.pInheritanceInfo) { return skip; } VkFramebuffer primary_fb = pCB->activeFramebuffer; VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer; if (secondary_fb != VK_NULL_HANDLE) { if (primary_fb != secondary_fb) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(primaryBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00099", "vkCmdExecuteCommands() called w/ invalid secondary command buffer %s which has a framebuffer %s" " that is not the same as the primary command buffer's current active framebuffer %s.", report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(), report_data->FormatHandle(primary_fb).c_str()); } auto fb = GetFramebufferState(secondary_fb); if (!fb) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(primaryBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer, "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %s which has invalid framebuffer %s.", report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str()); return skip; } } return skip; } bool CoreChecks::ValidateSecondaryCommandBufferState(GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) { bool skip = false; unordered_set<int> activeTypes; if (!disabled.query_validation) { for (auto queryObject : pCB->activeQueries) { auto queryPoolData = queryPoolMap.find(queryObject.pool); if (queryPoolData != queryPoolMap.end()) { if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS && pSubCB->beginInfo.pInheritanceInfo) { VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics; if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), "VUID-vkCmdExecuteCommands-commandBuffer-00104", 
"vkCmdExecuteCommands() called w/ invalid Cmd Buffer %s which has invalid active query pool %s" ". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.", report_data->FormatHandle(pCB->commandBuffer).c_str(), report_data->FormatHandle(queryPoolData->first).c_str()); } } activeTypes.insert(queryPoolData->second.createInfo.queryType); } } for (auto queryObject : pSubCB->startedQueries) { auto queryPoolData = queryPoolMap.find(queryObject.pool); if (queryPoolData != queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer, "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %s which has invalid active query pool %s" " of type %d but a query of that type has been started on secondary Cmd Buffer %s.", report_data->FormatHandle(pCB->commandBuffer).c_str(), report_data->FormatHandle(queryPoolData->first).c_str(), queryPoolData->second.createInfo.queryType, report_data->FormatHandle(pSubCB->commandBuffer).c_str()); } } } auto primary_pool = GetCommandPoolNode(pCB->createInfo.commandPool); auto secondary_pool = GetCommandPoolNode(pSubCB->createInfo.commandPool); if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pSubCB->commandBuffer), kVUID_Core_DrawState_InvalidQueueFamily, "vkCmdExecuteCommands(): Primary command buffer %s created in queue family %d has secondary command buffer " "%s created in queue family %d.", report_data->FormatHandle(pCB->commandBuffer).c_str(), primary_pool->queueFamilyIndex, report_data->FormatHandle(pSubCB->commandBuffer).c_str(), secondary_pool->queueFamilyIndex); } return skip; } bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); bool skip = false; GLOBAL_CB_NODE *sub_cb_state = NULL; std::unordered_set<GLOBAL_CB_NODE *> linked_command_buffers = cb_state->linkedCommandBuffers; for (uint32_t i = 0; i < commandBuffersCount; i++) { sub_cb_state = GetCBNode(pCommandBuffers[i]); assert(sub_cb_state); if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00088", "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %s in element %u of pCommandBuffers array. 
All " "cmd buffers in pCommandBuffers array must be secondary.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), i); } else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) { if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) { auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass); if (cb_state->activeRenderPass && !(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00096", "vkCmdExecuteCommands(): Secondary Command Buffer (%s) is executed within a render pass (%s) " "instance scope, but the Secondary Command Buffer does not have the " "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when " "the vkBeginCommandBuffer() was called.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass).c_str()); } else if (!cb_state->activeRenderPass && (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00100", "vkCmdExecuteCommands(): Secondary Command Buffer (%s) is executed outside a render pass " "instance scope, but the Secondary Command Buffer does have the " "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when " "the vkBeginCommandBuffer() was called.", report_data->FormatHandle(pCommandBuffers[i]).c_str()); } else if (cb_state->activeRenderPass && (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { // Make sure render pass is compatible with parent command buffer pass if has continue if (cb_state->activeRenderPass->renderPass != secondary_rp_state->renderPass) { skip |= ValidateRenderPassCompatibility( "primary command buffer", cb_state->activeRenderPass, "secondary command buffer", secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098"); } // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB skip |= ValidateFramebuffer(commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state, "vkCmdExecuteCommands()"); if (!sub_cb_state->cmd_execute_commands_functions.empty()) { // Inherit primary's activeFramebuffer and while running validate functions for (auto &function : sub_cb_state->cmd_execute_commands_functions) { skip |= function(cb_state, cb_state->activeFramebuffer); } } } } } // TODO(mlentine): Move more logic into this method skip |= ValidateSecondaryCommandBufferState(cb_state, sub_cb_state); skip |= ValidateCommandBufferState(sub_cb_state, "vkCmdExecuteCommands()", 0, "VUID-vkCmdExecuteCommands-pCommandBuffers-00089"); if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { if (sub_cb_state->in_use.load() || linked_command_buffers.count(sub_cb_state)) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00090", "Attempt to simultaneously execute command buffer %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!", 
                    report_data->FormatHandle(cb_state->commandBuffer).c_str());
            }
            if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                HandleToUint64(pCommandBuffers[i]), kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
                                "vkCmdExecuteCommands(): Secondary Command Buffer (%s) does not have "
                                "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
                                "command buffer (%s) to be treated as if it does not have "
                                "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.",
                                report_data->FormatHandle(pCommandBuffers[i]).c_str(),
                                report_data->FormatHandle(cb_state->commandBuffer).c_str());
            }
        }
        if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-commandBuffer-00101",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (%s) cannot be submitted with a query in flight "
                            "when inherited queries are not supported on this device.",
                            report_data->FormatHandle(pCommandBuffers[i]).c_str());
        }
        // Validate initial layout uses vs. the primary cmd buffer state
        // Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
        // initial layout usage of secondary command buffers' resources must match parent command buffer
        const auto *const_cb_state = static_cast<const GLOBAL_CB_NODE *>(cb_state);
        for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
            const auto image = sub_layout_map_entry.first;
            const auto *image_state = GetImageState(image);
            if (!image_state) continue;  // Can't set layouts of a dead image

            const auto *cb_subres_map = GetImageSubresourceLayoutMap(const_cb_state, image);
            // Const getter can be null in which case we have nothing to check against for this image...
            if (!cb_subres_map) continue;

            const auto &sub_cb_subres_map = sub_layout_map_entry.second;
            // Validate the initial_uses, that they match the current state of the primary cb, or absent a current state,
            // that they match any initial_layout.
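            // Example (illustrative): if the secondary command buffer expects image X, mip 0 to be in
            // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, but the primary last left that subresource in
            // VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, the loop below reports
            // UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001 for that subresource.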
            for (auto it_init = sub_cb_subres_map->BeginInitialUse(); !it_init.AtEnd(); ++it_init) {
                const auto &sub_layout = (*it_init).layout;
                if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue;  // secondary doesn't care about current or initial
                const auto &subresource = (*it_init).subresource;

                // Look up the current layout (if any)
                VkImageLayout cb_layout = cb_subres_map->GetSubresourceLayout(subresource);
                const char *layout_type = "current";
                if (cb_layout == kInvalidLayout) {
                    // Find initial layout (if any)
                    cb_layout = cb_subres_map->GetSubresourceInitialLayout(subresource);
                    layout_type = "initial";
                }
                if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                    HandleToUint64(pCommandBuffers[i]), "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
                                    "%s: Executed secondary command buffer using image %s (subresource: aspectMask 0x%X array "
                                    "layer %u, mip level %u) which expects layout %s--instead, image %s layout is %s.",
                                    "vkCmdExecuteCommands():", report_data->FormatHandle(image).c_str(), subresource.aspectMask,
                                    subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type,
                                    string_VkImageLayout(cb_layout));
                }
            }
        }
        linked_command_buffers.insert(sub_cb_state);
    }
    skip |= ValidatePrimaryCommandBuffer(cb_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-bufferlevel");
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdExecuteCommands()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdExecuteCommands-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    return skip;
}

void CoreChecks::PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                                 const VkCommandBuffer *pCommandBuffers) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    GLOBAL_CB_NODE *sub_cb_state = NULL;
    for (uint32_t i = 0; i < commandBuffersCount; i++) {
        sub_cb_state = GetCBNode(pCommandBuffers[i]);
        assert(sub_cb_state);
        if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                // TODO: Because this is a state change, clearing the VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT needs to be
                // moved from the validation step to the recording step
                cb_state->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
            }
        }

        // Propagate initial layout and current layout state to the primary cmd buffer
        for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
            const auto image = sub_layout_map_entry.first;
            const auto *image_state = GetImageState(image);
            if (!image_state) continue;  // Can't set layouts of a dead image

            auto *cb_subres_map = GetImageSubresourceLayoutMap(cb_state, *image_state);
            const auto *sub_cb_subres_map = sub_layout_map_entry.second.get();
            assert(cb_subres_map && sub_cb_subres_map);  // Non const get and map traversal should never be null
            cb_subres_map->UpdateFrom(*sub_cb_subres_map);
        }

        sub_cb_state->primaryCommandBuffer = cb_state->commandBuffer;
        cb_state->linkedCommandBuffers.insert(sub_cb_state);
        sub_cb_state->linkedCommandBuffers.insert(cb_state);
        for (auto &function : sub_cb_state->queryUpdates) {
            cb_state->queryUpdates.push_back(function);
        }
        for (auto &function : sub_cb_state->queue_submit_functions) {
            cb_state->queue_submit_functions.push_back(function);
        }
    }
}

bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem,
                                          VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
    bool skip = false;
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem);
    if (mem_info) {
        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
        skip |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
        if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), "VUID-vkMapMemory-memory-00682",
                            "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %s.",
                            report_data->FormatHandle(mem).c_str());
        }
    }
    skip |= ValidateMapMemRange(mem, offset, size);
    return skip;
}

void CoreChecks::PostCallRecordMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                         VkFlags flags, void **ppData, VkResult result) {
    if (VK_SUCCESS != result) return;
    // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
    StoreMemRanges(mem, offset, size);
    InitializeAndTrackMemory(mem, offset, size, ppData);
}

bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) {
    bool skip = false;
    auto mem_info = GetMemObjInfo(mem);
    if (mem_info && !mem_info->mem_range.size) {
        // Valid Usage: memory must currently be mapped
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        HandleToUint64(mem), "VUID-vkUnmapMemory-memory-00689",
                        "Unmapping Memory without memory being mapped: mem obj %s.", report_data->FormatHandle(mem).c_str());
    }
    return skip;
}

void CoreChecks::PreCallRecordUnmapMemory(VkDevice device, VkDeviceMemory mem) {
    auto mem_info = GetMemObjInfo(mem);
    mem_info->mem_range.size = 0;
    if (mem_info->shadow_copy) {
        free(mem_info->shadow_copy_base);
        mem_info->shadow_copy_base = 0;
        mem_info->shadow_copy = 0;
    }
}

bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    bool skip = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_info = GetMemObjInfo(pMemRanges[i].memory);
        if (mem_info) {
            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                    HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00686",
                                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
                                    ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
                                    funcName, static_cast<size_t>(pMemRanges[i].offset),
                                    static_cast<size_t>(mem_info->mem_range.offset));
                }
            } else {
                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE) ?
mem_info->alloc_info.allocationSize : (mem_info->mem_range.offset + mem_info->mem_range.size); if ((mem_info->mem_range.offset > pMemRanges[i].offset) || (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00685", "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER ") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").", funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size), static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end)); } } } } return skip; } bool CoreChecks::ValidateAndCopyNoncoherentMemoryToDriver(uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) { bool skip = false; for (uint32_t i = 0; i < mem_range_count; ++i) { auto mem_info = GetMemObjInfo(mem_ranges[i].memory); if (mem_info) { if (mem_info->shadow_copy) { VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE) ? mem_info->mem_range.size : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset); char *data = static_cast<char *>(mem_info->shadow_copy); for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) { if (data[j] != NoncoherentMemoryFillValue) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory), kVUID_Core_MemTrack_InvalidMap, "Memory underflow was detected on mem obj %s.", report_data->FormatHandle(mem_ranges[i].memory).c_str()); } } for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) { if (data[j] != NoncoherentMemoryFillValue) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory), kVUID_Core_MemTrack_InvalidMap, "Memory overflow was detected on mem obj %s.", report_data->FormatHandle(mem_ranges[i].memory).c_str()); } } memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size)); } } } return skip; } void CoreChecks::CopyNoncoherentMemoryFromDriver(uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) { for (uint32_t i = 0; i < mem_range_count; ++i) { auto mem_info = GetMemObjInfo(mem_ranges[i].memory); if (mem_info && mem_info->shadow_copy) { VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE) ? 
mem_info->mem_range.size : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset); char *data = static_cast<char *>(mem_info->shadow_copy); memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size)); } } } bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) { bool skip = false; for (uint32_t i = 0; i < mem_range_count; ++i) { uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize; if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges->memory), "VUID-VkMappedMemoryRange-offset-00687", "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").", func_name, i, mem_ranges[i].offset, atom_size); } auto mem_info = GetMemObjInfo(mem_ranges[i].memory); if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) && (SafeModulo(mem_ranges[i].size, atom_size) != 0)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges->memory), "VUID-VkMappedMemoryRange-size-01390", "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").", func_name, i, mem_ranges[i].size, atom_size); } } return skip; } bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) { bool skip = false; skip |= ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); skip |= ValidateAndCopyNoncoherentMemoryToDriver(memRangeCount, pMemRanges); skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); return skip; } bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) { bool skip = false; skip |= ValidateMappedMemoryRangeDeviceLimits("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); skip |= ValidateMemoryIsMapped("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); return skip; } void CoreChecks::PostCallRecordInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges, VkResult result) { if (VK_SUCCESS == result) { // Update our shadow copy with modified driver data CopyNoncoherentMemoryFromDriver(memRangeCount, pMemRanges); } } bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) { bool skip = false; auto mem_info = GetMemObjInfo(mem); if (mem_info) { if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), "VUID-vkGetDeviceMemoryCommitment-memory-00690", "Querying commitment for memory without VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: mem obj %s.", report_data->FormatHandle(mem).c_str()); } } return skip; } bool CoreChecks::ValidateBindImageMemory(VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset, const char *api_name) { bool skip = false; IMAGE_STATE *image_state = 
        GetImageState(image);
    if (image_state) {
        // Track objects tied to memory
        uint64_t image_handle = HandleToUint64(image);
        skip = ValidateSetMemBinding(mem, image_handle, kVulkanObjectTypeImage, api_name);
        if (!image_state->memory_requirements_checked) {
            // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
            // BindImageMemory, but it's implied, because the memory being bound must conform to the VkMemoryRequirements
            // returned by vkGetImageMemoryRequirements()
            skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
                            kVUID_Core_DrawState_InvalidImage,
                            "%s: Binding memory to image %s but vkGetImageMemoryRequirements() has not been called on that image.",
                            api_name, report_data->FormatHandle(image_handle).c_str());
            // Make the call for them so we can verify the state
            DispatchGetImageMemoryRequirements(device, image, &image_state->requirements);
        }

        // Validate bound memory range information
        auto mem_info = GetMemObjInfo(mem);
        if (mem_info) {
            skip |= ValidateInsertImageMemoryRange(image, mem_info, memoryOffset, image_state->requirements,
                                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
            skip |= ValidateMemoryTypes(mem_info, image_state->requirements.memoryTypeBits, api_name,
                                        "VUID-vkBindImageMemory-memory-01047");
        }

        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
                            "VUID-vkBindImageMemory-memoryOffset-01048",
                            "%s: memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements with image.",
                            api_name, memoryOffset, image_state->requirements.alignment);
        }

        if (mem_info) {
            // Validate memory requirements size
            if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
                                "VUID-vkBindImageMemory-size-01049",
                                "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                ", returned from a call to vkGetImageMemoryRequirements with image.",
                                api_name, mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size);
            }

            // Validate dedicated allocation
            if (mem_info->is_dedicated && ((mem_info->dedicated_image != image) || (memoryOffset != 0))) {
                // TODO: Add vkBindImageMemory2KHR error message when added to spec.
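                // Example (illustrative): memory allocated with a chained VkMemoryDedicatedAllocateInfo{.image = imgA}
                // may only be bound to imgA at memoryOffset 0; binding it to another image, or at a nonzero offset,
                // triggers the error below.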
auto validation_error = kVUIDUndefined; if (strcmp(api_name, "vkBindImageMemory()") == 0) { validation_error = "VUID-vkBindImageMemory-memory-01509"; } skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, image_handle, validation_error, "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfoKHR::image %s must be equal " "to image %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(mem_info->dedicated_image).c_str(), report_data->FormatHandle(image_handle).c_str(), memoryOffset); } } } return skip; } bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) { return ValidateBindImageMemory(image, mem, memoryOffset, "vkBindImageMemory()"); } void CoreChecks::UpdateBindImageMemoryState(VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) { IMAGE_STATE *image_state = GetImageState(image); if (image_state) { // Track bound memory range information auto mem_info = GetMemObjInfo(mem); if (mem_info) { InsertImageMemoryRange(image, mem_info, memoryOffset, image_state->requirements, image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR); } // Track objects tied to memory uint64_t image_handle = HandleToUint64(image); SetMemBinding(mem, image_state, memoryOffset, image_handle, kVulkanObjectTypeImage); } } void CoreChecks::PostCallRecordBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset, VkResult result) { if (VK_SUCCESS != result) return; UpdateBindImageMemoryState(image, mem, memoryOffset); } bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) { bool skip = false; char api_name[128]; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindImageMemory2() pBindInfos[%u]", i); skip |= ValidateBindImageMemory(pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) { bool skip = false; char api_name[128]; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindImageMemory2KHR() pBindInfos[%u]", i); skip |= ValidateBindImageMemory(pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } void CoreChecks::PostCallRecordBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos, VkResult result) { if (VK_SUCCESS != result) return; for (uint32_t i = 0; i < bindInfoCount; i++) { UpdateBindImageMemoryState(pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset); } } void CoreChecks::PostCallRecordBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos, VkResult result) { if (VK_SUCCESS != result) return; for (uint32_t i = 0; i < bindInfoCount; i++) { UpdateBindImageMemoryState(pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset); } } bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) { bool skip = false; auto event_state = GetEventNode(event); if (event_state) { if (event_state->write_in_use) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, HandleToUint64(event), kVUID_Core_DrawState_QueueForwardProgress, "Cannot call 
vkSetEvent() on event %s that is already in use by a command buffer.", report_data->FormatHandle(event).c_str()); } } return skip; } void CoreChecks::PreCallRecordSetEvent(VkDevice device, VkEvent event) { auto event_state = GetEventNode(event); if (event_state) { event_state->needsSignaled = false; event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT; } // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297) for (auto queue_data : queueMap) { auto event_entry = queue_data.second.eventToStageMap.find(event); if (event_entry != queue_data.second.eventToStageMap.end()) { event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT; } } } bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) { auto pFence = GetFenceNode(fence); bool skip = ValidateFenceForSubmit(pFence); if (skip) { return true; } unordered_set<VkSemaphore> signaled_semaphores; unordered_set<VkSemaphore> unsignaled_semaphores; unordered_set<VkSemaphore> internal_semaphores; for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) { const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx]; std::vector<SEMAPHORE_WAIT> semaphore_waits; std::vector<VkSemaphore> semaphore_signals; for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) { VkSemaphore semaphore = bindInfo.pWaitSemaphores[i]; auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (unsignaled_semaphores.count(semaphore) || (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "Queue %s is waiting on semaphore %s that has no way to be signaled.", report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str()); } else { signaled_semaphores.erase(semaphore); unsignaled_semaphores.insert(semaphore); } } if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) { internal_semaphores.insert(semaphore); } } for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) { VkSemaphore semaphore = bindInfo.pSignalSemaphores[i]; auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) { if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "Queue %s is signaling semaphore %s that was previously signaled by queue %s but has not since " "been waited on by any queue.", report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(pSemaphore->signaler.first).c_str()); } else { unsignaled_semaphores.erase(semaphore); signaled_semaphores.insert(semaphore); } } } // Store sparse binding image_state and after binding is complete make sure that any requiring metadata have it bound std::unordered_set<IMAGE_STATE *> sparse_images; // If we're binding sparse image memory make sure reqs were queried and note if metadata is 
required and bound for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) { const auto &image_bind = bindInfo.pImageBinds[i]; auto image_state = GetImageState(image_bind.image); if (!image_state) continue; // Param/Object validation should report image_bind.image handles being invalid, so just skip here. sparse_images.insert(image_state); if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) { if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) { // For now just warning if sparse image binding occurs without calling to get reqs first return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding sparse memory to image %s without first calling " "vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.", report_data->FormatHandle(image_state->image).c_str()); } } if (!image_state->memory_requirements_checked) { // For now just warning if sparse image binding occurs without calling to get reqs first return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding sparse memory to image %s without first calling " "vkGetImageMemoryRequirements() to retrieve requirements.", report_data->FormatHandle(image_state->image).c_str()); } } for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) { const auto &image_opaque_bind = bindInfo.pImageOpaqueBinds[i]; auto image_state = GetImageState(bindInfo.pImageOpaqueBinds[i].image); if (!image_state) continue; // Param/Object validation should report image_bind.image handles being invalid, so just skip here. 
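            // (Note) Opaque binds feed the same sparse_images set as the per-subresource binds above, so the
            // metadata-bound check at the end of this bindInfo covers images bound either way.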
sparse_images.insert(image_state); if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) { if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) { // For now just warning if sparse image binding occurs without calling to get reqs first return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding opaque sparse memory to image %s without first calling " "vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.", report_data->FormatHandle(image_state->image).c_str()); } } if (!image_state->memory_requirements_checked) { // For now just warning if sparse image binding occurs without calling to get reqs first return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding opaque sparse memory to image %s without first calling " "vkGetImageMemoryRequirements() to retrieve requirements.", report_data->FormatHandle(image_state->image).c_str()); } for (uint32_t j = 0; j < image_opaque_bind.bindCount; ++j) { if (image_opaque_bind.pBinds[j].flags & VK_SPARSE_MEMORY_BIND_METADATA_BIT) { image_state->sparse_metadata_bound = true; } } } for (const auto &sparse_image_state : sparse_images) { if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) { // Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(sparse_image_state->image), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding sparse memory to image %s which requires a metadata aspect but no " "binding with VK_SPARSE_MEMORY_BIND_METADATA_BIT set was made.", report_data->FormatHandle(sparse_image_state->image).c_str()); } } } return skip; } void CoreChecks::PostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence, VkResult result) { if (result != VK_SUCCESS) return; uint64_t early_retire_seq = 0; auto pFence = GetFenceNode(fence); auto pQueue = GetQueueState(queue); if (pFence) { if (pFence->scope == kSyncScopeInternal) { SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount)); if (!bindInfoCount) { // No work to do, just dropping a fence in the queue by itself. 
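                // (Illustrative) This models vkQueueBindSparse(queue, 0, nullptr, fence), which an application can
                // use purely as a fence signal operation ordered after all previously submitted work on the queue.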
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence); } } else { // Retire work up until this fence early, we will not see the wait that corresponds to this signal early_retire_seq = pQueue->seq + pQueue->submissions.size(); if (!external_sync_warning) { external_sync_warning = true; log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence), kVUID_Core_DrawState_QueueForwardProgress, "vkQueueBindSparse(): Signaling external fence %s on queue %s will disable validation of preceding command " "buffer lifecycle states and the in-use status of associated objects.", report_data->FormatHandle(fence).c_str(), report_data->FormatHandle(queue).c_str()); } } } for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) { const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx]; // Track objects tied to memory for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) { for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) { auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k]; SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size}, HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer); } } for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) { for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) { auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k]; SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size}, HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage); } } for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) { for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) { auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k]; // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4; SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, size}, HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage); } } std::vector<SEMAPHORE_WAIT> semaphore_waits; std::vector<VkSemaphore> semaphore_signals; std::vector<VkSemaphore> semaphore_externals; for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) { VkSemaphore semaphore = bindInfo.pWaitSemaphores[i]; auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore) { if (pSemaphore->scope == kSyncScopeInternal) { if (pSemaphore->signaler.first != VK_NULL_HANDLE) { semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second}); pSemaphore->in_use.fetch_add(1); } pSemaphore->signaler.first = VK_NULL_HANDLE; pSemaphore->signaled = false; } else { semaphore_externals.push_back(semaphore); pSemaphore->in_use.fetch_add(1); if (pSemaphore->scope == kSyncScopeExternalTemporary) { pSemaphore->scope = kSyncScopeInternal; } } } } for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) { VkSemaphore semaphore = bindInfo.pSignalSemaphores[i]; auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore) { if (pSemaphore->scope == kSyncScopeInternal) { pSemaphore->signaler.first = queue; pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1; pSemaphore->signaled = true; pSemaphore->in_use.fetch_add(1); semaphore_signals.push_back(semaphore); } else { // Retire work up until 
this submit early, we will not see the wait that corresponds to this signal early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1); if (!external_sync_warning) { external_sync_warning = true; log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "vkQueueBindSparse(): Signaling external semaphore %s on queue %s will disable validation of " "preceding command buffer lifecycle states and the in-use status of associated objects.", report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(queue).c_str()); } } } } pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals, bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE); } if (early_retire_seq) { RetireWorkOnQueue(pQueue, early_retire_seq); } } void CoreChecks::PostCallRecordCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore, VkResult result) { if (VK_SUCCESS != result) return; SEMAPHORE_NODE *sNode = &semaphoreMap[*pSemaphore]; sNode->signaler.first = VK_NULL_HANDLE; sNode->signaler.second = 0; sNode->signaled = false; sNode->scope = kSyncScopeInternal; } bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) { bool skip = false; SEMAPHORE_NODE *sema_node = GetSemaphoreNode(semaphore); if (sema_node) { VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore}; skip |= ValidateObjectNotInUse(sema_node, obj_struct, caller_name, kVUIDUndefined); } return skip; } void CoreChecks::RecordImportSemaphoreState(VkSemaphore semaphore, VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) { SEMAPHORE_NODE *sema_node = GetSemaphoreNode(semaphore); if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) { if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) && sema_node->scope == kSyncScopeInternal) { sema_node->scope = kSyncScopeExternalTemporary; } else { sema_node->scope = kSyncScopeExternalPermanent; } } } #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR( VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) { return ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR"); } void CoreChecks::PostCallRecordImportSemaphoreWin32HandleKHR( VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo, VkResult result) { if (VK_SUCCESS != result) return; RecordImportSemaphoreState(pImportSemaphoreWin32HandleInfo->semaphore, pImportSemaphoreWin32HandleInfo->handleType, pImportSemaphoreWin32HandleInfo->flags); } #endif // VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) { return ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR"); } void CoreChecks::PostCallRecordImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo, VkResult result) { if (VK_SUCCESS != result) return; RecordImportSemaphoreState(pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType, pImportSemaphoreFdInfo->flags); } void 
CoreChecks::RecordGetExternalSemaphoreState(VkSemaphore semaphore, VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) { SEMAPHORE_NODE *semaphore_state = GetSemaphoreNode(semaphore); if (semaphore_state && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) { // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference semaphore_state->scope = kSyncScopeExternalPermanent; } } #ifdef VK_USE_PLATFORM_WIN32_KHR void CoreChecks::PostCallRecordGetSemaphoreWin32HandleKHR(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo, HANDLE *pHandle, VkResult result) { if (VK_SUCCESS != result) return; RecordGetExternalSemaphoreState(pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType); } #endif void CoreChecks::PostCallRecordGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd, VkResult result) { if (VK_SUCCESS != result) return; RecordGetExternalSemaphoreState(pGetFdInfo->semaphore, pGetFdInfo->handleType); } bool CoreChecks::ValidateImportFence(VkFence fence, const char *caller_name) { FENCE_NODE *fence_node = GetFenceNode(fence); bool skip = false; if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence), kVUIDUndefined, "Cannot call %s on fence %s that is currently in use.", caller_name, report_data->FormatHandle(fence).c_str()); } return skip; } void CoreChecks::RecordImportFenceState(VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type, VkFenceImportFlagsKHR flags) { FENCE_NODE *fence_node = GetFenceNode(fence); if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) { if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) && fence_node->scope == kSyncScopeInternal) { fence_node->scope = kSyncScopeExternalTemporary; } else { fence_node->scope = kSyncScopeExternalPermanent; } } } #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) { return ValidateImportFence(pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR"); } void CoreChecks::PostCallRecordImportFenceWin32HandleKHR(VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo, VkResult result) { if (VK_SUCCESS != result) return; RecordImportFenceState(pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType, pImportFenceWin32HandleInfo->flags); } #endif // VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) { return ValidateImportFence(pImportFenceFdInfo->fence, "vkImportFenceFdKHR"); } void CoreChecks::PostCallRecordImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo, VkResult result) { if (VK_SUCCESS != result) return; RecordImportFenceState(pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags); } void CoreChecks::RecordGetExternalFenceState(VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) { FENCE_NODE *fence_state = GetFenceNode(fence); if (fence_state) { if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) { // Export with reference transference becomes external fence_state->scope = 
kSyncScopeExternalPermanent; } else if (fence_state->scope == kSyncScopeInternal) { // Export with copy transference has a side effect of resetting the fence fence_state->state = FENCE_UNSIGNALED; } } } #ifdef VK_USE_PLATFORM_WIN32_KHR void CoreChecks::PostCallRecordGetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo, HANDLE *pHandle, VkResult result) { if (VK_SUCCESS != result) return; RecordGetExternalFenceState(pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType); } #endif void CoreChecks::PostCallRecordGetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd, VkResult result) { if (VK_SUCCESS != result) return; RecordGetExternalFenceState(pGetFdInfo->fence, pGetFdInfo->handleType); } void CoreChecks::PostCallRecordCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent, VkResult result) { if (VK_SUCCESS != result) return; eventMap[*pEvent].needsSignaled = false; eventMap[*pEvent].write_in_use = 0; eventMap[*pEvent].stageMask = VkPipelineStageFlags(0); } bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state, SWAPCHAIN_NODE *old_swapchain_state) { // All physical devices and queue families are required to be able to present to any native window on Android; require the // application to have established support on any other platform. if (!instance_extensions.vk_khr_android_surface) { auto support_predicate = [this](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool { // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device return (qs.first.gpu == physical_device) && qs.second; }; const auto &support = surface_state->gpu_queue_support; bool is_supported = std::any_of(support.begin(), support.end(), support_predicate); if (!is_supported) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-surface-01270", "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. 
The " "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with " "this surface for at least one queue family of this device.", func_name)) return true; } } if (old_swapchain_state) { if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pCreateInfo->oldSwapchain), "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name)) return true; } if (old_swapchain_state->retired) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pCreateInfo->oldSwapchain), "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933", "%s: pCreateInfo->oldSwapchain is retired", func_name)) return true; } } if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689", "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height)) return true; } auto physical_device_state = GetPhysicalDeviceState(); if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physical_device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery, "%s: surface capabilities not retrieved for this physical device", func_name)) return true; } else { // have valid capabilities auto &capabilities = physical_device_state->surfaceCapabilities; // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount: if (pCreateInfo->minImageCount < capabilities.minImageCount) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271", "%s called with minImageCount = %d, which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).", func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) return true; } if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272", "%s called with minImageCount = %d, which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. 
minImageCount = %d, maxImageCount = %d).", func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) return true; } // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent: if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) || (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) || (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) || (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274", "%s called with imageExtent = (%d,%d), which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), " "maxImageExtent = (%d,%d).", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height)) return true; } // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in // VkSurfaceCapabilitiesKHR::supportedTransforms. if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) || !(pCreateInfo->preTransform & capabilities.supportedTransforms)) { // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build // it up a little at a time, and then log it: std::string errorString = ""; char str[1024]; // Here's the first part of the message: sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform)); errorString += str; for (int i = 0; i < 32; i++) { // Build up the rest of the message: if ((1 << i) & capabilities.supportedTransforms) { const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i)); sprintf(str, " %s\n", newStr); errorString += str; } } // Log the message that we've built up: if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", errorString.c_str())) return true; } // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) || !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) { // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build // it up a little at a time, and then log it: std::string errorString = ""; char str[1024]; // Here's the first part of the message: sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). 
Supported values are:\n", func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha)); errorString += str; for (int i = 0; i < 32; i++) { // Build up the rest of the message: if ((1 << i) & capabilities.supportedCompositeAlpha) { const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i)); sprintf(str, " %s\n", newStr); errorString += str; } } // Log the message that we've built up: if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", errorString.c_str())) return true; } // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers: if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275", "%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers)) return true; } // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags: if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276", "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.", func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags)) return true; } if (device_extensions.vk_khr_surface_protected_capabilities && (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) { VkPhysicalDeviceSurfaceInfo2KHR surfaceInfo = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR }; surfaceInfo.surface = pCreateInfo->surface; VkSurfaceProtectedCapabilitiesKHR surfaceProtectedCapabilities = { VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR }; VkSurfaceCapabilities2KHR surfaceCapabilities = { VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR }; surfaceCapabilities.pNext = &surfaceProtectedCapabilities; DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->phys_device, &surfaceInfo, &surfaceCapabilities); if (!surfaceProtectedCapabilities.supportsProtected) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03187", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface " "capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.", func_name)) return true; } } } // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR(): if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery, "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name)) return true; } else { // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format: bool foundFormat = false; bool foundColorSpace = false; bool foundMatch = false; for (auto const &format : physical_device_state->surface_formats) { if (pCreateInfo->imageFormat == 
format.format) { // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace: foundFormat = true; if (pCreateInfo->imageColorSpace == format.colorSpace) { foundMatch = true; break; } } else { if (pCreateInfo->imageColorSpace == format.colorSpace) { foundColorSpace = true; } } } if (!foundMatch) { if (!foundFormat) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273", "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name, pCreateInfo->imageFormat)) return true; } if (!foundColorSpace) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273", "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name, pCreateInfo->imageColorSpace)) return true; } } } // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR(): if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) { // FIFO is required to always be supported if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery, "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name)) return true; } } else { // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR(): bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(), pCreateInfo->presentMode) != physical_device_state->present_modes.end(); if (!foundMatch) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-presentMode-01281", "%s called with a non-supported presentMode (i.e. %s).", func_name, string_VkPresentModeKHR(pCreateInfo->presentMode))) return true; } } // Validate state for shared presentable case if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode || VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) { if (!device_extensions.vk_khr_shared_presentable_image) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_ExtensionNotEnabled, "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not " "been enabled.", func_name, string_VkPresentModeKHR(pCreateInfo->presentMode))) return true; } else if (pCreateInfo->minImageCount != 1) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383", "%s called with presentMode %s, but minImageCount value is %d. 
For shared presentable image, minImageCount "
                            "must be 1.",
                            func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
                return true;
        }
    }

    if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
        if (!device_extensions.vk_khr_swapchain_mutable_format) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                        kVUID_Core_DrawState_ExtensionNotEnabled,
                        "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR which requires the "
                        "VK_KHR_swapchain_mutable_format extension, which has not been enabled.",
                        func_name))
                return true;
        } else {
            const auto *image_format_list = lvl_find_in_chain<VkImageFormatListCreateInfoKHR>(pCreateInfo->pNext);
            if (image_format_list == nullptr) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
                            "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the pNext chain of "
                            "pCreateInfo does not contain an instance of VkImageFormatListCreateInfoKHR.",
                            func_name))
                    return true;
            } else if (image_format_list->viewFormatCount == 0) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
                            "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the viewFormatCount "
                            "member of VkImageFormatListCreateInfoKHR in the pNext chain is zero.",
                            func_name))
                    return true;
            } else {
                bool found_base_format = false;
                for (uint32_t i = 0; i < image_format_list->viewFormatCount; ++i) {
                    if (image_format_list->pViewFormats[i] == pCreateInfo->imageFormat) {
                        found_base_format = true;
                        break;
                    }
                }
                if (!found_base_format) {
                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
                                "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but none of the "
                                "elements of the pViewFormats member of VkImageFormatListCreateInfoKHR match "
                                "pCreateInfo->imageFormat.",
                                func_name))
                        return true;
                }
            }
        }
    }

    if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) {
        // Use the caller-supplied function name so concurrent-sharing-mode errors are attributed to the
        // swapchain entry point rather than to vkCreateBuffer.
        bool skip = ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, func_name,
                                          "pCreateInfo->pQueueFamilyIndices",
                                          "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428",
                                          "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428", false);
        if (skip) return true;
    }
    return false;
}

bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
    auto surface_state = GetSurfaceState(pCreateInfo->surface);
    auto old_swapchain_state = GetSwapchainNode(pCreateInfo->oldSwapchain);
    return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state);
}

void CoreChecks::RecordCreateSwapchainState(VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                            VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
                                            SWAPCHAIN_NODE *old_swapchain_state) {
    if (VK_SUCCESS == result) {
        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
        if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
            VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR ==
pCreateInfo->presentMode) { swapchain_state->shared_presentable = true; } surface_state->swapchain = swapchain_state.get(); swapchainMap[*pSwapchain] = std::move(swapchain_state); } else { surface_state->swapchain = nullptr; } // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain is retired if (old_swapchain_state) { old_swapchain_state->retired = true; } return; } void CoreChecks::PostCallRecordCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain, VkResult result) { auto surface_state = GetSurfaceState(pCreateInfo->surface); auto old_swapchain_state = GetSwapchainNode(pCreateInfo->oldSwapchain); RecordCreateSwapchainState(result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state); } void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) { if (!swapchain) return; auto swapchain_data = GetSwapchainNode(swapchain); if (swapchain_data) { if (swapchain_data->images.size() > 0) { for (auto swapchain_image : swapchain_data->images) { auto image_sub = imageSubresourceMap.find(swapchain_image); if (image_sub != imageSubresourceMap.end()) { for (auto imgsubpair : image_sub->second) { auto image_item = imageLayoutMap.find(imgsubpair); if (image_item != imageLayoutMap.end()) { imageLayoutMap.erase(image_item); } } imageSubresourceMap.erase(image_sub); } ClearMemoryObjectBindings(HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR); EraseQFOImageRelaseBarriers(swapchain_image); imageMap.erase(swapchain_image); } } auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface); if (surface_state) { if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr; } swapchainMap.erase(swapchain); } } bool CoreChecks::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) { auto swapchain_state = GetSwapchainNode(swapchain); bool skip = false; if (swapchain_state && pSwapchainImages) { // Compare the preliminary value of *pSwapchainImageCount with the value this time: if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_Swapchain_PriorCount, "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount; but no prior positive value has " "been seen for pSwapchainImages."); } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_Swapchain_InvalidCount, "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with pSwapchainImages set to a " "value (%d) that is greater than the value (%d) that was returned when pSwapchainImageCount was NULL.", *pSwapchainImageCount, swapchain_state->get_swapchain_image_count); } } return skip; } void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages, VkResult result) { if ((result != VK_SUCCESS) && (result != VK_INCOMPLETE)) return; auto swapchain_state = GetSwapchainNode(swapchain); if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount); if (pSwapchainImages) { if 
(swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) { swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS; } for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) { if (swapchain_state->images[i] != VK_NULL_HANDLE) continue; // Already retrieved this. IMAGE_LAYOUT_NODE image_layout_node; image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED; image_layout_node.format = swapchain_state->createInfo.imageFormat; // Add imageMap entries for each swapchain image VkImageCreateInfo image_ci = {}; image_ci.flags = 0; image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.format = swapchain_state->createInfo.imageFormat; image_ci.extent.width = swapchain_state->createInfo.imageExtent.width; image_ci.extent.height = swapchain_state->createInfo.imageExtent.height; image_ci.extent.depth = 1; image_ci.mipLevels = 1; image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers; image_ci.samples = VK_SAMPLE_COUNT_1_BIT; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = swapchain_state->createInfo.imageUsage; image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode; imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci)); auto &image_state = imageMap[pSwapchainImages[i]]; image_state->valid = false; image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY; swapchain_state->images[i] = pSwapchainImages[i]; ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()}; imageSubresourceMap[pSwapchainImages[i]].push_back(subpair); imageLayoutMap[subpair] = image_layout_node; } } if (*pSwapchainImageCount) { if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) { swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT; } swapchain_state->get_swapchain_image_count = *pSwapchainImageCount; } } bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) { bool skip = false; auto queue_state = GetQueueState(queue); for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) { auto pSemaphore = GetSemaphoreNode(pPresentInfo->pWaitSemaphores[i]); if (pSemaphore && !pSemaphore->signaled) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, kVUID_Core_DrawState_QueueForwardProgress, "Queue %s is waiting on semaphore %s that has no way to be signaled.", report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str()); } } for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) { auto swapchain_data = GetSwapchainNode(pPresentInfo->pSwapchains[i]); if (swapchain_data) { if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainInvalidImage, "vkQueuePresentKHR: Swapchain image index too large (%u). 
There are only %u images in this swapchain.",
                            pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
            } else {
                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = GetImageState(image);
                // A count-only vkGetSwapchainImagesKHR call resizes images[] with VK_NULL_HANDLE entries, so
                // image_state can legitimately be null here; guard the dereferences.
                if (image_state) {
                    if (image_state->shared_presentable) {
                        image_state->layout_locked = true;
                    }
                    if (!image_state->acquired) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                        HandleToUint64(pPresentInfo->pSwapchains[i]),
                                        kVUID_Core_DrawState_SwapchainImageNotAcquired,
                                        "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
                                        pPresentInfo->pImageIndices[i]);
                    }
                }
                vector<VkImageLayout> layouts;
                if (FindLayouts(image, layouts)) {
                    for (auto layout : layouts) {
                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!device_extensions.vk_khr_shared_presentable_image ||
                                                                            (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
                            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                            HandleToUint64(queue), "VUID-VkPresentInfoKHR-pImageIndices-01296",
                                            "Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
                                            "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but this image is in %s.",
                                            string_VkImageLayout(layout));
                        }
                    }
                }
            }

            // All physical devices and queue families are required to be able to present to any native window on Android;
            // require the application to have established support on any other platform.
            if (!instance_extensions.vk_khr_android_surface) {
                auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
                auto support_it = surface_state->gpu_queue_support.find({physical_device, queue_state->queueFamilyIndex});
                if (support_it == surface_state->gpu_queue_support.end()) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                    HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainUnsupportedQueue,
                                    "vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
                } else if (!support_it->second) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                    HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-vkQueuePresentKHR-pSwapchains-01292",
                                    "vkQueuePresentKHR: Presenting image on queue that cannot present to this surface.");
                }
            }
        }
    }
    if (pPresentInfo && pPresentInfo->pNext) {
        // Verify ext struct
        const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
        if (present_regions) {
            for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
                auto swapchain_data = GetSwapchainNode(pPresentInfo->pSwapchains[i]);
                assert(swapchain_data);
                VkPresentRegionKHR region = present_regions->pRegions[i];
                for (uint32_t j = 0; j < region.rectangleCount; ++j) {
                    VkRectLayerKHR rect = region.pRectangles[j];
                    if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                        HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-offset-01261",
                                        "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
                                        "pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
                                        "than the corresponding swapchain's imageExtent.width (%i).",
                                        i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
                    }
                    if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
                        skip |= log_msg(report_data,
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-offset-01261", "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, " "pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater " "than the corresponding swapchain's imageExtent.height (%i).", i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height); } if (rect.layer > swapchain_data->createInfo.imageArrayLayers) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-layer-01262", "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer " "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).", i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers); } } } } const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext); if (present_times_info) { if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[0]), "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247", "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount " "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, " "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.", present_times_info->swapchainCount, pPresentInfo->swapchainCount); } } } return skip; } void CoreChecks::PostCallRecordQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo, VkResult result) { // Semaphore waits occur before error generation, if the call reached the ICD. (Confirm?) for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) { auto pSemaphore = GetSemaphoreNode(pPresentInfo->pWaitSemaphores[i]); if (pSemaphore) { pSemaphore->signaler.first = VK_NULL_HANDLE; pSemaphore->signaled = false; } } for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) { // Note: this is imperfect, in that we can get confused about what did or didn't succeed-- but if the app does that, it's // confused itself just as much. auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result; if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue; // this present didn't actually happen. // Mark the image as having been released to the WSI auto swapchain_data = GetSwapchainNode(pPresentInfo->pSwapchains[i]); if (swapchain_data && (swapchain_data->images.size() > pPresentInfo->pImageIndices[i])) { auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]]; auto image_state = GetImageState(image); if (image_state) { image_state->acquired = false; } } } // Note: even though presentation is directed to a queue, there is no direct ordering between QP and subsequent work, so QP (and // its semaphore waits) /never/ participate in any completion proof. 
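    // Illustrative app-side sequence this bookkeeping models (sketch only; the semaphore/queue names are
    // hypothetical and not part of this layer):
    //     uint32_t idx;
    //     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_sem, VK_NULL_HANDLE, &idx);
    //     // ... submit work that waits on acquire_sem and signals render_sem ...
    //     VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
    //     present_info.waitSemaphoreCount = 1;
    //     present_info.pWaitSemaphores = &render_sem;
    //     present_info.swapchainCount = 1;
    //     present_info.pSwapchains = &swapchain;
    //     present_info.pImageIndices = &idx;
    //     vkQueuePresentKHR(present_queue, &present_info);
    // On a successful present, the code above unsignals the waited semaphores and clears the image's
    // acquired flag so the next acquire/present cycle validates cleanly.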
} bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) { bool skip = false; if (pCreateInfos) { for (uint32_t i = 0; i < swapchainCount; i++) { auto surface_state = GetSurfaceState(pCreateInfos[i].surface); auto old_swapchain_state = GetSwapchainNode(pCreateInfos[i].oldSwapchain); std::stringstream func_name; func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()"; skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state); } } return skip; } void CoreChecks::PostCallRecordCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains, VkResult result) { if (pCreateInfos) { for (uint32_t i = 0; i < swapchainCount; i++) { auto surface_state = GetSurfaceState(pCreateInfos[i].surface); auto old_swapchain_state = GetSwapchainNode(pCreateInfos[i].oldSwapchain); RecordCreateSwapchainState(result, &pCreateInfos[i], &pSwapchains[i], surface_state, old_swapchain_state); } } } bool CoreChecks::ValidateAcquireNextImage(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, const char *func_name) { bool skip = false; if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-vkAcquireNextImageKHR-semaphore-01780", "%s: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to " "determine the completion of this operation.", func_name); } auto pSemaphore = GetSemaphoreNode(semaphore); if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), "VUID-vkAcquireNextImageKHR-semaphore-01286", "%s: Semaphore must not be currently signaled or in a wait state.", func_name); } auto pFence = GetFenceNode(fence); if (pFence) { skip |= ValidateFenceForSubmit(pFence); } auto swapchain_data = GetSwapchainNode(swapchain); if (swapchain_data && swapchain_data->retired) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(swapchain), "VUID-vkAcquireNextImageKHR-swapchain-01285", "%s: This swapchain has been retired. 
The application can still present any images it "
                        "has acquired, but cannot acquire any more.",
                        func_name);
    }

    auto physical_device_state = GetPhysicalDeviceState();
    // swapchain_data can be null when an invalid handle is passed; don't dereference it for the
    // acquired-image accounting below.
    if (swapchain_data && physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(), [=](VkImage image) {
            // Entries can be VK_NULL_HANDLE if only the image count was queried; treat those as not acquired.
            auto image_state = GetImageState(image);
            return image_state && image_state->acquired;
        });
        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainTooManyImages,
                            "%s: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")", func_name,
                            acquired_images);
        }
    }

    if (swapchain_data && swapchain_data->images.size() == 0) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainImagesNotFound,
                        "%s: No images found to acquire from. Application probably did not call "
                        "vkGetSwapchainImagesKHR after swapchain creation.",
                        func_name);
    }
    return skip;
}

bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                    VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    return ValidateAcquireNextImage(device, swapchain, timeout, semaphore, fence, pImageIndex, "vkAcquireNextImageKHR");
}

bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
                                                     uint32_t *pImageIndex) {
    bool skip = false;
    skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                                    HandleToUint64(pAcquireInfo->swapchain),
                                                    "VUID-VkAcquireNextImageInfoKHR-deviceMask-01290");
    skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                     HandleToUint64(pAcquireInfo->swapchain), "VUID-VkAcquireNextImageInfoKHR-deviceMask-01291");
    skip |= ValidateAcquireNextImage(device, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
                                     pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR");
    return skip;
}

void CoreChecks::RecordAcquireNextImageState(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore,
                                             VkFence fence, uint32_t *pImageIndex) {
    auto pFence = GetFenceNode(fence);
    if (pFence && pFence->scope == kSyncScopeInternal) {
        // Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
        // import
        pFence->state = FENCE_INFLIGHT;
        pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
    }

    auto pSemaphore = GetSemaphoreNode(semaphore);
    if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
        // Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
        // temporary import
        pSemaphore->signaled = true;
        pSemaphore->signaler.first = VK_NULL_HANDLE;
    }

    // Mark the image as acquired.
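    // Also propagate shared_presentable from the swapchain so QueuePresent validation can lock the layout of
    // shared presentable images.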
auto swapchain_data = GetSwapchainNode(swapchain); if (swapchain_data && (swapchain_data->images.size() > *pImageIndex)) { auto image = swapchain_data->images[*pImageIndex]; auto image_state = GetImageState(image); if (image_state) { image_state->acquired = true; image_state->shared_presentable = swapchain_data->shared_presentable; } } } void CoreChecks::PostCallRecordAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, VkResult result) { if ((VK_SUCCESS != result) && (VK_SUBOPTIMAL_KHR != result)) return; RecordAcquireNextImageState(device, swapchain, timeout, semaphore, fence, pImageIndex); } void CoreChecks::PostCallRecordAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex, VkResult result) { if ((VK_SUCCESS != result) && (VK_SUBOPTIMAL_KHR != result)) return; RecordAcquireNextImageState(device, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore, pAcquireInfo->fence, pImageIndex); } void CoreChecks::PostCallRecordEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices, VkResult result) { if ((NULL != pPhysicalDevices) && ((result == VK_SUCCESS || result == VK_INCOMPLETE))) { for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) { auto &phys_device_state = physical_device_map[pPhysicalDevices[i]]; phys_device_state.phys_device = pPhysicalDevices[i]; // Init actual features for each physical device DispatchGetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features2.features); } } } // Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(debug_report_data *report_data, PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family_property_count, bool qfp_null, const char *caller_name) { bool skip = false; if (!qfp_null) { // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) { skip |= log_msg( report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_MissingQueryCount, "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended " "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.", caller_name, caller_name); // Then verify that pCount that is passed in on second call matches what was returned } else if (pd_state->queue_family_count != requested_queue_family_property_count) { skip |= log_msg( report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_CountMismatch, "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32 ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32 ". 
It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was " "previously obtained by calling %s with NULL pQueueFamilyProperties.", caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name); } pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS; } return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(report_data, physical_device_state, *pQueueFamilyPropertyCount, (nullptr == pQueueFamilyProperties), "vkGetPhysicalDeviceQueueFamilyProperties()"); } bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(report_data, physical_device_state, *pQueueFamilyPropertyCount, (nullptr == pQueueFamilyProperties), "vkGetPhysicalDeviceQueueFamilyProperties2()"); } bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(report_data, physical_device_state, *pQueueFamilyPropertyCount, (nullptr == pQueueFamilyProperties), "vkGetPhysicalDeviceQueueFamilyProperties2KHR()"); } // Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { if (!pQueueFamilyProperties) { if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT; pd_state->queue_family_count = count; } else { // Save queue family properties pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS; pd_state->queue_family_count = std::max(pd_state->queue_family_count, count); pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count)); for (uint32_t i = 0; i < count; ++i) { pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties; } } } void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); VkQueueFamilyProperties2KHR *pqfp = nullptr; std::vector<VkQueueFamilyProperties2KHR> qfp; qfp.resize(*pQueueFamilyPropertyCount); if (pQueueFamilyProperties) { for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; ++i) { qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR; qfp[i].pNext = nullptr; qfp[i].queueFamilyProperties = pQueueFamilyProperties[i]; } pqfp = qfp.data(); } 
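    // Hand the repacked (2KHR-shaped) array to the common recorder; pqfp stays null for a count-only query.
    // The two-call idiom the warnings in ValidateCommonGetPhysicalDeviceQueueFamilyProperties enforce looks
    // like this on the app side (sketch only; variable names are hypothetical):
    //     uint32_t count = 0;
    //     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);       // query the count
    //     std::vector<VkQueueFamilyProperties> props(count);
    //     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());  // query the details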
StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pqfp); } void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties); } void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties); } bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) { auto surface_state = GetSurfaceState(surface); bool skip = false; if ((surface_state) && (surface_state->swapchain)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, HandleToUint64(instance), "VUID-vkDestroySurfaceKHR-surface-01266", "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed."); } return skip; } void CoreChecks::PreCallRecordValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) { surface_map.erase(surface); } void CoreChecks::RecordVulkanSurface(VkSurfaceKHR *pSurface) { surface_map[*pSurface] = SURFACE_STATE(*pSurface); } void CoreChecks::PostCallRecordCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } #ifdef VK_USE_PLATFORM_ANDROID_KHR void CoreChecks::PostCallRecordCreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } #endif // VK_USE_PLATFORM_ANDROID_KHR #ifdef VK_USE_PLATFORM_IOS_MVK void CoreChecks::PostCallRecordCreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } #endif // VK_USE_PLATFORM_IOS_MVK #ifdef VK_USE_PLATFORM_MACOS_MVK void CoreChecks::PostCallRecordCreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } #endif // VK_USE_PLATFORM_MACOS_MVK #ifdef VK_USE_PLATFORM_WAYLAND_KHR void CoreChecks::PostCallRecordCreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, 
struct wl_display *display) { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidatePhysicalDeviceQueueFamily(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306", "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_WAYLAND_KHR #ifdef VK_USE_PLATFORM_WIN32_KHR void CoreChecks::PostCallRecordCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidatePhysicalDeviceQueueFamily(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309", "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_WIN32_KHR #ifdef VK_USE_PLATFORM_XCB_KHR void CoreChecks::PostCallRecordCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t *connection, xcb_visualid_t visual_id) { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidatePhysicalDeviceQueueFamily(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312", "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_XCB_KHR #ifdef VK_USE_PLATFORM_XLIB_KHR void CoreChecks::PostCallRecordCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display *dpy, VisualID visualID) { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidatePhysicalDeviceQueueFamily(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315", "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_XLIB_KHR void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR *pSurfaceCapabilities, VkResult result) { if (VK_SUCCESS != result) return; auto physical_device_state = GetPhysicalDeviceState(physicalDevice); physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS; physical_device_state->surfaceCapabilities = *pSurfaceCapabilities; } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, VkSurfaceCapabilities2KHR *pSurfaceCapabilities, VkResult result) { if (VK_SUCCESS != result) return; auto physical_device_state = GetPhysicalDeviceState(physicalDevice); 
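    // As with the non-2KHR path above, a successful query promotes the shared
    // call-state slot straight to QUERY_DETAILS; the capabilities cached here
    // are consumed later when validating swapchain creation.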
physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS; physical_device_state->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities; } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT *pSurfaceCapabilities, VkResult result) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS; physical_device_state->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount; physical_device_state->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount; physical_device_state->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent; physical_device_state->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent; physical_device_state->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent; physical_device_state->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers; physical_device_state->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms; physical_device_state->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform; physical_device_state->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha; physical_device_state->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags; } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32 *pSupported) { const auto physical_device_state = GetPhysicalDeviceState(physicalDevice); return ValidatePhysicalDeviceQueueFamily(physical_device_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269", "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex"); } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32 *pSupported, VkResult result) { if (VK_SUCCESS != result) return; auto surface_state = GetSurfaceState(surface); surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE); } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; // TODO: This isn't quite right -- available modes may differ by surface AND physical device. 
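    // Track the two-call enumeration idiom: a count-only query promotes the
    // call state to QUERY_COUNT, and a query that actually fills
    // pPresentModes promotes it to QUERY_DETAILS.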
auto physical_device_state = GetPhysicalDeviceState(physicalDevice); auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState; if (*pPresentModeCount) { if (call_state < QUERY_COUNT) call_state = QUERY_COUNT; if (*pPresentModeCount > physical_device_state->present_modes.size()) physical_device_state->present_modes.resize(*pPresentModeCount); } if (pPresentModes) { if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS; for (uint32_t i = 0; i < *pPresentModeCount; i++) { physical_device_state->present_modes[i] = pPresentModes[i]; } } } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats) { if (!pSurfaceFormats) return false; auto physical_device_state = GetPhysicalDeviceState(physicalDevice); auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState; bool skip = false; switch (call_state) { case UNCALLED: // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application didn't // previously call this function with a NULL value of pSurfaceFormats: skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), kVUID_Core_DevLimit_MustQueryCount, "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior " "positive value has been seen for pSurfaceFormats."); break; default: auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size(); if (prev_format_count != *pSurfaceFormatCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), kVUID_Core_DevLimit_CountMismatch, "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with " "pSurfaceFormats set to a value (%u) that is greater than the value (%u) that was returned " "when pSurfaceFormatCount was NULL.", *pSurfaceFormatCount, prev_format_count); } break; } return skip; } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; auto physical_device_state = GetPhysicalDeviceState(physicalDevice); auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState; if (*pSurfaceFormatCount) { if (call_state < QUERY_COUNT) call_state = QUERY_COUNT; if (*pSurfaceFormatCount > physical_device_state->surface_formats.size()) physical_device_state->surface_formats.resize(*pSurfaceFormatCount); } if (pSurfaceFormats) { if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS; for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) { physical_device_state->surface_formats[i] = pSurfaceFormats[i]; } } } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; auto physicalDeviceState = GetPhysicalDeviceState(physicalDevice); if (*pSurfaceFormatCount) { if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) { 
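            // Promote the call state forward only; a later count-only query
            // must not demote a device that has already reached QUERY_DETAILS.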
physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT; } if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size()) physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount); } if (pSurfaceFormats) { if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) { physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS; } for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) { physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat; } } } void CoreChecks::PreCallRecordCmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) { BeginCmdDebugUtilsLabel(report_data, commandBuffer, pLabelInfo); } void CoreChecks::PostCallRecordCmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) { EndCmdDebugUtilsLabel(report_data, commandBuffer); } void CoreChecks::PreCallRecordCmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) { InsertCmdDebugUtilsLabel(report_data, commandBuffer, pLabelInfo); // Squirrel away an easily accessible copy. GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); cb_state->debug_label = LoggingLabel(pLabelInfo); } void CoreChecks::PostRecordEnumeratePhysicalDeviceGroupsState(uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) { if (NULL != pPhysicalDeviceGroupProperties) { for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) { for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) { VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j]; auto &phys_device_state = physical_device_map[cur_phys_dev]; phys_device_state.phys_device = cur_phys_dev; // Init actual features for each physical device DispatchGetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features2.features); } } } } void CoreChecks::PostCallRecordEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; PostRecordEnumeratePhysicalDeviceGroupsState(pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties); } void CoreChecks::PostCallRecordEnumeratePhysicalDeviceGroupsKHR(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; PostRecordEnumeratePhysicalDeviceGroupsState(pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties); } bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo) { bool skip = false; const auto layout = GetDescriptorSetLayout(this, pCreateInfo->descriptorSetLayout); if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) { auto ds_uint = HandleToUint64(pCreateInfo->descriptorSetLayout); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, ds_uint, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350", "%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name, report_data->FormatHandle(ds_uint).c_str()); } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) { auto bind_point = pCreateInfo->pipelineBindPoint; bool valid_bp = 
(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE); if (!valid_bp) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351", "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point)); } const auto pipeline_layout = GetPipelineLayout(pCreateInfo->pipelineLayout); if (!pipeline_layout) { uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352", "%s: Invalid pCreateInfo->pipelineLayout (%s)", func_name, report_data->FormatHandle(pl_uint).c_str()); } else { const uint32_t pd_set = pCreateInfo->set; if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] || !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) { uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353", "%s: pCreateInfo->set (%" PRIu32 ") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).", func_name, pd_set, report_data->FormatHandle(pl_uint).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) { bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo); return skip; } bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) { bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo); return skip; } void CoreChecks::PreCallRecordDestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const VkAllocationCallbacks *pAllocator) { if (!descriptorUpdateTemplate) return; desc_template_map.erase(descriptorUpdateTemplate); } void CoreChecks::PreCallRecordDestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const VkAllocationCallbacks *pAllocator) { if (!descriptorUpdateTemplate) return; desc_template_map.erase(descriptorUpdateTemplate); } void CoreChecks::RecordCreateDescriptorUpdateTemplateState(const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) { safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo); std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info)); desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state); } void CoreChecks::PostCallRecordCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate, VkResult result) { if (VK_SUCCESS != result) return; 
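    // State is recorded only for successful creates; the KHR alias below
    // funnels into the same RecordCreateDescriptorUpdateTemplateState helper.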
RecordCreateDescriptorUpdateTemplateState(pCreateInfo, pDescriptorUpdateTemplate); } void CoreChecks::PostCallRecordCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate, VkResult result) { if (VK_SUCCESS != result) return; RecordCreateDescriptorUpdateTemplateState(pCreateInfo, pDescriptorUpdateTemplate); } bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) { bool skip = false; auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate); if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) { // Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds // but retaining the assert as template support is new enough to want to investigate these in debug builds. assert(0); } else { const TEMPLATE_STATE *template_state = template_map_entry->second.get(); // TODO: Validate template push descriptor updates if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) { skip = ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData); } } return skip; } bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) { return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData); } bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) { return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData); } void CoreChecks::RecordUpdateDescriptorSetWithTemplateState(VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) { auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate); if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) { assert(0); } else { const TEMPLATE_STATE *template_state = template_map_entry->second.get(); // TODO: Record template push descriptor updates if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) { PerformUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData); } } } void CoreChecks::PreCallRecordUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) { RecordUpdateDescriptorSetWithTemplateState(descriptorSet, descriptorUpdateTemplate, pData); } void CoreChecks::PreCallRecordUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) { RecordUpdateDescriptorSetWithTemplateState(descriptorSet, descriptorUpdateTemplate, pData); } static std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> GetDslFromPipelineLayout(PIPELINE_LAYOUT_NODE const *layout_data, uint32_t set) { std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> dsl = nullptr; if (layout_data && (set < layout_data->set_layouts.size())) { dsl = 
layout_data->set_layouts[set]; } return dsl; } bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void *pData) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); const char *const func_name = "vkPushDescriptorSetWithTemplateKHR()"; bool skip = false; skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, func_name); auto layout_data = GetPipelineLayout(layout); auto dsl = GetDslFromPipelineLayout(layout_data, set); const auto layout_u64 = HandleToUint64(layout); // Validate the set index points to a push descriptor set and is in range if (dsl) { if (!dsl->IsPushDescriptor()) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00365", "%s: Set index %" PRIu32 " does not match push descriptor set layout index for VkPipelineLayout %s.", func_name, set, report_data->FormatHandle(layout_u64).c_str()); } } else if (layout_data && (set >= layout_data->set_layouts.size())) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00364", "%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout %s (set < %" PRIu32 ").", func_name, set, report_data->FormatHandle(layout_u64).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size())); } const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate); if (template_state) { const auto &template_ci = template_state->create_info; static const std::map<VkPipelineBindPoint, std::string> bind_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")}; skip |= ValidatePipelineBindPoint(cb_state, template_ci.pipelineBindPoint, func_name, bind_errors); if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_TemplateType, "%s: descriptorUpdateTemplate %s was not created with flag " "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.", func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str()); } if (template_ci.set != set) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_Template_SetMismatched, "%s: descriptorUpdateTemplate %s created with set %" PRIu32 " does not match command parameter set %" PRIu32 ".", func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set); } if (!CompatForSet(set, layout_data, GetPipelineLayout(template_ci.pipelineLayout))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched, "%s: descriptorUpdateTemplate %s created with pipelineLayout %s is incompatible with command 
parameter " "layout %s for set %" PRIu32, func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), report_data->FormatHandle(template_ci.pipelineLayout).c_str(), report_data->FormatHandle(layout).c_str(), set); } } if (dsl && template_state) { // Create an empty proxy in order to use the existing descriptor set update validation cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, VK_NULL_HANDLE, dsl, 0, this); // Decode the template into a set of write updates cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData, dsl->GetDescriptorSetLayout()); // Validate the decoded update against the proxy_ds skip |= proxy_ds.ValidatePushDescriptorsUpdate(report_data, static_cast<uint32_t>(decoded_template.desc_writes.size()), decoded_template.desc_writes.data(), func_name); } return skip; } void CoreChecks::PreCallRecordCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void *pData) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate); if (template_state) { auto layout_data = GetPipelineLayout(layout); auto dsl = GetDslFromPipelineLayout(layout_data, set); const auto &template_ci = template_state->create_info; if (dsl && !dsl->IsDestroyed()) { // Decode the template into a set of write updates cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData, dsl->GetDescriptorSetLayout()); RecordCmdPushDescriptorSetState(cb_state, template_ci.pipelineBindPoint, layout, set, static_cast<uint32_t>(decoded_template.desc_writes.size()), decoded_template.desc_writes.data()); } } } void CoreChecks::RecordGetPhysicalDeviceDisplayPlanePropertiesState(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, void *pProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); if (*pPropertyCount) { if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) { physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT; } physical_device_state->display_plane_property_count = *pPropertyCount; } if (pProperties) { if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) { physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS; } } } void CoreChecks::PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, VkDisplayPlanePropertiesKHR *pProperties, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; RecordGetPhysicalDeviceDisplayPlanePropertiesState(physicalDevice, pPropertyCount, pProperties); } void CoreChecks::PostCallRecordGetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, VkDisplayPlaneProperties2KHR *pProperties, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; RecordGetPhysicalDeviceDisplayPlanePropertiesState(physicalDevice, pPropertyCount, pProperties); } bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex, const char *api_name) { bool skip = false; auto physical_device_state = GetPhysicalDeviceState(physicalDevice); if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) { skip |= 
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), kVUID_Core_Swapchain_GetSupportedDisplaysWithoutQuery, "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR " "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR.", api_name); } else { if (planeIndex >= physical_device_state->display_plane_property_count) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249", "%s(): planeIndex must be in the range [0, %d] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR " "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?", api_name, physical_device_state->display_plane_property_count - 1); } } return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneSupportedDisplaysKHR"); return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneCapabilitiesKHR"); return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR *pCapabilities) { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex, "vkGetDisplayPlaneCapabilities2KHR"); return skip; } bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); return ValidateCmd(cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()"); } bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); assert(cb_state); return ValidateCmd(cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()"); } bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); // Minimal validation for command buffer state return ValidateCmd(cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()"); } bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT *pSampleLocationsInfo) { GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); // Minimal validation for command buffer state return ValidateCmd(cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()"); } bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name, const VkSamplerYcbcrConversionCreateInfo *create_info) { bool skip = false; if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) { skip |= 
ValidateCreateSamplerYcbcrConversionANDROID(create_info); } else { // Not android hardware buffer if (VK_FORMAT_UNDEFINED == create_info->format) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01649", "%s: CreateInfo format type is VK_FORMAT_UNDEFINED.", func_name); } } return skip; } bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) { return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversion()", pCreateInfo); } bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) { return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversionKHR()", pCreateInfo); } void CoreChecks::RecordCreateSamplerYcbcrConversionState(const VkSamplerYcbcrConversionCreateInfo *create_info, VkSamplerYcbcrConversion ycbcr_conversion) { if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) { RecordCreateSamplerYcbcrConversionANDROID(create_info, ycbcr_conversion); } } void CoreChecks::PostCallRecordCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion, VkResult result) { if (VK_SUCCESS != result) return; RecordCreateSamplerYcbcrConversionState(pCreateInfo, *pYcbcrConversion); } void CoreChecks::PostCallRecordCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion, VkResult result) { if (VK_SUCCESS != result) return; RecordCreateSamplerYcbcrConversionState(pCreateInfo, *pYcbcrConversion); } void CoreChecks::PostCallRecordDestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks *pAllocator) { if (!ycbcrConversion) return; if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) { RecordDestroySamplerYcbcrConversionANDROID(ycbcrConversion); } } void CoreChecks::PostCallRecordDestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks *pAllocator) { if (!ycbcrConversion) return; if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) { RecordDestroySamplerYcbcrConversionANDROID(ycbcrConversion); } } bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT *pInfo) { bool skip = false; if (!GetEnabledFeatures()->buffer_address.bufferDeviceAddress) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(pInfo->buffer), "VUID-vkGetBufferDeviceAddressEXT-None-02598", "The bufferDeviceAddress feature must: be enabled."); } if (physical_device_count > 1 && !GetEnabledFeatures()->buffer_address.bufferDeviceAddressMultiDevice) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(pInfo->buffer), "VUID-vkGetBufferDeviceAddressEXT-device-02599", "If device was created with multiple physical devices, then the " 
"bufferDeviceAddressMultiDevice feature must: be enabled."); } auto buffer_state = GetBufferState(pInfo->buffer); if (buffer_state) { if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT)) { skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkGetBufferDeviceAddressEXT()", "VUID-VkBufferDeviceAddressInfoEXT-buffer-02600"); } skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT, true, "VUID-VkBufferDeviceAddressInfoEXT-buffer-02601", "vkGetBufferDeviceAddressEXT()", "VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT"); } return skip; } void CoreChecks::PreCallRecordGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pPhysicalDeviceProperties) { if (GetEnables()->gpu_validation && GetEnables()->gpu_validation_reserve_binding_slot) { if (pPhysicalDeviceProperties->limits.maxBoundDescriptorSets > 1) { pPhysicalDeviceProperties->limits.maxBoundDescriptorSets -= 1; } else { log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), "UNASSIGNED-GPU-Assisted Validation Setup Error.", "Unable to reserve descriptor binding slot on a device with only one slot."); } } } VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkValidationCacheEXT *pValidationCache) { *pValidationCache = ValidationCache::Create(pCreateInfo); return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED; } void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks *pAllocator) { delete CastFromHandle<ValidationCache *>(validationCache); } VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize, void *pData) { size_t inSize = *pDataSize; CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData); return (pData && *pDataSize != inSize) ? 
VK_INCOMPLETE : VK_SUCCESS; } VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT *pSrcCaches) { bool skip = false; auto dst = CastFromHandle<ValidationCache *>(dstCache); VkResult result = VK_SUCCESS; for (uint32_t i = 0; i < srcCacheCount; i++) { auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]); if (src == dst) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT, 0, "VUID-vkMergeValidationCachesEXT-dstCache-01536", "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.", HandleToUint64(dstCache)); result = VK_ERROR_VALIDATION_FAILED_EXT; } if (!skip) { dst->Merge(src); } } return result; } bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) { bool skip = false; GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer); skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00108"); skip |= ValidateDeviceMaskToZero(deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00109"); skip |= ValidateDeviceMaskToCommandBuffer(cb_state, deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00110"); if (cb_state->activeRenderPass) { skip |= ValidateDeviceMaskToRenderPass(cb_state, deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00111"); } return skip; }
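For context, the QUERY_COUNT / QUERY_DETAILS state machine tracked throughout this file follows Vulkan's standard two-call enumeration idiom. A minimal application-side sketch, with illustrative names (pd is assumed to be a valid VkPhysicalDevice obtained from vkEnumeratePhysicalDevices):

#include <vector>
#include <vulkan/vulkan.h>

// The first call, with a null array, records QUERY_COUNT; the second call,
// which fills the array, records QUERY_DETAILS. Skipping the first call is
// what triggers the "must query count" warnings emitted above.
std::vector<VkQueueFamilyProperties> QueryQueueFamilies(VkPhysicalDevice pd) {
    uint32_t count = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(pd, &count, nullptr);
    std::vector<VkQueueFamilyProperties> props(count);
    vkGetPhysicalDeviceQueueFamilyProperties(pd, &count, props.data());
    return props;
}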
1
10686
This one should probably be added to the list in #24. It's possibly indicative of an issue, but there isn't a VUID for it.
KhronosGroup-Vulkan-ValidationLayers
cpp
@@ -139,13 +139,11 @@ def build_network_url(project, network): The fully qualified network url for the given project/network. """ return (u'%(root)s%(api_name)s/%(version)s/projects/%(project)s/global/' - 'networks/%(network)s') % { - 'api_name': API_NAME, - 'network': network, - 'project': project, - 'root': API_ROOT, - 'version': API_VERSION - } + 'networks/%(network)s') % {'api_name': API_NAME, + 'network': network, + 'project': project, + 'root': API_ROOT, + 'version': API_VERSION} class ComputeFirewallAPI(object):
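The reformatting above is behavior-preserving; it only reflows the %-interpolation mapping. With the module constants defined below (API_ROOT = 'https://www.googleapis.com/', API_NAME = 'compute', API_VERSION = 'beta'), a hypothetical call would render as:

# Illustrative call; 'my-proj' and 'my-network' are placeholder names.
build_network_url('my-proj', 'my-network')
# 'https://www.googleapis.com/compute/beta/projects/my-proj/global/networks/my-network'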
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Core classes for firewall policy enforcement and calls to the compute API. Simplifies the interface with the compute API for managing firewall policies. """ import hashlib import httplib import json import operator import socket import ssl import time from googleapiclient import errors import httplib2 from retrying import retry from google.apputils import datelib from google.cloud.forseti.common.util import logger # TODO: The next editor must remove this disable and correct issues. # pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc # pylint: disable=missing-param-doc,missing-raises-doc,too-many-lines # The name of the GCE API. API_NAME = 'compute' # The root of the GCE API. API_ROOT = 'https://www.googleapis.com/' # The version of the GCE API to use. API_VERSION = 'beta' # The compute engine scope. SCOPE = 'https://www.googleapis.com/auth/compute' LOGGER = logger.get_logger(__name__) # What transient exceptions should be retried. RETRY_EXCEPTIONS = (httplib.ResponseNotReady, httplib.IncompleteRead, httplib2.ServerNotFoundError, socket.error, ssl.SSLError,) # Allowed items in a firewall rule. ALLOWED_RULE_ITEMS = frozenset(('allowed', 'denied', 'description', 'direction', 'name', 'network', 'priority', 'sourceRanges', 'destinationRanges', 'sourceTags', 'targetTags')) # Maximum time to allow an active API operation to wait for status=Done OPERATION_TIMEOUT = 600.0 class Error(Exception): """Base error class for the module.""" class InvalidFirewallRuleError(Error): """Raised if a firewall rule doesn't look like a firewall rule should.""" class FirewallRuleValidationError(Error): """Raised if a firewall rule fails validation.""" class DuplicateFirewallRuleNameError(Error): """Raised if a rule name is reused in a policy, names must be unique.""" class FirewallEnforcementFailedError(Error): """Updating firewall for project failed.""" class FirewallEnforcementInsertFailedError(FirewallEnforcementFailedError): """Insertion of a firewall rule failed.""" class FirewallEnforcementUpdateFailedError(FirewallEnforcementFailedError): """Update of a firewall rule failed.""" class FirewallEnforcementDeleteFailedError(FirewallEnforcementFailedError): """Deletion of a firewall rule failed.""" class NetworkImpactValidationError(FirewallEnforcementFailedError): """Raised if a firewall rule is to be applied to a disallowed network.""" class EmptyProposedFirewallRuleSetError(FirewallEnforcementFailedError): """Raised if the proposed firewall rule set is empty.""" class FirewallQuotaExceededError(FirewallEnforcementFailedError): """Raised if the proposed changes would exceed firewall quota.""" def http_retry(e): """retry_on_exception for retry. Returns True for exceptions to retry.""" if isinstance(e, RETRY_EXCEPTIONS): return True return False def get_network_name_from_url(network_url): """Given a network URL, return the name of the network. 
Args: network_url: str - the fully qualified network url, such as (https://www.googleapis.com/compute/v1/projects/' 'my-proj/global/networks/my-network') Returns: str - the network name, my-network in the previous example """ return network_url.split('/')[-1] def build_network_url(project, network): """Render the network url from project and network names. Args: project: A str- The name of the GCE project to operate upon. network: A str- The name of the network to operate upon. Returns: The fully qualified network url for the given project/network. """ return (u'%(root)s%(api_name)s/%(version)s/projects/%(project)s/global/' 'networks/%(network)s') % { 'api_name': API_NAME, 'network': network, 'project': project, 'root': API_ROOT, 'version': API_VERSION } class ComputeFirewallAPI(object): """Wrap calls to the Google Compute Engine API. API calls are decorated with retry to ensure temporary network errors do not cause failures. If initialized in dry run mode, calls which could modify the compute project are no-ops and always return a successful result. """ def __init__(self, gce_service, dry_run=False): """Constructor. Args: gce_service: A GCE service object built using the discovery API. dry_run: Bool - True to perform a dry run for reporting firewall changes. """ self.gce_service = gce_service self._dry_run = dry_run # pylint: disable=no-self-use @retry( retry_on_exception=http_retry, wait_exponential_multiplier=1000, stop_max_attempt_number=4) def _execute(self, request): """Execute the request and retry logic.""" return request.execute(num_retries=4) # pylint: enable=no-self-use def list_networks(self, project, fields=None): """List the networks associated with a GCE project. Args: project: The id of the project to query. fields: If defined, limits the response to a subset of all fields. Returns: The GCE response. """ LOGGER.debug('Listing networks...') request = self.gce_service.networks().list( project=project, fields=fields) return self._execute(request) def list_firewalls(self, project, page_token=None): """List the firewalls of a given project. Args: project: The id of the project to query. page_token: A str or None- if set, then a pageToken to pass to the GCE api call. Returns: The GCE response. """ LOGGER.debug('Listing firewalls...') request = self.gce_service.firewalls().list( project=project, pageToken=page_token) return self._execute(request) def get_firewalls_quota(self, project): """Fetch the current FIREWALLS quota for the project. Args: project: The id of the project to query. Returns: A dictionary with three keys, metric, limit and usage. Example: {"metric": "FIREWALLS", "limit": 100, "usage": 9} """ request = self.gce_service.projects().get( project=project, fields='quotas') response = self._execute(request) for quota in response.get('quotas', []): if quota.get('metric', '') == 'FIREWALLS': return quota return {} def delete_firewall_rule(self, project, rule): """Delete firewall rules. Args: project: The id of the project to modify. rule: The firewall rule dict to delete. Returns: The GCE response. """ LOGGER.info('Deleting firewall rule %s on project %s. Deleted rule: %s', rule['name'], project, json.dumps(rule)) if self._dry_run: return self._create_dry_run_response(rule['name']) request = self.gce_service.firewalls().delete( firewall=rule['name'], project=project) return self._execute(request) def insert_firewall_rule(self, project, rule): """Insert a firewall rule. Args: project: The id of the project to modify. rule: The firewall rule dict to add. 
Returns: The GCE response. """ LOGGER.info( 'Inserting firewall rule %s on project %s. Inserted rule: %s', rule['name'], project, json.dumps(rule)) if self._dry_run: return self._create_dry_run_response(rule['name']) request = self.gce_service.firewalls().insert( body=rule, project=project) return self._execute(request) def update_firewall_rule(self, project, rule): """Update a firewall rule. Args: project: The id of the project to modify. rule: The firewall rule dict to update. Returns: The GCE response. """ LOGGER.info('Updating firewall rule %s on project %s. Updated rule: %s', rule['name'], project, json.dumps(rule)) if self._dry_run: return self._create_dry_run_response(rule['name']) request = self.gce_service.firewalls().update( body=rule, firewall=rule['name'], project=project) return self._execute(request) # TODO: Investigate improving so we can avoid the pylint disable. # pylint: disable=too-many-locals def wait_for_any_to_complete(self, project, responses, timeout=0): """Wait for one or more requests to complete. Args: project: The id of the project to query. responses: A list of Response objects from GCE for the operation. timeout: An optional maximum time in seconds to wait for an operation to complete. Operations that exceed the timeout are marked as Failed. Returns: A tuple of (completed, still_running) requests. """ started_timestamp = time.time() while True: completed_operations = [] running_operations = [] for response in responses: status = response['status'] if status == 'DONE': completed_operations.append(response) continue operation_name = response['name'] LOGGER.debug('Checking on operation %s', operation_name) request = self.gce_service.globalOperations().get( project=project, operation=operation_name) response = self._execute(request) status = response['status'] LOGGER.info('status of %s is %s', operation_name, status) if response['status'] == 'DONE': completed_operations.append(response) continue if timeout and time.time() - started_timestamp > timeout: # Add a timeout error to the response LOGGER.error( 'Operation %s did not complete before timeout of %f, ' 'marking operation as failed.', operation_name, timeout) response.setdefault('error', {}).setdefault( 'errors', []).append({ 'code': 'OPERATION_TIMEOUT', 'message': ( 'Operation exceeded timeout for completion ' 'of %0.2f seconds' % timeout) }) completed_operations.append(response) else: # Operation still running running_operations.append(response) if completed_operations or not responses: break else: time.sleep(2) for response in completed_operations: try: op_insert_timestamp = datelib.Timestamp.FromString( response.get('insertTime', '')).AsSecondsSinceEpoch() op_start_timestamp = datelib.Timestamp.FromString( response.get('startTime', '')).AsSecondsSinceEpoch() op_end_timestamp = datelib.Timestamp.FromString( response.get('endTime', '')).AsSecondsSinceEpoch() except ValueError: op_insert_timestamp = op_start_timestamp = op_end_timestamp = 0 op_wait_time = op_end_timestamp - op_insert_timestamp op_exec_time = op_end_timestamp - op_start_timestamp LOGGER.info('Operation %s completed. 
Operation type: %s, ' 'request time: %s, start time: %s, finished time: %s, ' 'req->end seconds: %i, start->end seconds: %i.', response.get('name', ''), response.get('operationType', ''), response.get('insertTime', ''), response.get('startTime', ''), response.get('endTime', ''), op_wait_time, op_exec_time) LOGGER.debug('Operation response object: %r', response) return (completed_operations, running_operations) def wait_for_all_to_complete(self, project, responses, timeout=0): """Wait for all requests to complete. Args: project: The id of the project to query. responses: A list of Response objects from GCE for the operation. timeout: An optional maximum time in seconds to wait for an operation to complete. Operations that exceed the timeout are marked as Failed. Returns: A list of completed requests. """ completed_operations = [] running_operations = responses while running_operations: (completed, running_operations) = (self.wait_for_any_to_complete( project, running_operations, timeout)) completed_operations.extend(completed) return completed_operations # pylint: disable=no-self-use # TODO: Investigate fixing the pylint issue. def is_successful(self, response): """Checks if the operation finished with no errors. If the operation response contains an 'error' key, then the error code is checked. Any error code that is not ignored causes this to return False. Args: response: A GlobalOperations response object from an API call. Returns: True if there were no errors, or all errors are ignored, otherwise False. """ success = True if 'error' in response: # 'error' should always contains an 'errors' list: if 'errors' in response['error']: for error in response['error']['errors']: # TODO: Verify current codes. # We ignore the following errors: # RESOURCE_ALREADY_EXISTS: Because another program somewhere # else could have already added the rule. # INVALID_FIELD_VALUE: Because the network probably # disappeared out from under us. if error.get('code') in [ 'RESOURCE_ALREADY_EXISTS', 'INVALID_FIELD_VALUE' ]: LOGGER.warn('Ignoring error: %s', error) else: LOGGER.error('Response has error: %s', error) success = False else: LOGGER.error('Unknown error response: %s', response['error']) success = False return success # pylint: disable=no-self-use # TODO: Investigate fixing the pylint issue. def _create_dry_run_response(self, rule_name): """A fake successful completed response. This is used for dry run execution to prevent any changes to the existing firewall rules on a project. Args: rule_name: The name of the firewall rule this response is for. Returns: A fake successful completed response. """ return {'status': 'DONE', 'name': rule_name} class FirewallRules(object): """A collection of validated firewall rules.""" DEFAULT_PRIORITY = 1000 DEFAULT_DIRECTION = 'INGRESS' def __init__(self, project, rules=None, add_rule_callback=None): """Constructor. Args: project: The GCE project id the rules apply to. rules: A list of rule dicts to add to the object. add_rule_callback: A callback function that checks whether a firewall rule should be applied. If the callback returns False, that rule will not be modified. Raises: DuplicateFirewallRuleNameError: Two or more rules have the same name. InvalidFirewallRuleError: One or more rules failed validation. 
""" self._project = project self.rules = {} self._add_rule_callback = add_rule_callback if rules: self.add_rules(rules) def __eq__(self, other): """Equality.""" return self.rules == other.rules def __ne__(self, other): """Not Equal.""" return self.rules != other.rules def add_rules_from_api(self, firewall_api): """Loads rules from compute.firewalls().list(). Args: firewall_api: A ComputeFirewallAPI instance for interfacing with GCE API. Raises: DuplicateFirewallRuleNameError: Two rules have the same name. InvalidFirewallRuleError: A rule failed validation. """ if self.rules: LOGGER.warn( 'Can not import rules from the API into a FirewallRules ' 'object with rules already added') return page_token = '' while True: if page_token: response = firewall_api.list_firewalls( self._project, page_token=page_token) else: response = firewall_api.list_firewalls(self._project) for item in response.get('items', []): rule = dict([(key, item[key]) for key in ALLOWED_RULE_ITEMS if key in item]) self.add_rule(rule) # Are there additional pages of data? if 'nextPageToken' in response: page_token = response['nextPageToken'] else: break def add_rules(self, rules, network_name=None): """Adds rules from a list of rule dicts. Args: rules: A list of rule dicts to add to the object network_name: If set, rules which have no network currently defined will have their network set to network_name, and network_name will be prepended to the rule name. Rules that do have a network defined have their network matched against network_name, and if they differ the rule is not added. Raises: DuplicateFirewallRuleNameError: Two or more rules have the same name. InvalidFirewallRuleError: One or more rules failed validation. """ for rule in rules: self.add_rule(rule, network_name=network_name) def add_rule(self, rule, network_name=None): """Adds rule to the self.rules dictionary. Args: rule: A valid dict representing a GCE firewall rule network_name: If set, rules which have no network currently defined will have their network set to network_name, and network_name will be prepended to the rule name. Rules that do have a network defined have their network matched against network_name, and if they differ the rule is not added. Raises: DuplicateFirewallRuleNameError: Two or more rules have the same name. InvalidFirewallRuleError: One or more rules failed validation. """ if not isinstance(rule, dict): raise InvalidFirewallRuleError( 'Invalid rule type. Found %s expected dict' % type(rule)) new_rule = self._order_lists_in_rule(rule) if network_name: if 'network' in new_rule: rule_network = get_network_name_from_url(new_rule['network']) if rule_network != network_name: # Don't add the rule if it's network does not match # network_name LOGGER.info('Firewall rule does not apply to network %s, ' 'skipping: %s', rule_network, json.dumps(new_rule)) return else: new_rule['network'] = build_network_url(self._project, network_name) # Update the rule name by prepending the network, so it is # unique. If the new rule does not have a name defined it will # fail the _check_rule_before_adding validation and an # InvalidFirewallRuleError exception will be raised. if 'name' in new_rule: # Truncate network name if too long. This may result in # duplicate rule names, which will cause the network name # to be changed to a md5 hash representation. 
new_name = '%s-%s' % ( network_name[:(62 - len(new_rule['name']))], new_rule['name']) while new_name in self.rules: # Firewall rule names must start with [a-z], hashes # could start with a number, so we prepend hn- # (hashed network) to the name. network_name = 'hn-' + hashlib.md5( network_name).hexdigest() new_name = '%s-%s' % ( network_name[:(62 - len(new_rule['name']))], new_rule['name']) new_rule['name'] = new_name if 'priority' not in new_rule: new_rule['priority'] = self.DEFAULT_PRIORITY if 'direction' not in new_rule: new_rule['direction'] = self.DEFAULT_DIRECTION if self._check_rule_before_adding(new_rule): self.rules[new_rule['name']] = new_rule def filtered_by_networks(self, networks): """Returns the subset of rules that apply to the specified network(s). Args: networks: A list of one or more network names to fetch rules for. Returns: A dictionary of rules that apply to the filtered networks. """ filtered_rules = {} for rule_name, rule in self.rules.items(): if get_network_name_from_url(rule['network']) in networks: filtered_rules[rule_name] = rule return filtered_rules def as_json(self): """Export rules to a json string. The JSON string should be an array of Firewall resource objects, see https://cloud.google.com/compute/docs/reference/latest/firewalls for details. Only the fields in ALLOWED_RULE_ITEMS are permitted. Returns: A JSON string with an array of rules sorted by network and name. """ rules = sorted( self.rules.values(), key=operator.itemgetter('network', 'name')) return json.dumps(rules, sort_keys=True) def add_rules_from_json(self, json_rules): """Import rules from a json string as exported by as_json. The JSON string should be an array of Firewall resource objects, see https://cloud.google.com/compute/docs/reference/latest/firewalls for details. Only the fields in ALLOWED_RULE_ITEMS are permitted. The legacy format from older versions of GCE Enforcer is also supported. This format wraps the array of Firewall resources in a dictionary under the key 'items'. Args: json_rules: The JSON formatted string containing the rules to import. Raises: DuplicateFirewallRuleNameError: Two or more rules have the same name. InvalidFirewallRuleError: One or more rules failed validation. """ if self.rules: LOGGER.warn('Can not import from JSON into a FirewallRules object ' 'with rules already added') return rules = json.loads(json_rules) if isinstance(rules, list): for rule in rules: self.add_rule(rule) elif isinstance(rules, dict): if 'items' in rules: for item in rules['items']: rule = dict([(key, item[key]) for key in ALLOWED_RULE_ITEMS if key in item]) self.add_rule(rule) def _order_lists_in_rule(self, unsorted_rule): """Recursively iterates a rule dictionary and sorts any lists. This ensures that two rule with the same polices, but with unordered lists will compare equal when tested. Args: unsorted_rule: A rule dictionary that has not been sorted. Returns: A new rule dictionary with the lists sorted """ sorted_rule = {} for key, value in unsorted_rule.items(): if isinstance(value, list): if value and isinstance(value[0], dict): # List of dictionaries for i, entry in enumerate(value): value[i] = self._order_lists_in_rule(entry) sorted_rule[key] = sorted(value) elif isinstance(value, dict): sorted_rule[key] = self._order_lists_in_rule(value) else: sorted_rule[key] = value return sorted_rule # TODO: clean up break up into additional methods # pylint: disable=too-many-branches def _check_rule_before_adding(self, rule): """Validates that a rule is valid and not a duplicate. 
Validation is based on reference: https://cloud.google.com/compute/docs/reference/beta/firewalls and https://cloud.google.com/compute/docs/vpc/firewalls#gcp_firewall_rule_summary_table If add_rule_callback is set, this will also confirm that add_rule_callback returns True for the rule, otherwise it will not add the rule. Args: rule: The rule to validate. Returns: True if rule is valid, False if the add_rule_callback returns False. Raises: DuplicateFirewallRuleNameError: Two or more rules have the same name. InvalidFirewallRuleError: One or more rules failed validation. """ unknown_keys = set(rule.keys()) - ALLOWED_RULE_ITEMS if unknown_keys: # This is probably the result of a API version upgrade that didn't # properly update this function (or a broken binary). raise InvalidFirewallRuleError( 'An unexpected entry exists in a firewall rule dict: "%s".' % ','.join(list(unknown_keys))) for key in ['name', 'network']: if key not in rule: raise InvalidFirewallRuleError( 'Rule missing required field "%s": "%s".' % (key, rule)) if 'direction' not in rule or rule['direction'] == 'INGRESS': if 'sourceRanges' not in rule and 'sourceTags' not in rule: raise InvalidFirewallRuleError( 'Ingress rule missing required field oneof ' '"sourceRanges" or "sourceTags": "%s".' % rule) if 'destinationRanges' in rule: raise InvalidFirewallRuleError( 'Ingress rules cannot include "destinationRanges": "%s".' % rule) elif rule['direction'] == 'EGRESS': if 'sourceRanges' in rule or 'sourceTags' in rule: raise InvalidFirewallRuleError( 'Egress rules cannot include "sourceRanges", "sourceTags":' '"%s".' % rule) if 'destinationRanges' not in rule: raise InvalidFirewallRuleError( 'Egress rule missing required field "destinationRanges":' '"%s".'% rule) else: raise InvalidFirewallRuleError( 'Rule "direction" must be either "INGRESS" or "EGRESS": "%s".' % rule) max_256_value_keys = set( ['sourceRanges', 'sourceTags', 'targetTags', 'destinationRanges']) for key in max_256_value_keys: if key in rule and len(rule[key]) > 256: raise InvalidFirewallRuleError( 'Rule entry "%s" must contain 256 or fewer values: "%s".' % (key, rule)) if (('allowed' not in rule and 'denied' not in rule) or ('allowed' in rule and 'denied' in rule)): raise InvalidFirewallRuleError( 'Rule must contain oneof "allowed" or "denied" entries: ' ' "%s".' % rule) if 'allowed' in rule: for allow in rule['allowed']: if 'IPProtocol' not in allow: raise InvalidFirewallRuleError( 'Allow rule in %s missing required field ' '"IPProtocol": "%s".' % (rule['name'], allow)) elif 'denied' in rule: for deny in rule['denied']: if 'IPProtocol' not in deny: raise InvalidFirewallRuleError( 'Deny rule in %s missing required field ' '"IPProtocol": "%s".' % (rule['name'], deny)) if 'priority' in rule: try: priority = int(rule['priority']) except ValueError: raise InvalidFirewallRuleError( 'Rule "priority" could not be converted to an integer: ' '"%s".' % rule) if priority < 0 or priority > 65535: raise InvalidFirewallRuleError( 'Rule "priority" out of range 0-65535: "%s".' % rule) if len(rule['name']) > 63: raise InvalidFirewallRuleError( 'Rule name exceeds length limit of 63 chars: "%s".' 
% rule['name']) # TODO: Verify rule name matches regex of allowed # names from reference if rule['name'] in self.rules: raise DuplicateFirewallRuleNameError( 'Rule %s already defined in rules: %s' % (rule['name'], ', '.join(sorted(self.rules.keys())))) if self._add_rule_callback: if not self._add_rule_callback(rule): return False return True # pylint: enable=too-many-branches # pylint: disable=too-many-instance-attributes # TODO: Investigate improving so we can avoid the pylint disable. class FirewallEnforcer(object): """Enforce a set of firewall rules for use with GCE projects.""" def __init__(self, project, firewall_api, expected_rules, current_rules=None, project_sema=None, operation_sema=None, add_rule_callback=None): """Constructor. Args: project: The id of the cloud project to enforce the firewall on. firewall_api: A ComputeFirewallAPI instance for interfacing with GCE API. expected_rules: A FirewallRules object with the expected rules to be enforced on the project. current_rules: A FirewallRules object with the current rules for the project. If not defined, the API will be queried and the existing rules imported into current_rules when apply_firewall is called for the project. project_sema: An optional semaphore object, used to limit the number of concurrent projects getting written to. operation_sema: An optional semaphore object, used to limit the number of concurrent write operations on project firewalls. add_rule_callback: A callback function that checks whether a firewall rule should be applied. If the callback returns False, that rule will not be modified. """ self.project = project self.firewall_api = firewall_api self.expected_rules = expected_rules if current_rules: self.current_rules = current_rules else: self.current_rules = None self.project_sema = project_sema self.operation_sema = operation_sema self._add_rule_callback = add_rule_callback # Initialize private parameters self._rules_to_delete = [] self._rules_to_insert = [] self._rules_to_update = [] self._deleted_rules = [] self._inserted_rules = [] self._updated_rules = [] def apply_firewall(self, prechange_callback=None, networks=None, allow_empty_ruleset=False): """Enforce the expected firewall rules on the project. Args: prechange_callback: An optional callback function that will get called if the firewall policy for a project does not match the expected policy, before any changes are actually applied. If the callback returns False then no changes will be made to the project. If it returns True then the changes will be pushed. If prechange_callback is set to None then the callback will be skipped and enforcement will continue as though it had returned True. The callback template is callback_func(project, rules_to_delete, rules_to_insert, rules_to_update) The callback may be used to limit the kinds of firewall changes that are allowed to be pushed for a project, limit the number of rules that can get changed, to check if the project should have rules changed, etc. The callback may also raise FirewallEnforcementFailedError if it determines that the set of changes to the policy could result in an outage for an underlying service, or otherwise are inconsistent with business rules. This will cause the enforcement to fail. networks: A list of networks to limit rule changes to. Rules on networks not in the list will not be changed. Note- This can lead to duplicate rule name collisions since all rules are not included when building the change set. 
                The change set will be validated before getting enforced and
                any errors will cause a FirewallEnforcementFailedError
                exception to be raised.

            allow_empty_ruleset: If set to true and expected_rules has no
                rules, all current firewall rules will be deleted from the
                project.

        Returns:
            The total number of firewall rules deleted, inserted and updated.

        Raises:
            EmptyProposedFirewallRuleSetError: Raised if expected_rules
                contains no rules and allow_empty_ruleset is not set to True.
            FirewallEnforcementFailedError: An error occurred while updating
                the firewall. The calling code should validate the current
                state of the project firewall, and potentially revert to the
                old firewall rules.

                Any rules changed before the error occurred can be retrieved
                by calling the get_(deleted|inserted|updated)_rules methods.
        """
        # Reset change sets to empty lists
        self._rules_to_delete = []
        self._rules_to_insert = []
        self._rules_to_update = []

        if not self.current_rules:
            self.refresh_current_rules()

        if not self.expected_rules.rules and not allow_empty_ruleset:
            raise EmptyProposedFirewallRuleSetError(
                'No rules defined in the expected rules.')

        # Check if current rules match expected rules, so no changes are needed
        if networks:
            if (self.current_rules.filtered_by_networks(networks) ==
                    self.expected_rules.filtered_by_networks(networks)):
                LOGGER.info(
                    'Current and expected rules match for project %s on '
                    'network(s) "%s".', self.project, ','.join(networks))
                return 0
        elif self.current_rules == self.expected_rules:
            LOGGER.info('Current and expected rules match for project %s.',
                        self.project)
            return 0

        self._build_change_set(networks)
        self._validate_change_set(networks)

        delete_before_insert = self._check_change_operation_order(
            len(self._rules_to_insert), len(self._rules_to_delete))

        if self.project_sema:
            self.project_sema.acquire()
        try:
            if prechange_callback:
                if not prechange_callback(self.project, self._rules_to_delete,
                                          self._rules_to_insert,
                                          self._rules_to_update):
                    LOGGER.warn(
                        'The Prechange Callback returned False for project '
                        '%s, changes will not be applied.', self.project)
                    return 0

            changed_count = self._apply_change_set(delete_before_insert)
        finally:
            if self.project_sema:
                self.project_sema.release()

        return changed_count

    def refresh_current_rules(self):
        """Updates the current rules for the project using the compute API."""
        current_rules = FirewallRules(
            self.project, add_rule_callback=self._add_rule_callback)
        current_rules.add_rules_from_api(self.firewall_api)
        self.current_rules = current_rules

    def get_deleted_rules(self):
        """Returns the list of deleted rules."""
        return self._deleted_rules

    def get_inserted_rules(self):
        """Returns the list of inserted rules."""
        return self._inserted_rules

    def get_updated_rules(self):
        """Returns the list of updated rules."""
        return self._updated_rules

    def _build_change_set(self, networks=None):
        """Enumerate changes between the current and expected firewall rules."""
        if networks:
            # Build new firewall rules objects from the subset of rules for
            # networks
            current_rules = self.current_rules.filtered_by_networks(networks)
            expected_rules = self.expected_rules.filtered_by_networks(networks)
        else:
            current_rules = self.current_rules.rules
            expected_rules = self.expected_rules.rules

        for rule_name in current_rules:
            if rule_name not in expected_rules:
                self._rules_to_delete.append(rule_name)

        for rule_name in expected_rules:
            if rule_name not in current_rules:
                self._rules_to_insert.append(rule_name)

        for rule_name in expected_rules:
            if rule_name in current_rules:
                if expected_rules[rule_name] != current_rules[rule_name]:
                    self._rules_to_update.append(rule_name)

    def _validate_change_set(self, networks=None):
        """Validate that the changeset
        will not leave the project in a bad state."""
        for rule_name in self._rules_to_insert:
            if (rule_name in self.current_rules.rules and
                    rule_name not in self._rules_to_delete):
                raise FirewallRuleValidationError(
                    'The rule %s is in the rules to insert set, but the same '
                    'rule name already exists on project %s. It may be used '
                    'on a different network.' % (rule_name, self.project))

        if networks:
            for rule_name in self._rules_to_update:
                impacted_network = get_network_name_from_url(
                    self.current_rules.rules[rule_name]['network'])
                if impacted_network not in networks:
                    raise NetworkImpactValidationError(
                        'The rule %s is in the rules to update set, but it '
                        'is currently on a network, "%s", that is not in '
                        'the allowed networks list for project %s: "%s". '
                        'Updating the rule to %s would impact the wrong '
                        'network.' % (rule_name, impacted_network,
                                      self.project, ', '.join(networks),
                                      self.expected_rules.rules[rule_name]))

    def _check_change_operation_order(self, insert_count, delete_count):
        """Check if there is enough quota to insert the firewall changes first.

        If current usage is near the limit, check if deleting current rules
        before adding the new rules would allow the project to stay below
        quota.

        Args:
            insert_count: The number of rules that will be inserted.
            delete_count: The number of rules that will be deleted.

        Returns:
            True if existing rules should be deleted before new rules are
            inserted, otherwise false.

        Raises:
            FirewallQuotaExceededError: Raised if there is not enough quota
                for the required policy to be applied.
        """
        delete_before_insert = False
        firewalls_quota = self.firewall_api.get_firewalls_quota(self.project)
        if firewalls_quota:
            usage = firewalls_quota.get('usage', 0)
            limit = firewalls_quota.get('limit', 0)
            if usage + insert_count > limit:
                if usage - delete_count + insert_count > limit:
                    raise FirewallQuotaExceededError(
                        'Firewall enforcement cannot update the policy for '
                        'project %s without exceeding the current firewalls '
                        'quota: %u.' % (self.project, limit))
                else:
                    LOGGER.info('Switching to "delete first" rule update '
                                'order for project %s.', self.project)
                    delete_before_insert = True
        else:
            LOGGER.warn('Unknown firewall quota, switching to "delete first" '
                        'rule update order for project %s.', self.project)
            delete_before_insert = True

        return delete_before_insert

    def _apply_change_set(self, delete_before_insert):
        """Updates project firewall rules based on the generated changeset.

        Extends self._(deleted|inserted|updated)_rules with the rules changed
        by these operations.

        Args:
            delete_before_insert: If true, delete operations are completed
                before inserts. Otherwise insert operations are completed
                first.

        Returns:
            The total number of firewall rules deleted, inserted and updated.

        Raises:
            FirewallEnforcementFailedError: Raised if one or more changes
                fail.
""" change_count = 0 if delete_before_insert: change_count += self._delete_rules() change_count += self._insert_rules() else: change_count += self._insert_rules() change_count += self._delete_rules() change_count += self._update_rules() return change_count def _insert_rules(self): """Insert new rules into the project firewall.""" change_count = 0 if self._rules_to_insert: LOGGER.info('Inserting rules: %s', ', '.join(self._rules_to_insert)) rules = [ self.expected_rules.rules[rule_name] for rule_name in self._rules_to_insert ] insert_function = self.firewall_api.insert_firewall_rule (successes, failures, change_errors) = self._apply_change( insert_function, rules) self._inserted_rules.extend(successes) change_count += len(successes) if failures: raise FirewallEnforcementInsertFailedError( 'Firewall enforcement failed while inserting rules for ' 'project {}. The following errors were encountered: {}' .format(self.project, change_errors)) return change_count def _delete_rules(self): """Delete old rules from the project firewall.""" change_count = 0 if self._rules_to_delete: LOGGER.info('Deleting rules: %s', ', '.join(self._rules_to_delete)) rules = [ self.current_rules.rules[rule_name] for rule_name in self._rules_to_delete ] delete_function = self.firewall_api.delete_firewall_rule (successes, failures, change_errors) = self._apply_change( delete_function, rules) self._deleted_rules.extend(successes) change_count += len(successes) if failures: raise FirewallEnforcementDeleteFailedError( 'Firewall enforcement failed while deleting rules for ' 'project {}. The following errors were encountered: {}' .format(self.project, change_errors)) return change_count def _update_rules(self): """Update existing rules in the project firewall.""" change_count = 0 if self._rules_to_update: LOGGER.info('Updating rules: %s', ', '.join(self._rules_to_update)) rules = [ self.expected_rules.rules[rule_name] for rule_name in self._rules_to_update ] update_function = self.firewall_api.update_firewall_rule (successes, failures, change_errors) = self._apply_change( update_function, rules) self._updated_rules.extend(successes) change_count += len(successes) if failures: raise FirewallEnforcementUpdateFailedError( 'Firewall enforcement failed while deleting rules for ' 'project {}. The following errors were encountered: {}' .format(self.project, change_errors)) return change_count # pylint: disable=too-many-statements,too-many-branches,too-many-locals # TODO: Look at not having some of these disables. def _apply_change(self, firewall_function, rules): """Modify the firewall using the passed in function and rules. If self.operation_sema is defined, then the number of outstanding changes is limited to the number of semaphore locks that can be acquired. Args: firewall_function: The delete|insert|update function to call for this set of rules rules: A list of rules to pass to the firewall_function. Returns: A tuple with the rules successfully changed by this function and the rules that failed. """ applied_rules = [] failed_rules = [] change_errors = [] if not rules: return (applied_rules, failed_rules, change_errors) successes = [] failures = [] running_operations = [] finished_operations = [] operations = {} for rule in rules: if self.operation_sema: if not self.operation_sema.acquire(False): # Non-blocking # No semaphore available, wait for one or more ops to # complete. 
if running_operations: (completed, running_operations) = ( self.firewall_api.wait_for_any_to_complete( self.project, running_operations, OPERATION_TIMEOUT)) finished_operations.extend(completed) for response in completed: self.operation_sema.release() self.operation_sema.acquire(True) # Blocking try: response = firewall_function(self.project, rule) except errors.HttpError as e: LOGGER.error( 'Error changing firewall rule %s for project %s: %s', rule.get('name', ''), self.project, e) error_str = 'Rule: %s\nError: %s' % (rule.get('name', ''), e) change_errors.append(error_str) failed_rules.append(rule) if self.operation_sema: self.operation_sema.release() continue if 'name' in response: operations[response['name']] = rule running_operations.append(response) else: LOGGER.error('The response object returned by %r(%s, %s) is ' 'invalid. It does not contain a "name" key: %s', firewall_function, self.project, json.dumps(rule), json.dumps(response)) failed_rules.append(rule) if self.operation_sema: self.operation_sema.release() responses = self.firewall_api.wait_for_all_to_complete( self.project, running_operations, OPERATION_TIMEOUT) finished_operations.extend(responses) if self.operation_sema: for response in responses: self.operation_sema.release() for response in finished_operations: if self.firewall_api.is_successful(response): successes.append(response) else: failures.append(response) for result in successes: operation_name = result.get('name', '') if operation_name in operations: applied_rules.append(operations[operation_name]) else: LOGGER.warn( 'Successful result contained an unknown operation name, ' '"%s": %s', operation_name, json.dumps(result)) for result in failures: operation_name = result.get('name', '') if operation_name in operations: LOGGER.error( 'The firewall rule %s for project %s received the ' 'following error response during the last operation: %s', operations[operation_name], self.project, json.dumps(result)) failed_rules.append(operations[operation_name]) else: LOGGER.warn( 'Failure result contained an unknown operation name, ' '"%s": %s', operation_name, json.dumps(result)) return (applied_rules, failed_rules, change_errors)
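A minimal usage sketch of the enforcer flow defined above, for orientation only; the project id, the policy file path, and the `firewall_api` object are hypothetical stand-ins (a real ComputeFirewallAPI instance would come from the surrounding codebase):

# Hypothetical wiring: load the expected policy from a JSON dump, then
# let the enforcer compute and apply the delta for a single project.
expected = FirewallRules('my-project')
with open('expected_rules.json') as policy_file:  # hypothetical path
    expected.add_rules_from_json(policy_file.read())

enforcer = FirewallEnforcer('my-project', firewall_api, expected)
changed = enforcer.apply_firewall()  # total rules deleted/inserted/updated
LOGGER.info('Changed %d rules; deleted: %s', changed,
            enforcer.get_deleted_rules())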
1
29,382
Why this indentation change? Seems the original indentation would be fine, and leaves more room.
forseti-security-forseti-security
py
@@ -145,7 +145,7 @@ class MultiDimensionalMapping(Dimensioned): raise KeyError('Key has to match number of dimensions.') - def _add_item(self, dim_vals, data, sort=True): + def _add_item(self, dim_vals, data, sort=True, update=True): """ Adds item to the data, applying dimension types and ensuring key conforms to Dimension type and values.
1
""" Supplies MultiDimensionalMapping and NdMapping which are multi-dimensional map types. The former class only allows indexing whereas the latter also enables slicing over multiple dimension ranges. """ from collections import Sequence from itertools import cycle from operator import itemgetter import numpy as np import param from . import traversal from .dimension import OrderedDict, Dimension, Dimensioned, ViewableElement from .util import unique_iterator, sanitize_identifier, dimension_sort, group_select, iterative_select class item_check(object): """ Context manager to allow creating NdMapping types without performing the usual item_checks, providing significant speedups when there are a lot of items. Should only be used when both keys and values are guaranteed to be the right type, as is the case for many internal operations. """ def __init__(self, enabled): self.enabled = enabled def __enter__(self): self._enabled = MultiDimensionalMapping._check_items MultiDimensionalMapping._check_items = self.enabled def __exit__(self, exc_type, exc_val, exc_tb): MultiDimensionalMapping._check_items = self._enabled class sorted_context(object): """ Context manager to allow creating NdMapping types without performing the usual sorting, providing significant speedups when there are a lot of items. Should only be used if values are guaranteed to be sorted before or after the operation is performed. """ def __init__(self, enabled): self.enabled = enabled def __enter__(self): self._enabled = MultiDimensionalMapping._sorted MultiDimensionalMapping._sorted = self.enabled def __exit__(self, exc_type, exc_val, exc_tb): MultiDimensionalMapping._sorted = self._enabled class MultiDimensionalMapping(Dimensioned): """ An MultiDimensionalMapping is a Dimensioned mapping (like a dictionary or array) that uses fixed-length multidimensional keys. This behaves like a sparse N-dimensional array that does not require a dense sampling over the multidimensional space. If the underlying value for each (key,value) pair also supports indexing (such as a dictionary, array, or list), fully qualified (deep) indexing may be used from the top level, with the first N dimensions of the index selecting a particular Dimensioned object and the remaining dimensions indexing into that object. For instance, for a MultiDimensionalMapping with dimensions "Year" and "Month" and underlying values that are 2D floating-point arrays indexed by (r,c), a 2D array may be indexed with x[2000,3] and a single floating-point number may be indexed as x[2000,3,1,9]. In practice, this class is typically only used as an abstract base class, because the NdMapping subclass extends it with a range of useful slicing methods for selecting subsets of the data. Even so, keeping the slicing support separate from the indexing and data storage methods helps make both classes easier to understand. 
""" group = param.String(default='MultiDimensionalMapping', constant=True) kdims = param.List(default=[Dimension("Default")], constant=True) data_type = None # Optional type checking of elements _deep_indexable = False _sorted = True _check_items = True def __init__(self, initial_items=None, **params): if isinstance(initial_items, NdMapping): map_type = type(initial_items) own_params = self.params() new_params = dict(initial_items.get_param_values(onlychanged=True)) if new_params.get('group') == map_type.__name__: new_params.pop('group') params = dict({name: value for name, value in new_params.items() if name in own_params}, **params) super(MultiDimensionalMapping, self).__init__(OrderedDict(), **params) self._next_ind = 0 self._check_key_type = True self._cached_index_types = [d.type for d in self.kdims] self._cached_index_values = {d.name:d.values for d in self.kdims} self._cached_categorical = any(d.values for d in self.kdims) self._instantiated = not any(v == 'initial' for v in self._cached_index_values.values()) if initial_items is None: initial_items = [] if isinstance(initial_items, tuple): self._add_item(initial_items[0], initial_items[1]) elif not self._check_items and self._instantiated: if isinstance(initial_items, dict): initial_items = initial_items.items() elif isinstance(initial_items, MultiDimensionalMapping): initial_items = initial_items.data.items() self.data = OrderedDict((k if isinstance(k, tuple) else (k,), v) for k, v in initial_items) self._resort() elif initial_items is not None: self.update(OrderedDict(initial_items)) self._instantiated = True def _item_check(self, dim_vals, data): """ Applies optional checks to individual data elements before they are inserted ensuring that they are of a certain type. Subclassed may implement further element restrictions. """ if self.data_type is not None and not isinstance(data, self.data_type): if isinstance(self.data_type, tuple): data_type = tuple(dt.__name__ for dt in self.data_type) else: data_type = self.data_type.__name__ raise TypeError('{slf} does not accept {data} type, data elements have ' 'to be a {restr}.'.format(slf=type(self).__name__, data=type(data).__name__, restr=data_type)) elif not len(dim_vals) == self.ndims: raise KeyError('Key has to match number of dimensions.') def _add_item(self, dim_vals, data, sort=True): """ Adds item to the data, applying dimension types and ensuring key conforms to Dimension type and values. """ if not isinstance(dim_vals, tuple): dim_vals = (dim_vals,) self._item_check(dim_vals, data) # Apply dimension types dim_types = zip(self._cached_index_types, dim_vals) dim_vals = tuple(v if None in [t, v] else t(v) for t, v in dim_types) # Check and validate for categorical dimensions if self._cached_categorical: valid_vals = zip(self.kdims, dim_vals) else: valid_vals = [] for dim, val in valid_vals: vals = self._cached_index_values[dim.name] if vals == 'initial': self._cached_index_values[dim.name] = [] if not self._instantiated and self.get_dimension(dim).values == 'initial': if val not in vals: self._cached_index_values[dim.name].append(val) elif vals and val not in vals: raise KeyError('%s dimension value %s not in' ' specified dimension values.' % (dim, repr(val))) # Updates nested data structures rather than simply overriding them. 
if ((dim_vals in self.data) and isinstance(self.data[dim_vals], (NdMapping, OrderedDict))): self.data[dim_vals].update(data) else: self.data[dim_vals] = data if sort: self._resort() def _apply_key_type(self, keys): """ If a type is specified by the corresponding key dimension, this method applies the type to the supplied key. """ typed_key = () for dim, key in zip(self.kdims, keys): key_type = dim.type if key_type is None: typed_key += (key,) elif isinstance(key, slice): sl_vals = [key.start, key.stop, key.step] typed_key += (slice(*[key_type(el) if el is not None else None for el in sl_vals]),) elif key is Ellipsis: typed_key += (key,) elif isinstance(key, list): typed_key += ([key_type(k) for k in key],) else: typed_key += (key_type(key),) return typed_key def _split_index(self, key): """ Partitions key into key and deep dimension groups. If only key indices are supplied, the data is indexed with an empty tuple. Keys with indices than there are dimensions will be padded. """ if not isinstance(key, tuple): key = (key,) if key[0] is Ellipsis: num_pad = self.ndims - len(key) + 1 key = (slice(None),) * num_pad + key[1:] elif len(key) < self.ndims: num_pad = self.ndims - len(key) key = key + (slice(None),) * num_pad map_slice = key[:self.ndims] if self._check_key_type: map_slice = self._apply_key_type(map_slice) if len(key) == self.ndims: return map_slice, () else: return map_slice, key[self.ndims:] def _dataslice(self, data, indices): """ Returns slice of data element if the item is deep indexable. Warns if attempting to slice an object that has not been declared deep indexable. """ if isinstance(data, Dimensioned) and indices: return data[indices] elif len(indices) > 0: self.warning('Cannot index into data element, extra data' ' indices ignored.') return data def _resort(self): if self._sorted: resorted = dimension_sort(self.data, self.kdims, self.vdims, self._cached_categorical, range(self.ndims), self._cached_index_values) self.data = OrderedDict(resorted) def clone(self, data=None, shared_data=True, *args, **overrides): """ Overrides Dimensioned clone to avoid checking items if data is unchanged. """ with item_check(not shared_data and self._check_items): return super(MultiDimensionalMapping, self).clone(data, shared_data, *args, **overrides) def groupby(self, dimensions, container_type=None, group_type=None, **kwargs): """ Splits the mapping into groups by key dimension which are then returned together in a mapping of class container_type. The individual groups are of the same type as the original map. """ if self.ndims == 1: self.warning('Cannot split Map with only one dimension.') return self dimensions = [self.get_dimension(d).name for d in dimensions] container_type = container_type if container_type else type(self) group_type = group_type if group_type else type(self) dims, inds = zip(*((self.get_dimension(dim), self.get_dimension_index(dim)) for dim in dimensions)) inames, idims = zip(*((dim.name, dim) for dim in self.kdims if not dim.name in dimensions)) selects = unique_iterator(itemgetter(*inds)(key) if len(inds) > 1 else (key[inds[0]],) for key in self.data.keys()) with item_check(False): selects = group_select(list(selects)) groups = [(k, group_type((v.reindex(inames) if isinstance(v, NdMapping) else [((), (v,))]), **kwargs)) for k, v in iterative_select(self, dimensions, selects)] return container_type(groups, kdims=dims) def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs): """ Create a new object with an additional key dimensions. 
        Requires the dimension name or object, the desired position in
        the key dimensions and a key value scalar or sequence of the
        same length as the existing keys.
        """
        if not isinstance(dimension, Dimension):
            dimension = Dimension(dimension)

        if dimension in self.dimensions():
            raise Exception('{dim} dimension already defined'.format(dim=dimension.name))

        if vdim and self._deep_indexable:
            raise Exception('Cannot add value dimension to object that is deep indexable')

        if vdim:
            dims = self.vdims[:]
            dims.insert(dim_pos, dimension)
            dimensions = dict(vdims=dims)
            dim_pos += self.ndims
        else:
            dims = self.kdims[:]
            dims.insert(dim_pos, dimension)
            dimensions = dict(kdims=dims)

        if np.isscalar(dim_val):
            dim_val = cycle([dim_val])
        else:
            if not len(dim_val) == len(self):
                raise ValueError("Added dimension values must be the same "
                                 "length as the existing keys.")

        items = OrderedDict()
        for dval, (key, val) in zip(dim_val, self.data.items()):
            if vdim:
                new_val = list(val)
                new_val.insert(dim_pos, dval)
                items[key] = tuple(new_val)
            else:
                new_key = list(key)
                new_key.insert(dim_pos, dval)
                items[tuple(new_key)] = val

        return self.clone(items, **dict(dimensions, **kwargs))

    def drop_dimension(self, dimensions):
        """
        Returns a new mapping with the named dimension(s) removed.
        """
        dimensions = [dimensions] if np.isscalar(dimensions) else dimensions
        dims = [d for d in self.kdims if d not in dimensions]
        dim_inds = [self.get_dimension_index(d) for d in dims]
        key_getter = itemgetter(*dim_inds)
        return self.clone([(key_getter(k), v) for k, v in self.data.items()],
                          kdims=dims)

    def dimension_values(self, dimension):
        "Returns the values along the specified dimension."
        dimension = self.get_dimension(dimension).name
        if dimension in self.kdims:
            return np.array([k[self.get_dimension_index(dimension)]
                             for k in self.data.keys()])
        if dimension in self.dimensions(label=True):
            values = [el.dimension_values(dimension) for el in self
                      if dimension in el.dimensions()]
            return np.concatenate(values)
        else:
            return super(MultiDimensionalMapping, self).dimension_values(dimension)

    def reindex(self, kdims=[], force=False):
        """
        Create a new object with a re-ordered or reduced set of key
        dimensions.

        Reducing the number of key dimensions will discard information
        from the keys. All data values are accessible in the newly
        created object as the new labels must be sufficient to address
        each value uniquely.
        """
        old_kdims = [d.name for d in self.kdims]
        if not len(kdims):
            kdims = [d for d in old_kdims
                     if not len(set(self.dimension_values(d))) == 1]
        indices = [self.get_dimension_index(el) for el in kdims]

        keys = [tuple(k[i] for i in indices) for k in self.data.keys()]
        reindexed_items = OrderedDict(
            (k, v) for (k, v) in zip(keys, self.data.values()))
        reduced_dims = set([d.name for d in self.kdims]).difference(kdims)
        dimensions = [self.get_dimension(d) for d in kdims
                      if d not in reduced_dims]

        if len(set(keys)) != len(keys) and not force:
            raise Exception("Given dimension labels are not sufficient "
                            "to address all values uniquely.")

        if len(keys):
            cdims = {self.get_dimension(d): self.dimension_values(d)[0]
                     for d in reduced_dims}
        else:
            cdims = {}
        with item_check(indices == sorted(indices)):
            return self.clone(reindexed_items, kdims=dimensions,
                              cdims=cdims)

    @property
    def last(self):
        "Returns the highest data item along the map dimensions."
        return list(self.data.values())[-1] if len(self) else None

    @property
    def last_key(self):
        "Returns the last key value."
return list(self.keys())[-1] if len(self) else None @property def info(self): """ Prints information about the Dimensioned object, including the number and type of objects contained within it and information about its dimensions. """ info_str = self.__class__.__name__ +\ " containing %d items of type %s\n" % (len(self.keys()), type(self.values()[0]).__name__) info_str += ('-' * (len(info_str)-1)) + "\n\n" aliases = {v: k for k, v in self._dim_aliases.items()} for group in self._dim_groups: dimensions = getattr(self, group) if dimensions: group = aliases[group].split('_')[0] info_str += '%s Dimensions: \n' % group.capitalize() for d in dimensions: dmin, dmax = self.range(d.name) if d.formatter: dmin, dmax = d.formatter(dmin), d.formatter(dmax) info_str += '\t %s: %s...%s \n' % (str(d), dmin, dmax) print(info_str) def table(self, datatype=None, **kwargs): "Creates a table from the stored keys and data." datatype = ['ndelement', 'dataframe'] tables = [] for key, value in self.data.items(): value = value.table(datatype=datatype, **kwargs) for idx, (dim, val) in enumerate(zip(self.kdims, key)): value = value.add_dimension(dim, idx, val) tables.append(value) return value.interface.concatenate(tables) def dframe(self): "Creates a pandas DataFrame from the stored keys and data." try: import pandas except ImportError: raise Exception("Cannot build a DataFrame without the pandas library.") labels = self.dimensions('key', True) + [self.group] return pandas.DataFrame( [dict(zip(labels, k + (v,))) for (k, v) in self.data.items()]) def update(self, other): """ Updates the current mapping with some other mapping or OrderedDict instance, making sure that they are indexed along the same set of dimensions. The order of key dimensions remains unchanged after the update. """ if isinstance(other, NdMapping): dims = [d for d in other.kdims if d not in self.kdims] if len(dims) == other.ndims: raise KeyError("Cannot update with NdMapping that has" " a different set of key dimensions.") elif dims: other = other.drop_dimension(dims) other = other.data for key, data in other.items(): self._add_item(key, data, sort=False) self._resort() def keys(self): " Returns the keys of all the elements." if self.ndims == 1: return [k[0] for k in self.data.keys()] else: return list(self.data.keys()) def values(self): " Returns the values of all the elements." return list(self.data.values()) def items(self): "Returns all elements as a list in (key,value) format." return list(zip(list(self.keys()), list(self.values()))) def get(self, key, default=None): "Standard get semantics for all mapping types" try: if key is None: return None return self[key] except: return default def pop(self, key, default=None): "Standard pop semantics for all mapping types" if not isinstance(key, tuple): key = (key,) return self.data.pop(key, default) def __getitem__(self, key): """ Allows multi-dimensional indexing in the order of the specified key dimensions, passing any additional indices to the data elements. 
""" if key in [Ellipsis, ()]: return self map_slice, data_slice = self._split_index(key) return self._dataslice(self.data[map_slice], data_slice) def __setitem__(self, key, value): self._add_item(key, value) def __str__(self): return repr(self) def __iter__(self): return iter(self.values()) def __contains__(self, key): if self.ndims == 1: return key in self.data.keys() else: return key in self.keys() def __len__(self): return len(self.data) class NdMapping(MultiDimensionalMapping): """ NdMapping supports the same indexing semantics as MultiDimensionalMapping but also supports slicing semantics. Slicing semantics on an NdMapping is dependent on the ordering semantics of the keys. As MultiDimensionalMapping sort the keys, a slice on an NdMapping is effectively a way of filtering out the keys that are outside the slice range. """ group = param.String(default='NdMapping', constant=True) def __getitem__(self, indexslice): """ Allows slicing operations along the key and data dimensions. If no data slice is supplied it will return all data elements, otherwise it will return the requested slice of the data. """ if indexslice in [Ellipsis, ()]: return self elif isinstance(indexslice, np.ndarray) and indexslice.dtype.kind == 'b': if not len(indexslice) == len(self): raise IndexError("Boolean index must match length of sliced object") selection = zip(indexslice, self.data.items()) return self.clone([item for c, item in selection if c]) map_slice, data_slice = self._split_index(indexslice) map_slice = self._transform_indices(map_slice) map_slice = self._expand_slice(map_slice) if all(not isinstance(el, (slice, set, list, tuple)) for el in map_slice): return self._dataslice(self.data[map_slice], data_slice) else: conditions = self._generate_conditions(map_slice) items = self.data.items() for cidx, (condition, dim) in enumerate(zip(conditions, self.kdims)): values = self._cached_index_values.get(dim.name, None) items = [(k, v) for k, v in items if condition(values.index(k[cidx]) if values else k[cidx])] sliced_items = [] for k, v in items: val_slice = self._dataslice(v, data_slice) if val_slice or isinstance(val_slice, tuple): sliced_items.append((k, val_slice)) if len(sliced_items) == 0: raise KeyError('No items within specified slice.') with item_check(False): return self.clone(sliced_items) def _expand_slice(self, indices): """ Expands slices containing steps into a list. """ keys = list(self.data.keys()) expanded = [] for idx, ind in enumerate(indices): if isinstance(ind, slice) and ind.step is not None: dim_ind = slice(ind.start, ind.stop) if dim_ind == slice(None): condition = self._all_condition() elif dim_ind.start is None: condition = self._upto_condition(dim_ind) elif dim_ind.stop is None: condition = self._from_condition(dim_ind) else: condition = self._range_condition(dim_ind) dim_vals = unique_iterator(k[idx] for k in keys) expanded.append(set([k for k in dim_vals if condition(k)][::int(ind.step)])) else: expanded.append(ind) return tuple(expanded) def _transform_indices(self, indices): """ Identity function here but subclasses can implement transforms of the dimension indices from one coordinate system to another. """ return indices def _generate_conditions(self, map_slice): """ Generates filter conditions used for slicing the data structure. 
""" conditions = [] for dim, dim_slice in zip(self.kdims, map_slice): if isinstance(dim_slice, slice): start, stop = dim_slice.start, dim_slice.stop if dim.values: values = self._cached_index_values[dim.name] dim_slice = slice(None if start is None else values.index(start), None if stop is None else values.index(stop)) if dim_slice == slice(None): conditions.append(self._all_condition()) elif start is None: conditions.append(self._upto_condition(dim_slice)) elif stop is None: conditions.append(self._from_condition(dim_slice)) else: conditions.append(self._range_condition(dim_slice)) elif isinstance(dim_slice, (set, list)): if dim.values: dim_slice = [self._cached_index_values[dim.name].index(dim_val) for dim_val in dim_slice] conditions.append(self._values_condition(dim_slice)) elif dim_slice is Ellipsis: conditions.append(self._all_condition()) elif isinstance(dim_slice, (tuple)): raise IndexError("Keys may only be selected with sets or lists, not tuples.") else: if dim.values: dim_slice = self._cached_index_values[dim.name].index(dim_slice) conditions.append(self._value_condition(dim_slice)) return conditions def _value_condition(self, value): return lambda x: x == value def _values_condition(self, values): return lambda x: x in values def _range_condition(self, slice): if slice.step is None: lmbd = lambda x: slice.start <= x < slice.stop else: lmbd = lambda x: slice.start <= x < slice.stop and not ( (x-slice.start) % slice.step) return lmbd def _upto_condition(self, slice): if slice.step is None: lmbd = lambda x: x < slice.stop else: lmbd = lambda x: x < slice.stop and not (x % slice.step) return lmbd def _from_condition(self, slice): if slice.step is None: lmbd = lambda x: x > slice.start else: lmbd = lambda x: x > slice.start and ((x-slice.start) % slice.step) return lmbd def _all_condition(self): return lambda x: True class UniformNdMapping(NdMapping): """ A UniformNdMapping is a map of Dimensioned objects and is itself indexed over a number of specified dimensions. The dimension may be a spatial dimension (i.e., a ZStack), time (specifying a frame sequence) or any other combination of Dimensions. UniformNdMapping objects can be sliced, sampled, reduced, overlaid and split along its and its containing Views dimensions. Subclasses should implement the appropriate slicing, sampling and reduction methods for their Dimensioned type. """ data_type = (ViewableElement, NdMapping) _abstract = True _deep_indexable = True _auxiliary_component = False def __init__(self, initial_items=None, group=None, label=None, **params): self._type = None self._group_check, self.group = None, group self._label_check, self.label = None, label super(UniformNdMapping, self).__init__(initial_items, **params) def clone(self, data=None, shared_data=True, *args, **overrides): """ Returns a clone of the object with matching parameter values containing the specified args and kwargs. If shared_data is set to True and no data explicitly supplied, the clone will share data with the original. 
""" settings = dict(self.get_param_values()) if settings.get('group', None) != self._group: settings.pop('group') if settings.get('label', None) != self._label: settings.pop('label') settings.update(overrides) if data is None and shared_data: data = self.data with item_check(not shared_data and self._check_items): return self.__class__(data, *args, **settings) @property def group(self): if self._group: return self._group else: vals = self.values() groups = {v.group for v in vals if not v._auxiliary_component} if len(groups) == 1: tp = type(vals[0]).__name__ group = list(groups)[0] if tp != group: return group return type(self).__name__ @group.setter def group(self, group): if group is not None and not sanitize_identifier.allowable(group): raise ValueError("Supplied group %s contains invalid " "characters." % self.group) self._group = group @property def label(self): if self._label: return self._label else: labels = {v.label for v in self.values() if not v._auxiliary_component} if len(labels) == 1: return list(labels)[0] else: return '' @label.setter def label(self, label): if label is not None and not sanitize_identifier.allowable(label): raise ValueError("Supplied group %s contains invalid " "characters." % self.group) self._label = label @property def type(self): """ The type of elements stored in the map. """ if self._type is None and len(self): self._type = self.values()[0].__class__ return self._type @property def empty_element(self): return self.type(None) def _item_check(self, dim_vals, data): if self.type is not None and (type(data) != self.type): raise AssertionError("%s must only contain one type of object, not both %s and %s." % (self.__class__.__name__, type(data).__name__, self.type.__name__)) if not traversal.uniform(NdMapping([(0, self), (1, data)])): raise ValueError("HoloMaps dimensions must be consistent in %s." % type(self).__name__) super(UniformNdMapping, self)._item_check(dim_vals, data) def dframe(self): """ Gets a dframe for each Element in the HoloMap, appends the dimensions of the HoloMap as series and concatenates the dframes. """ import pandas dframes = [] for key, view in self.data.items(): view_frame = view.dframe() key_dims = reversed(list(zip(key, self.dimensions('key', True)))) for val, dim in key_dims: dimn = 1 while dim in view_frame: dim = dim+'_%d' % dimn if dim in view_frame: dimn += 1 view_frame.insert(0, dim, val) dframes.append(view_frame) return pandas.concat(dframes)
1
13,867
Would be nice to know what the `update` argument does in the docstring.
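A sketch of what the requested docstring could say; the semantics of `update` are inferred from the patch context and from how `_add_item` merges nested containers, so treat the wording as an assumption rather than the author's confirmed intent:

def _add_item(self, dim_vals, data, sort=True, update=True):
    """
    Adds item to the data, applying dimension types and ensuring
    key conforms to Dimension type and values.

    If update is True (assumed meaning), an existing NdMapping or
    OrderedDict entry under the same key is updated in place;
    otherwise the stored value is replaced outright.
    """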
holoviz-holoviews
py
@@ -1,10 +1,10 @@ -import StringIO from pathod import log from netlib.exceptions import TcpDisconnect -import netlib.tcp +from six.moves import cStringIO -class DummyIO(StringIO.StringIO): + +class DummyIO(cStringIO): def start_log(self, *args, **kwargs): pass
1
import StringIO from pathod import log from netlib.exceptions import TcpDisconnect import netlib.tcp class DummyIO(StringIO.StringIO): def start_log(self, *args, **kwargs): pass def get_log(self, *args, **kwargs): return "" def test_disconnect(): outf = DummyIO() rw = DummyIO() l = log.ConnectionLogger(outf, False, rw, rw) try: with l.ctx() as lg: lg("Test") except TcpDisconnect: pass assert "Test" in outf.getvalue()
1
11,479
I think we can just use `io.BytesIO` here.
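A minimal sketch of the reviewer's suggestion; whether `log.ConnectionLogger` writes text or bytes determines the choice between `io.BytesIO` and `io.StringIO`, so this is an untested assumption rather than a confirmed fix:

import io

class DummyIO(io.BytesIO):  # io.StringIO if the logger writes text
    def start_log(self, *args, **kwargs):
        pass

    def get_log(self, *args, **kwargs):
        return ""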
mitmproxy-mitmproxy
py
@@ -155,13 +155,14 @@ func NewGCPgRPCConn(ctx context.Context, t *testing.T, endPoint, api string) (*g } // NewAzureTestPipeline creates a new connection for testing against Azure Blob. -func NewAzureTestPipeline(ctx context.Context, t *testing.T, accountName string, accountKey string) (pipeline pipeline.Pipeline, done func(), httpClient *http.Client) { +func NewAzureTestPipeline(ctx context.Context, t *testing.T, api, accountName, accountKey string) (pipeline pipeline.Pipeline, done func(), httpClient *http.Client) { mode := recorder.ModeReplaying if *Record { mode = recorder.ModeRecording } azMatchers := &replay.ProviderMatcher{ + Headers: []string{"User-Agent"}, URLScrubbers: []*regexp.Regexp{ regexp.MustCompile(`se=[^?]*`), regexp.MustCompile(`sig=[^?]*`),
1
package setup // import "gocloud.dev/internal/testing/setup" import ( "context" "flag" "fmt" "net" "net/http" "regexp" "testing" "github.com/aws/aws-sdk-go/aws" awscreds "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/dnaeon/go-vcr/recorder" "gocloud.dev/gcp" "gocloud.dev/internal/testing/replay" "gocloud.dev/internal/useragent" "google.golang.org/grpc" grpccreds "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/oauth" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-blob-go/azblob" ) // Record is true iff the tests are being run in "record" mode. var Record = flag.Bool("record", false, "whether to run tests against cloud resources and record the interactions") // NewAWSSession creates a new session for testing against AWS. // If the test is in --record mode, the test will call out to AWS, and the // results are recorded in a replay file. // Otherwise, the session reads a replay file and runs the test as a replay, // which never makes an outgoing HTTP call and uses fake credentials. func NewAWSSession(t *testing.T, region string) (sess *session.Session, rt http.RoundTripper, done func()) { mode := recorder.ModeReplaying if *Record { mode = recorder.ModeRecording } awsMatcher := &replay.ProviderMatcher{ URLScrubbers: []*regexp.Regexp{ regexp.MustCompile(`X-Amz-(Credential|Signature)=[^?]*`), }, Headers: []string{"X-Amz-Target"}, } r, done, err := replay.NewRecorder(t, mode, awsMatcher, t.Name()) if err != nil { t.Fatalf("unable to initialize recorder: %v", err) } client := &http.Client{Transport: r} // Provide fake creds if running in replay mode. var creds *awscreds.Credentials if !*Record { creds = awscreds.NewStaticCredentials("FAKE_ID", "FAKE_SECRET", "FAKE_TOKEN") } sess, err = session.NewSession(&aws.Config{ HTTPClient: client, Region: aws.String(region), Credentials: creds, MaxRetries: aws.Int(0), }) if err != nil { t.Fatal(err) } return sess, r, done } // NewGCPClient creates a new HTTPClient for testing against GCP. // If the test is in --record mode, the client will call out to GCP, and the // results are recorded in a replay file. // Otherwise, the session reads a replay file and runs the test as a replay, // which never makes an outgoing HTTP call and uses fake credentials. func NewGCPClient(ctx context.Context, t *testing.T) (client *gcp.HTTPClient, rt http.RoundTripper, done func()) { mode := recorder.ModeReplaying if *Record { mode = recorder.ModeRecording } // GFEs scrub X-Google- and X-GFE- headers from requests and responses. // Drop them from recordings made by users inside Google. // http://g3doc/gfe/g3doc/gfe3/design/http_filters/google_header_filter // (internal Google documentation). 
gfeDroppedHeaders := regexp.MustCompile("^X-(Google|GFE)-") gcpMatcher := &replay.ProviderMatcher{ Headers: []string{"User-Agent"}, DropRequestHeaders: gfeDroppedHeaders, DropResponseHeaders: gfeDroppedHeaders, URLScrubbers: []*regexp.Regexp{ regexp.MustCompile(`Expires=[^?]*`), }, BodyScrubbers: []*regexp.Regexp{regexp.MustCompile(`(?m)^\s*--.*$`)}, } r, done, err := replay.NewRecorder(t, mode, gcpMatcher, t.Name()) if err != nil { t.Fatalf("unable to initialize recorder: %v", err) } if *Record { creds, err := gcp.DefaultCredentials(ctx) if err != nil { t.Fatalf("failed to get default credentials: %v", err) } client, err = gcp.NewHTTPClient(r, gcp.CredentialsTokenSource(creds)) if err != nil { t.Fatal(err) } } else { client = &gcp.HTTPClient{Client: http.Client{Transport: r}} } return client, r, done } // NewGCPgRPCConn creates a new connection for testing against GCP via gRPC. // If the test is in --record mode, the client will call out to GCP, and the // results are recorded in a replay file. // Otherwise, the session reads a replay file and runs the test as a replay, // which never makes an outgoing RPC and uses fake credentials. func NewGCPgRPCConn(ctx context.Context, t *testing.T, endPoint, api string) (*grpc.ClientConn, func()) { mode := recorder.ModeReplaying if *Record { mode = recorder.ModeRecording } opts, done := replay.NewGCPDialOptions(t, mode, t.Name()+".replay") opts = append(opts, useragent.GRPCDialOption(api)) if mode == recorder.ModeRecording { // Add credentials for real RPCs. creds, err := gcp.DefaultCredentials(ctx) if err != nil { t.Fatal(err) } opts = append(opts, grpc.WithTransportCredentials(grpccreds.NewClientTLSFromCert(nil, ""))) opts = append(opts, grpc.WithPerRPCCredentials(oauth.TokenSource{TokenSource: gcp.CredentialsTokenSource(creds)})) } else { // Establish a local listener for Dial to connect to and update endPoint // to point to it. l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatal(err) } endPoint = l.Addr().String() opts = append(opts, grpc.WithInsecure()) } conn, err := grpc.DialContext(ctx, endPoint, opts...) if err != nil { t.Fatal(err) } return conn, done } // NewAzureTestPipeline creates a new connection for testing against Azure Blob. 
func NewAzureTestPipeline(ctx context.Context, t *testing.T, accountName string, accountKey string) (pipeline pipeline.Pipeline, done func(), httpClient *http.Client) { mode := recorder.ModeReplaying if *Record { mode = recorder.ModeRecording } azMatchers := &replay.ProviderMatcher{ URLScrubbers: []*regexp.Regexp{ regexp.MustCompile(`se=[^?]*`), regexp.MustCompile(`sig=[^?]*`), }, } r, done, err := replay.NewRecorder(t, mode, azMatchers, t.Name()) if err != nil { t.Fatalf("unable to initialize recorder: %v", err) } var credential azblob.Credential if *Record { credential, _ = azblob.NewSharedKeyCredential(accountName, accountKey) } else { credential = azblob.NewAnonymousCredential() } httpClient = azureHTTPClient(r) p := newPipeline(credential, r) return p, done, httpClient } func newPipeline(c azblob.Credential, r *recorder.Recorder) pipeline.Pipeline { if c == nil { panic("pipeline credential can't be nil") } f := []pipeline.Factory{ // sets User-Agent for recorder azblob.NewTelemetryPolicyFactory(azblob.TelemetryOptions{ Value: "X-Az-Target", }), // sets header X-Ms-Client-Request-Id, see https://msdn.microsoft.com/en-us/library/mt766820.aspx azblob.NewUniqueRequestIDPolicyFactory(), } f = append(f, c) f = append(f, pipeline.MethodFactoryMarker()) log := pipeline.LogOptions{ Log: func(level pipeline.LogLevel, message string) { fmt.Println(message) }, ShouldLog: func(level pipeline.LogLevel) bool { return true }, } return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: newDefaultHTTPClientFactory(azureHTTPClient(r)), Log: log}) } func newDefaultHTTPClientFactory(pipelineHTTPClient *http.Client) pipeline.Factory { return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { r, err := pipelineHTTPClient.Do(request.WithContext(ctx)) if err != nil { err = pipeline.NewError(err, "HTTP request failed") } return pipeline.NewHTTPResponse(r), err } }) } func azureHTTPClient(r *recorder.Recorder) *http.Client { if r != nil { return &http.Client{Transport: r} } else { return &http.Client{} } }
1
13,515
This ensures that the User-Agent header matches during replay.
google-go-cloud
go
@@ -3517,6 +3517,7 @@ XDDkwd__(SUBQUERY_UNNESTING, "ON"), XDDui1__(USTAT_MIN_ROWCOUNT_FOR_LOW_SAMPLE, "1000000"), XDDui1__(USTAT_MIN_ROWCOUNT_FOR_SAMPLE, "10000"), DDflt0_(USTAT_MODIFY_DEFAULT_UEC, "0.05"), + DDflt0_(USTAT_NAHEAP_ESTIMATED_MAX, "1.3"), // estimated max memory allocation feasible with NAHEAP. XDDui1__(USTAT_NECESSARY_SAMPLE_MAX, "5000000"), // Maximum sample size with NECESSARY DDui1__(USTAT_NUM_MC_GROUPS_FOR_KEYS, "10"), XDDpct__(USTAT_OBSOLETE_PERCENT_ROWCOUNT, "15"),
1
/* -*-C++-*- // @@@ START COPYRIGHT @@@ // // (C) Copyright 1996-2015 Hewlett-Packard Development Company, L.P. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // @@@ END COPYRIGHT @@@ ***************************************************************************** * * File: NADefaults.cpp * Description: Implementation for the defaults table class, NADefaults. * * Created: 7/11/96 * Language: C++ * * * * ***************************************************************************** */ #define SQLPARSERGLOBALS_FLAGS // must precede all #include's #define SQLPARSERGLOBALS_NADEFAULTS #include "Platform.h" #include "NADefaults.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #ifdef NA_HAS_SEARCH_H #include <search.h> // use the bsearch binary search routine of the C RTL #else #include <unistd.h> // on OSS, bsearch comes from unistd.h #endif #include "nsk/nskport.h" #if !defined(NDEBUG) #endif #include "CliDefs.h" #include "CmpContext.h" #include "CmpErrors.h" #include "ComObjectName.h" #include "ComRtUtils.h" #include "ComSchemaName.h" #include "ex_error.h" #include "DefaultConstants.h" #include "DefaultValidator.h" #include "NAClusterInfo.h" #include "parser.h" #include "sql_id.h" #include "SQLCLIdev.h" #include "Sqlcomp.h" #include "StmtCompilationMode.h" #include "OptimizerSimulator.h" #include "CmpSeabaseDDL.h" #include "Globals.h" #include "QCache.h" #include "SqlParserGlobals.h" // MUST be last #include! #include "seabed/ms.h" #include "seabed/fs.h" #define NADHEAP CTXTHEAP #define ERRWARN(msg) ToErrorOrWarning(msg, errOrWarn) #define ERRWARNLOOP(msg) ToErrorOrWarning(msg, errOrWarnLOOP) #define ENUM_RANGE_CHECK(e) (e >= 0 && (size_t)e < numDefaultAttributes()) #define ATTR_RANGE_CHECK ENUM_RANGE_CHECK(attrEnum) #ifndef NDEBUG #define ATTR_RANGE_ASSERT CMPASSERT(ATTR_RANGE_CHECK) #else #define ATTR_RANGE_ASSERT #endif // ------------------------------------------------------------------------- // This table contains defaults used in SQLARK. // To add a default, put it in sqlcomp/DefaultConstants.h and in this table. // // The #define declares the domain (allowed range of values) of the attr-value; // typically it is Int1 or UI1 (signed or unsigned integral, >=1) // to prevent division-by-zero errors in the calling code. // // The first column is the internal enum value from sqlcomp/DefaultConstants.h. // The second column is the default value as a string. // // The DDxxxx macro identifies the domain of the attribute // (the range and properties of the possible values). // // XDDxxxx does the same *and* externalizes the attribute // (makes it visible to SHOWCONTROL; *you* need to tell Pubs to document it). // // SDDxxxx does the same and externalizes the attribute to HP support personnel // (makes it visible to HPDM when support is logged on; *you* need to tell Pubs // to document it in the support manual. You can set the // SHOWCONTROL_SUPPORT_ATTRS CQD to ON to see all the externalized and // support-level CQDs). 
// // For instance, DDflt0 allows any nonnegative floating-point number, while // DDflte allows any positive float (the e stands for epsilon, that tiniest // scintilla >0 in classical calculus, and something like +1E-38 on a Pentium). // DDui allows only nonnegative integral values (ui=unsigned int), // DDui1 allows only ints > 0, DDui2 only nonzero multiples of 2, etc. // // DDkwd validates keywords. Each attribute that is DDkwd has its own subset // of acceptable tokens -- the default behavior is that the attr is bivalent // (ON/OFF or TRUE/FALSE or ENABLE/DISABLE). If you want different keywords, // see enum DefaultToken in DefaultConstants.h, and NADefaults::token() below. // // Other DD's validate percentages, and Ansi names. Certainly more could be // defined, for more restrictive ranges or other criteria. // ************************************************************************* // NOTE: You must keep the entire list in alphabetical order, // or else the lookup will not work!!!!!!! Use only CAPITAL LETTERS!!!!!!!!! // ************************************************************************* // NOTE 2: If you choose to "hide" the default default value by setting it to // "ENABLE" or "SYSTEM" or "", your code must handle this possibility. // // See OptPhysRelExpr.cpp's handling of PARALLEL_NUM_ESPS, // an unsigned positive int which also accepts the keyword setting of "SYSTEM". // See ImplRule.cpp's use of INSERT_VSBB, a keyword attr which allows "SYSTEM". // // A simple way to handle ON/OFF keywords that you want to hide the default for: // Take OPTIMIZER_PRUNING as an example. Right now, it appears below with // default "OFF", and opt.cpp does // DisablePruning = (NADEFAULT(OPTIMIZER_PRUNING) == DF_OFF); // To hide the default default, // you would enter it below as "SYSTEM", and opt.cpp would do // DisablePruning = (NADEFAULT(OPTIMIZER_PRUNING) != DF_ON); // (i.e., DF_OFF and DF_SYSTEM would be treated identically, as desired). // ************************************************************************* // NOTE 3: The user is always allowed to say // CONTROL QUERY DEFAULT attrname 'SYSTEM'; -- or 'ENABLE' or '' // What this means is that the current setting for that attribute // reverts to its default-default value. This default-default value // may or may not be "SYSTEM"; this is completely orthogonal/irrelevant // to the CQD usage. // // One gotcha: 'ENABLE' is a synonym for 'SYSTEM', *EXCEPT* when the // SYSTEM default (the default-default) is "DISABLE". // In this case, 'ENABLE' is a synonym for 'ON' // (the opposite of the synonyms DISABLE/OFF). // ************************************************************************* // NOTE 4: After modifying this static table in any way, INCLUDING A CODE MERGE, // for a quick sanity check, run w:/toolbin/checkNAD. // For a complete consistency check, compile this file, link arkcmp, and // runregr TEST050. 
// ************************************************************************* struct DefaultDefault { enum DefaultConstants attrEnum; const char *attrName; const char *value; const DefaultValidator *validator; UInt32 flags; }; #define DD(name,value,validator) { name, "" # name "", value, validator } #define FDD(name,value,validator,flags) { name, "" # name "", value, validator, flags } #define XDD(name,value,validator) FDD(name,value,validator,DEFAULT_IS_EXTERNALIZED) #define SDD(name,value,validator) FDD(name,value,validator,DEFAULT_IS_FOR_SUPPORT) #define DDS(name,value,validator) FDD(name,value,validator,DEFAULT_IS_SSD) #define XDDS(name,value,validator) FDD(name,value,validator,DEFAULT_IS_SSD | DEFAULT_IS_EXTERNALIZED) #define SDDS(name,value,validator) FDD(name,value,validator,DEFAULT_IS_SSD | DEFAULT_IS_FOR_SUPPORT) #define DD_____(name,value) DD(name,value,&validateUnknown) #define XDD_____(name,value) XDD(name,value,&validateUnknown) #define SDD_____(name,value) SDD(name,value,&validateUnknown) #define DDS_____(name,value) DDS(name,value,&validateUnknown) #define XDDS_____(name,value) XDDS(name,value,&validateUnknown) #define DDansi_(name,value) DD(name,value,&validateAnsiName) #define XDDansi_(name,value) XDD(name,value,&validateAnsiName) #define DDcoll_(name,value) DD(name,value,&validateCollList) #define DDdskNS(name,value) DD(name,value,&validateDiskListNSK) #define SDDdskNS(name,value) SDD(name,value,&validateDiskListNSK) //SCARTCH_DRIVE_LETTERS* made internal RV 06/21/01 CR 10-010425-2440 #define DDdskNT(name,value) DD(name,value,&validateDiskListNT) #define DDint__(name,value) DD(name,value,&validateInt) #define SDDint__(name,value) SDD(name,value,&validateInt) #define XDDint__(name,value) XDD(name,value,&validateInt) #define DDSint__(name,value) DDS(name,value,&validateInt) #define XDDSint__(name,value) XDDS(name,value,&validateInt) #define XDDintN2(name,value) XDD(name,value,&validateIntNeg2) #define DDintN1__(name,value) DD(name,value,&validateIntNeg1) #define DDpct__(name,value) DD(name,value,&validatePct) #define XDDpct__(name,value) XDD(name,value,&validatePct) #define SDDpct__(name,value) SDD(name,value,&validatePct) #define DDpct1_50(name,value) DD(name,value,&validatePct1_t50) #define DD0_10485760(name,value) DD(name,value,&validate0_10485760) #define DD0_255(name,value) DD(name,value,&validate0_255) #define DD0_200000(name,value) DD(name,value,&validate0_200000) #define XDD0_200000(name,value) XDD(name,value,&validate0_200000) #define DD1_200000(name,value) DD(name,value,&validate1_200000) #define XDDui30_32000(name,value) XDD(name,value,&validate30_32000) #define DDui30_246(name,value) DD(name,value,&validate30_246) #define DDui50_4194303(name,value) DD(name,value,&validate50_4194303) #define DD1_24(name,value) DD(name,value,&validate1_24) #define XDD1_1024(name,value) XDD(name,value,&validate1_1024) #define DD1_1024(name,value) DD(name,value,&validate1_1024) #define DD18_128(name,value) DD(name,value,&validate18_128) #define DD1_128(name,value) DD(name,value,&validate1_128) #define DDui___(name,value) DD(name,value,&validateUI) #define XDDui___(name,value) XDD(name,value,&validateUI) #define SDDui___(name,value) SDD(name,value,&validateUI) #define DDui1__(name,value) DD(name,value,&validateUI1) #define XDDui1__(name,value) XDD(name,value,&validateUI1) #define SDDui1__(name,value) SDD(name,value,&validateUI1) #define DDui2__(name,value) DD(name,value,&validateUI2) #define XDDui2__(name,value) XDD(name,value,&validateUI2) #define DDui8__(name,value) 
DD(name,value,&validateUI8) #define DDui512(name,value) DD(name,value,&validateUI512) #define DDui0_5(name,value) DD(name,value,&validateUIntFrom0To5) #define XDDui0_5(name,value) XDD(name,value,&validateUIntFrom0To5) #define DDui1_6(name,value) DD(name,value,&validateUIntFrom1To6) #define DDui1_10(name,value) DD(name,value,&validateUIntFrom1To10) #define DDui2_10(name,value) DD(name,value,&validateUIntFrom2To10) #define DDui1500_4000(name,value) DD(name,value,&validateUIntFrom1500To4000) #define DDipcBu(name,value) DD(name,value,&validateIPCBuf) #define XDDipcBu(name,value) XDD(name,value,&validateIPCBuf) #define DDflt__(name,value) DD(name,value,&validateFlt) #define XDDflt__(name,value) XDD(name,value,&validateFlt) #define SDDflt__(name,value) SDD(name,value,&validateFlt) #define DDflt0_(name,value) DD(name,value,&validateFlt0) #define XDDflt0_(name,value) XDD(name,value,&validateFlt0) #define SDDflt0_(name,value) SDD(name,value,&validateFlt0) #define DDflte_(name,value) DD(name,value,&validateFltE) #define XDDflte_(name,value) XDD(name,value,&validateFltE) #define SDDflte_(name,value) SDD(name,value,&validateFltE) #define DDflt1_(name,value) DD(name,value,&validateFlt1) #define XDDflt1_(name,value) XDD(name,value,&validateFlt1) #define DDflt_0_1(name,value) DD(name,value,&validateFlt_0_1) #define XDDflt_0_1(name,value) XDD(name,value,&validateFlt_0_1) #define DDkwd__(name,value) DD(name,value,&validateKwd) #define XDDkwd__(name,value) XDD(name,value,&validateKwd) #define SDDkwd__(name,value) SDD(name,value,&validateKwd) #define DDSkwd__(name,value) DDS(name,value,&validateKwd) #define SDDSkwd__(name,value) SDDS(name,value,&validateKwd) #define DDnskv_(name,value) DD(name,value,&validateNSKV) #define DDnsksv(name,value) DD(name,value,&validateNSKSV) #define DDnsksy(name,value) DD(name,value,&validateNSKSY) #define DDnsklo(name,value) DD(name,value,&validateNSKMPLoc) #define DD1_4096(name,value) DD(name,value,&validate1_4096) #define DD0_18(name,value) DD(name,value,&validate0_18) #define DD0_64(name,value) DD(name,value,&validate0_64) #define DD16_64(name,value) DD(name,value,&validate16_64) #define DDvol__(name,value) DD(name,value,&validateVol) #define SDDvol__(name,value) SDD(name,value,&validateVol) #define DDalis_(name,value) DD(name,value,&validateAnsiList) #define XDDalis_(name,value) XDD(name,value,&validateAnsiList) #define XDDpos__(name,value) XDD(name,value,&validatePOSTableSizes) #define SDDpos__(name,value) SDD(name,value,&validatePOSTableSizes) #define DDpos__(name,value) DD(name,value,&validatePOSTableSizes) #define DDtp___(name,value) DD(name,value,&validateTraceStr) #define DDosch_(name,value) DD(name,value,&validateOverrideSchema) #define SDDosch_(name,value) SDD(name,value,&validateOverrideSchema) #define DDpsch_(name,value) DD(name,value,&validatePublicSchema) #define SDDpsch_(name,value) SDD(name,value,&validatePublicSchema) #define DDrlis_(name,value) DD(name,value,&validateRoleNameList) #define XDDrlis_(name,value) XDD(name,value,&validateRoleNameList) #define DDrver_(name,value) DD(name,value,&validateReplIoVersion) #define XDDMVA__(name,value) XDD(name,value,&validateMVAge) #define DDusht_(name,value) DD(name,value,&validate_uint16) const DefaultValidator validateUnknown; const DefaultValidator validateAnsiName(CASE_SENSITIVE_ANSI); // e.g. 
// 'c.s.tbl'
const ValidateDiskListNSK validateDiskListNSK;
const ValidateDiskListNT validateDiskListNT;
ValidateCollationList validateCollList(TRUE/*mp-format*/); // list collations
const ValidateInt validateInt;        // allows neg, zero, pos ints
const ValidateIntNeg1 validateIntNeg1;// allows -1 to +infinity ints
const ValidateIntNeg1 validateIntNeg2;// allows -1 to +infinity ints
const ValidatePercent validatePct;    // allows zero to 100 (integral %age)
const ValidateNumericRange validatePct1_t50(VALID_UINT, 1, (float)50);// allows 1 to 50 (integral %age)
const Validate_0_10485760 validate0_10485760; // allows zero to 10Meg (integer)
const Validate_0_255 validate0_255;   // allows zero to 255 (integer)
const Validate_0_200000 validate0_200000; // allows zero to 200000 (integer)
const Validate_1_200000 validate1_200000; // allows 1 to 200000 (integer)
const Validate_30_32000 validate30_32000; // allows 30 to 32000
const Validate_30_246 validate30_246; // allows 30 to 246
const Validate_50_4194303 validate50_4194303; // allows 50 to 4194303 (integer)
const Validate_1_24 validate1_24;     // allows 1 to 24 (integer)
const ValidateUInt validateUI;        // allows zero and pos
const ValidateUInt1 validateUI1;      // allows pos only (>= 1)
const ValidateUInt2 validateUI2(2);   // allows pos multiples of 2 only
const ValidateUInt2 validateUI8(8);   // pos multiples of 8 only
const ValidateUInt2 validateUI512(512); // pos multiples of 512 only
const ValidateUIntFrom0To5 validateUIntFrom0To5; // integer from 0 to 5
const ValidateUIntFrom1500To4000 validateUIntFrom1500To4000; // integer from 1500 to 4000
const ValidateUIntFrom1To6 validateUIntFrom1To6; // integer from 1 to 6
const ValidateUIntFrom1To10 validateUIntFrom1To10; // integer from 1 to 10
const ValidateUIntFrom2To10 validateUIntFrom2To10; // integer from 2 to 10
const ValidateIPCBuf validateIPCBuf;  // for IPC message buffers (DP2 msgs)
const ValidateFlt validateFlt;        // allows neg, zero, pos (all nums)
const ValidateFltMin0 validateFlt0;   // allows zero and pos
const ValidateFltMinEpsilon validateFltE; // allows pos only (>= epsilon > 0)
const ValidateFltMin1 validateFlt1;   // allows pos only (>= 1)
const ValidateSelectivity ValidateSelectivity; // allows 0 to 1 (float)
const ValidateFlt_0_1 validateFlt_0_1; // allows 0 to 1 (float)
const ValidateKeyword validateKwd;    // allows relevant keywords only
const ValidateNSKVol validateNSKV;    // allows NSK volumes ($X, e.g.)
const ValidateNSKSubVol validateNSKSV; // allows NSK subvols
const ValidateVolumeList validateVol; // allows ':' separ. list of $volumes
const ValidateNSKSystem validateNSKSY; // allows NSK system names
const ValidateNSKMPLoc validateNSKMPLoc; // allows NSK MP cat names($X.Y)
const Validate_1_4096 validate1_4096; // allows 1 to 4096 (integer) which is max character size supported.
const Validate_0_18 validate0_18;     // allows 0 to 18 (integer) because 18 is max precision supported.
const Validate_1_1024 validate1_1024; // allows 1 to 1024 (integer).
const Validate_0_64 validate0_64;     // allows 0 to 64 (integer)
const Validate_16_64 validate16_64;   // allows 16 to 64 (integer)
const Validate_18_128 validate18_128; // allows 18 to 128 (integer).
const Validate_1_128 validate1_128;   // allows 1 to 128 (integer).

// allows ':' separated list of three part ANSI names
const ValidateAnsiList validateAnsiList;

// allows ',' separated list of role names
const ValidateRoleNameList validateRoleNameList;

const ValidatePOSTableSizes validatePOSTableSizes;
const ValidateTraceStr validateTraceStr;
const ValidateOverrideSchema validateOverrideSchema; // check OverrideSchema format
const ValidatePublicSchema validatePublicSchema;
// This high value should be same as default value of REPLICATE_IO_VERSION
const ValidateReplIoVersion validateReplIoVersion(11,17);
const ValidateMVAge validateMVAge;
const Validate_uint16 validate_uint16;

// See the NOTEs above for how to maintain this list!
THREAD_P DefaultDefault defaultDefaults[] = {

  DDflt0_(ACCEPTABLE_INPUTESTLOGPROP_ERROR, "0.5"),

  SDDint__(AFFINITY_VALUE, "-2"),

  SDDkwd__(ALLOW_AUDIT_ATTRIBUTE_CHANGE, "FALSE"),

  // This should be used for testing only. DML should not be executed on
  // non-audited tables.
  DDkwd__(ALLOW_DML_ON_NONAUDITED_TABLE, "OFF"),

  // Used to control if row sampling will use the sample operator in SQL/MX
  // or the DP2_EXECUTOR_POSITION_SAMPLE method in DP2.
  // Valid values are ON, OFF and SYSTEM:
  // ON => choose DP2_ROW_SAMPLING over row sampling in EID, if sampling % is less than 50.
  // OFF => choose EID row sampling over DP2 row sampling regardless of sampling %.
  // SYSTEM => update stats will choose DP2 row sampling if sampling % is less than 5.
  SDDkwd__(ALLOW_DP2_ROW_SAMPLING, "SYSTEM"),

  DDkwd__(ALLOW_FIRSTN_IN_SUBQUERIES, "FALSE"),

  // ON/OFF flag to invoke ghost objects from a non-licensed process
  // (non-super.super user) that cannot use parserflags.
  DDkwd__(ALLOW_GHOST_OBJECTS, "OFF"),

  // This default, if set to ON, will allow Translate nodes (to/from UCS2)
  // to be automatically inserted by the Binder if some children of an
  // ItemExpr are declared as UCS2 and some are declared as ISO88591.
  DDkwd__(ALLOW_IMPLICIT_CHAR_CASTING, "ON"),

  // this default, if set to ON, will allow certain incompatible
  // assignments, like string to int. The assignment will be done by
  // implicitly CASTing one operand to another as long as CAST between
  // the two is supported. See binder for details.
  DDkwd__(ALLOW_INCOMPATIBLE_ASSIGNMENT, "OFF"),

  // this default, if set to ON, will allow certain incompatible
  // comparisons, like string to int. The comparison will be done by
  // implicitly CASTing one operand to another as long as CAST between
  // the two is supported. See binder for details.
  DDkwd__(ALLOW_INCOMPATIBLE_COMPARISON, "OFF"),

  // if set to 2, the replicateNonKeyVEGPred() mdamkey method
  // will try to use inputs to filter out VEG elements that are not
  // local to the associated table to minimize predicate replication.
  // It is defaulted to 0 (off), as there is some concern that this
  // algorithm might produce too few replications, which could lead to
  // incorrect results. Setting the value to 1 will try a simpler
  // optimization.
  DDui___(ALLOW_INPUT_PRED_REPLICATION_REDUCTION,"0"),

  // if set to ON, then isolation level (read committed, etc) could be
  // specified in a regular CREATE VIEW (not a create MV) statement.
  DDkwd__(ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW, "ON"),

  // if set to ON, then we allow subqueries of degree > 1 in the
  // select list.
  DDkwd__(ALLOW_MULTIDEGREE_SUBQ_IN_SELECTLIST, "SYSTEM"),

  // by default, a primary key or unique constraint must be non-nullable.
  // This default, if set, allows them to be nullable.
  // The default value is OFF.
  DDkwd__(ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT, "OFF"),

  // if set to ON, then ORDER BY could be
  // specified in a regular CREATE VIEW (not a create MV) statement.
  DDkwd__(ALLOW_ORDER_BY_IN_CREATE_VIEW, "ON"),

  // rand() function in sql is disabled unless this CQD is turned on
  DDkwd__(ALLOW_RAND_FUNCTION, "OFF"),

  DDkwd__(ALLOW_RANGE_PARTITIONING, "TRUE"),
  DDkwd__(ALLOW_RENAME_OF_MVF_OR_SUBQ, "OFF"),
  DDkwd__(ALLOW_RISKY_UPDATE_WITH_NO_ROLLBACK, "OFF"),
  DDkwd__(ALLOW_SUBQ_IN_SET, "SYSTEM"),
  DDkwd__(ALLOW_UNEXTERNALIZED_MAINTAIN_OPTIONS, "OFF"),

  DDSkwd__(ALTPRI_ESP, ""),
  DDSkwd__(ALTPRI_MASTER, ""),

  DDS_____(AQR_ENTRIES, ""),
  DDkwd__(AQR_WNR, "ON"),
  DDkwd__(AQR_WNR_DELETE_NO_ROWCOUNT, "OFF"),
  DDkwd__(AQR_WNR_EXPLAIN_INSERT, "OFF"),
  DDkwd__(AQR_WNR_INSERT_CLEANUP, "OFF"),
  DDkwd__(AQR_WNR_LOCK_INSERT_TARGET, "OFF"),

  DDkwd__(ARKCMP_FAKE_HW, "OFF"),
  DDkwd__(ASG_FEATURE, "ON"),

  // Set ASM cache
  DDkwd__(ASM_ALLOWED, "ON"),

  // Precompute statistics in ASM
  DDkwd__(ASM_PRECOMPUTE, "OFF"),

  DDkwd__(ASYMMETRIC_JOIN_TRANSFORMATION, "MAXIMUM"),
  DDkwd__(ATTEMPT_ASYNCHRONOUS_ACCESS, "ON"),
  DDkwd__(ATTEMPT_ESP_PARALLELISM, "ON"),
  DDkwd__(ATTEMPT_REVERSE_SYNCHRONOUS_ORDER, "ON"),

  // Online Populate Index uses AuditImage for index tables only.
  // By setting this CQD to ON, one can generate AuditImage for
  // tables also.
  DDkwd__(AUDIT_IMAGE_FOR_TABLES, "OFF"),

  DDkwd__(AUTOMATIC_RECOMPILATION, "OFF"),
  DDkwd__(AUTO_QUERY_RETRY, "SYSTEM"),
  XDDkwd__(AUTO_QUERY_RETRY_WARNINGS, "OFF"),

  DDkwd__(BASE_NUM_PAS_ON_ACTIVE_PARTS, "OFF"),

  // see comments in DefaultConstants.h
  DDkwd__(BIGNUM_IO, "SYSTEM"),

  XDDkwd__(BLOCK_TO_PREVENT_HALLOWEEN, "ON"),

  DDflte_(BMO_CITIZENSHIP_FACTOR, "1."),
  DDui1__(BMO_MEMORY_SIZE, "204800"),

  // percentage of physical main memory available for BMO.
  // This value is only used by HJ and HGB to come up with
  // an initial estimate for the number of clusters to allocate.
  // It does NOT by any means determine the amount of memory
  // used by a BMO. The memory usage depends on the amount of
  // memory available during execution and the amount of input
  // data.
  DDflte_(BMO_MEMORY_USAGE_PERCENT, "5."),

  // When on, try to bulk move nullable and variable length column values.
  DDkwd__(BULK_MOVE_NULL_VARCHAR, "ON"),

  // Temporary fix to bypass volatile schema name checking for non-table
  // objects - ALM Case#4764
  DDkwd__(BYPASS_CHECK_FOR_VOLATILE_SCHEMA_NAME, "OFF"),

  DDkwd__(CACHE_HISTOGRAMS, "ON"),
  DDkwd__(CACHE_HISTOGRAMS_CHECK_FOR_LEAKS, "OFF"),
  DD0_200000(CACHE_HISTOGRAMS_IN_KB, "32768"),
  DDkwd__(CACHE_HISTOGRAMS_MONITOR_HIST_DETAIL, "OFF"),
  DDkwd__(CACHE_HISTOGRAMS_MONITOR_MEM_DETAIL, "OFF"),
  DD_____(CACHE_HISTOGRAMS_MONITOR_OUTPUT_FILE, ""),

  // This is the default time interval, during which we ensure that
  // the histograms in the cache are correct. If the histograms in the
  // cache are older than this default interval, and the HISTOGRAMS
  // table last modification time is older than this, any requested
  // histograms will be checked to see if they were modified more recently
  // than the histograms in cache (the READ_TIME fields will also be
  // updated). If so, the optimizer will refetch histograms.
XDDui___(CACHE_HISTOGRAMS_REFRESH_INTERVAL, "3600"), DD_____(CACHE_HISTOGRAMS_TRACE_OUTPUT_FILE, ""), DDkwd__(CALL_EMBEDDED_ARKCMP, "OFF"), DDui___(CANCEL_MINIMUM_BLOCKING_INTERVAL, "60"), DDkwd__(CASCADED_GROUPBY_TRANSFORMATION, "ON"), XDDansi_(CATALOG, TRAFODION_SYSCAT_LIT), DDkwd__(CAT_ALLOW_NEW_FEATUREX, "OFF"), // Control whether authorization caches immutable users DDkwd__(CAT_AUTHORIZATION_CACHE_IMMUTABLE_USERS, "ON"), DDkwd__(CAT_CREATE_SCHEMA_LABELS_ON_ALL_SEGMENTS, "ON"), DDkwd__(CAT_DEFAULT_COMPRESSION, "NONE"), // Metadata table distribution schemes // OFF - Place all metadata tables on one single disk // LOCAL_NODE - Distribute metadata tables across disks on local segment // where first schema in the catalog is created // ON - Distribute metadata tables across disks in local segment // and visible remote segments SDDkwd__(CAT_DISTRIBUTE_METADATA, "ON"), //SDDkwd__(CAT_DISTRIBUTE_METADATA, "ON"), // This disables Query Invalidation processing in catman when set to "OFF" SDDkwd__(CAT_ENABLE_QUERY_INVALIDATION, "ON"), // Throw an error if a column is part of the store by clause and // is not defined as NOT NULL return an error DDkwd__(CAT_ERROR_ON_NOTNULL_STOREBY, "ON"), DDui1__(CAT_FS_TIMEOUT, "9000"), // Used to make ignore "already exists" error in Create and // "does not exist" error in Drop. DDkwd__(CAT_IGNORE_ALREADY_EXISTS_ERROR, "OFF"), DDkwd__(CAT_IGNORE_DOES_NOT_EXIST_ERROR, "OFF"), // Used to make catman test134 predictable DDkwd__(CAT_IGNORE_EMPTY_CATALOGS, "OFF"), // Catalog Manager internal support for REPLICATE AUTHORIZATION DDkwd__(CAT_IGNORE_REPL_AUTHIDS_ERROR, "OFF"), // This enables the DB Limits functionality. If set to OFF, then blocksize // is restricted to 4096 and clustering key size is limited to 255 bytes. // DB Limits checking is turned off on NT since NT's DP2 does not support // large blocks or keys. DDkwd__(CAT_LARGE_BLOCKS_LARGE_KEYS, "ON"), // If DB Limits is enabled, then increase the default blocksize to 32K // on NSK if the object's clustering key length is larger than this value. DDui1__(CAT_LARGE_BLOCKS_MAX_KEYSIZE, "1"), // If DB Limits is enabled, then increase the default blocksize to 32K // on NSK if the object's row size is larger than this value. DDui1__(CAT_LARGE_BLOCKS_MAX_ROWSIZE, "1"), // Controls how pathnames for routines/procedures/SPJs are interpreted DDkwd__(CAT_LIBRARY_PATH_RELATIVE, "OFF"), DDkwd__(CAT_MORE_SCHEMA_PRIVS, "ON"), DDkwd__(CAT_OVERRIDE_CREATE_DISABLE, "OFF"), // This forces an rcb to be created with a different version number // A "0" means to take the current mxv version DDui1__(CAT_RCB_VERSION, "0"), // Controls creation of column privileges for object-level privileges DDkwd__(CAT_REDUNDANT_COLUMN_PRIVS, "ON"), // If schema owner is object owner is ON, then the default owner for objects is the // schema owner. DDkwd__(CAT_SCHEMA_OWNER_IS_OBJECT_OWNER, "OFF"), DDkwd__(CAT_TEST_BOOL, "OFF"), DDint__(CAT_TEST_POINT, "0"), DD_____(CAT_TEST_STRING, "NONE"), // CMP_ERR_LOG_FILE indicates where to save a log for certain errors. 
DD_____(CMP_ERR_LOG_FILE, "tdm_arkcmp_errors.log"), DDkwd__(COLLECT_REORG_STATS, "ON"), // tracking compilers specific defaults DDint__(COMPILER_TRACKING_INTERVAL, "0"), DD_____(COMPILER_TRACKING_LOGFILE, "NONE"), DDkwd__(COMPILER_TRACKING_LOGTABLE, "OFF"), DDkwd__(COMPILE_TIME_MONITOR, "OFF"), DD_____(COMPILE_TIME_MONITOR_LOG_ALLTIME_ONLY, "OFF"), DD_____(COMPILE_TIME_MONITOR_OUTPUT_FILE, "NONE"), // complexity threshold beyond which a // MultiJoin query is considered too complex DDflt0_(COMPLEX_MJ_QUERY_THRESHOLD, "1000000"), // Switch between new aligned internal format and exploded format DDkwd__(COMPRESSED_INTERNAL_FORMAT, "SYSTEM"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_BMO, "SYSTEM"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_BMO_AFFINITY, "ON"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_BULK_MOVE, "ON"), DDflt0_(COMPRESSED_INTERNAL_FORMAT_DEFRAG_RATIO, "0.30"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_EXPLAIN, "OFF"), DDui1__(COMPRESSED_INTERNAL_FORMAT_MIN_ROW_SIZE, "32"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_ROOT_DOES_CONVERSION, "OFF"), DDflt0_(COMPRESSED_INTERNAL_FORMAT_ROW_SIZE_ADJ, "0.90"), XDDkwd__(COMPRESSION_TYPE, "NONE"), // These are switches and variables to use for compiler debugging DDkwd__(COMP_BOOL_1, "OFF"), DDkwd__(COMP_BOOL_10, "OFF"), DDkwd__(COMP_BOOL_100, "OFF"), DDkwd__(COMP_BOOL_101, "OFF"), DDkwd__(COMP_BOOL_102, "OFF"), DDkwd__(COMP_BOOL_103, "OFF"), DDkwd__(COMP_BOOL_104, "OFF"), DDkwd__(COMP_BOOL_105, "OFF"), DDkwd__(COMP_BOOL_106, "OFF"), DDkwd__(COMP_BOOL_107, "ON"), // Being used for testing default predicate synthesis in cardinality estimation DDkwd__(COMP_BOOL_108, "ON"), // Being used for testing default predicate synthesis in cardinality estimation DDkwd__(COMP_BOOL_109, "OFF"), DDkwd__(COMP_BOOL_11, "OFF"), DDkwd__(COMP_BOOL_110, "OFF"), DDkwd__(COMP_BOOL_111, "OFF"), DDkwd__(COMP_BOOL_112, "OFF"), DDkwd__(COMP_BOOL_113, "OFF"), DDkwd__(COMP_BOOL_114, "OFF"), DDkwd__(COMP_BOOL_115, "OFF"), DDkwd__(COMP_BOOL_116, "OFF"), DDkwd__(COMP_BOOL_117, "OFF"), DDkwd__(COMP_BOOL_118, "OFF"), // soln 10-100508-0135 - allow undo of fix. 
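  // (Illustrative aside; COMP_BOOL_118 here is just an arbitrary example.)
  // Any of these debug switches can be flipped for a session without a
  // rebuild, using the CQD syntax described in NOTE 3 at the top of this
  // table:
  //
  //   CONTROL QUERY DEFAULT COMP_BOOL_118 'ON';
  //   CONTROL QUERY DEFAULT COMP_BOOL_118 'SYSTEM';  -- revert to the value above
  //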
DDkwd__(COMP_BOOL_119, "OFF"), DDkwd__(COMP_BOOL_12, "OFF"), DDkwd__(COMP_BOOL_120, "OFF"), DDkwd__(COMP_BOOL_121, "OFF"), DDkwd__(COMP_BOOL_122, "ON"), // Solution 10-081203-7708 fix DDkwd__(COMP_BOOL_123, "OFF"), DDkwd__(COMP_BOOL_124, "OFF"), DDkwd__(COMP_BOOL_125, "ON"), DDkwd__(COMP_BOOL_126, "OFF"), DDkwd__(COMP_BOOL_127, "ON"), DDkwd__(COMP_BOOL_128, "ON"), DDkwd__(COMP_BOOL_129, "ON"), DDkwd__(COMP_BOOL_13, "OFF"), DDkwd__(COMP_BOOL_130, "ON"), DDkwd__(COMP_BOOL_131, "OFF"), DDkwd__(COMP_BOOL_132, "OFF"), DDkwd__(COMP_BOOL_133, "OFF"), DDkwd__(COMP_BOOL_134, "ON"), DDkwd__(COMP_BOOL_135, "ON"), DDkwd__(COMP_BOOL_136, "OFF"), DDkwd__(COMP_BOOL_137, "OFF"), // ON enables logging of RewriteJoinPred DDkwd__(COMP_BOOL_138, "OFF"), // ON disables tryToRewriteJoinPredicate DDkwd__(COMP_BOOL_139, "OFF"), DDkwd__(COMP_BOOL_14, "ON"), DDkwd__(COMP_BOOL_140, "ON"), DDkwd__(COMP_BOOL_141, "ON"), // Used for testing MC UEC adjustment for uplifting join cardinality DDkwd__(COMP_BOOL_142, "ON"), // Used for turning on Compile Time Statistics caching DDkwd__(COMP_BOOL_143, "OFF"), DDkwd__(COMP_BOOL_144, "OFF"), // only Key columns usage as a part of materialization of disjuncts is controlled by the CQD DDkwd__(COMP_BOOL_145, "ON"), // Used for selectivity adjustment for MC Joins DDkwd__(COMP_BOOL_146, "OFF"), DDkwd__(COMP_BOOL_147, "OFF"), DDkwd__(COMP_BOOL_148, "ON"), // Used for GroupBy Cardinality Enhancement for complex expressions DDkwd__(COMP_BOOL_149, "ON"), // Used for testing multi-col uniqueness cardinality enhancement DDkwd__(COMP_BOOL_15, "OFF"), DDkwd__(COMP_BOOL_150, "OFF"), DDkwd__(COMP_BOOL_151, "OFF"), DDkwd__(COMP_BOOL_152, "OFF"), DDkwd__(COMP_BOOL_153, "ON"), // skew buster: ON == use round robin, else Co-located. DDkwd__(COMP_BOOL_154, "OFF"), DDkwd__(COMP_BOOL_155, "OFF"), DDkwd__(COMP_BOOL_156, "ON"), // Used by RTS to turn on RTS Stats collection for ROOT operators DDkwd__(COMP_BOOL_157, "OFF"), DDkwd__(COMP_BOOL_158, "OFF"), DDkwd__(COMP_BOOL_159, "OFF"), DDkwd__(COMP_BOOL_16, "OFF"), DDkwd__(COMP_BOOL_160, "OFF"), DDkwd__(COMP_BOOL_161, "OFF"), DDkwd__(COMP_BOOL_162, "ON"), // transform NOT EXISTS subquery using anti_semijoin instead of Join-Agg DDkwd__(COMP_BOOL_163, "OFF"), DDkwd__(COMP_BOOL_164, "OFF"), DDkwd__(COMP_BOOL_165, "ON"), // set to 'ON' in M5 for SQ DDkwd__(COMP_BOOL_166, "OFF"), // ON --> turn off fix for 10-100310-8659. DDkwd__(COMP_BOOL_167, "OFF"), DDkwd__(COMP_BOOL_168, "ON"), DDkwd__(COMP_BOOL_169, "OFF"), DDkwd__(COMP_BOOL_17, "ON"), DDkwd__(COMP_BOOL_170, "ON"), DDkwd__(COMP_BOOL_171, "OFF"), DDkwd__(COMP_BOOL_172, "OFF"), DDkwd__(COMP_BOOL_173, "OFF"), // fix: make odbc params nullable DDkwd__(COMP_BOOL_174, "ON"), // internal usage: merge stmt DDkwd__(COMP_BOOL_175, "OFF"), // internal usage: merge stmt DDkwd__(COMP_BOOL_176, "OFF"), DDkwd__(COMP_BOOL_177, "OFF"), DDkwd__(COMP_BOOL_178, "OFF"), DDkwd__(COMP_BOOL_179, "OFF"), DDkwd__(COMP_BOOL_18, "OFF"), DDkwd__(COMP_BOOL_180, "OFF"), DDkwd__(COMP_BOOL_181, "OFF"), DDkwd__(COMP_BOOL_182, "OFF"), // internal usage DDkwd__(COMP_BOOL_183, "OFF"), DDkwd__(COMP_BOOL_184, "ON"), // ON => use min probe size for mdam. Using min probe size of 1 or 2 currently has a bug so this is not the default. 
OFF => use default probe size of 100 DDkwd__(COMP_BOOL_185, "ON"), //Fix, allows extract(year from current_date) to be treated as a userinput DDkwd__(COMP_BOOL_186, "OFF"), DDkwd__(COMP_BOOL_187, "OFF"), // reserved for internal usage DDkwd__(COMP_BOOL_188, "OFF"), DDkwd__(COMP_BOOL_189, "OFF"), // reserved for internal usage DDkwd__(COMP_BOOL_19, "OFF"), DDkwd__(COMP_BOOL_190, "OFF"), DDkwd__(COMP_BOOL_191, "OFF"), // Temp for UDF metadata switch DDkwd__(COMP_BOOL_192, "OFF"), DDkwd__(COMP_BOOL_193, "OFF"), DDkwd__(COMP_BOOL_194, "OFF"), DDkwd__(COMP_BOOL_195, "OFF"), // used to enable unexternalized get statistics options. DDkwd__(COMP_BOOL_196, "OFF"), DDkwd__(COMP_BOOL_197, "OFF"), DDkwd__(COMP_BOOL_198, "OFF"), DDkwd__(COMP_BOOL_199, "ON"), DDkwd__(COMP_BOOL_2, "OFF"), DDkwd__(COMP_BOOL_20, "OFF"), // ON -> disable ability of stmt to be canceled. DDkwd__(COMP_BOOL_200, "OFF"), DDkwd__(COMP_BOOL_201, "OFF"), DDkwd__(COMP_BOOL_202, "ON"),// For SQ: // ON: excluding fixup cost // for EXCHANGE for // anti-surf logic; // OFF: do include. // Change to ON in M5 DDkwd__(COMP_BOOL_203, "OFF"), DDkwd__(COMP_BOOL_205, "OFF"), // enable reorg on metadata DDkwd__(COMP_BOOL_206, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_207, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_208, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_209, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_21, "OFF"), DDkwd__(COMP_BOOL_210, "ON"), DDkwd__(COMP_BOOL_211, "ON"), // controls removing constants from group expression DDkwd__(COMP_BOOL_215, "OFF"), DDkwd__(COMP_BOOL_217, "OFF"), DDkwd__(COMP_BOOL_219, "OFF"), // for InMem obj defn DDkwd__(COMP_BOOL_22, "ON"), DDkwd__(COMP_BOOL_220, "OFF"), // UserLoad fastpath opt DDkwd__(COMP_BOOL_221, "OFF"), // unnests a subquery even when there is no explicit correlation DDkwd__(COMP_BOOL_222, "ON"), // R2.5 BR features enabled DDkwd__(COMP_BOOL_223, "OFF"), // enable undocumented options // bulk replicate features DDkwd__(COMP_BOOL_224, "OFF"), // enable undocumented // bulk replicate features DDkwd__(COMP_BOOL_225, "ON"), // enable optimized esps allocation DDkwd__(COMP_BOOL_226, "OFF"), // ON enables UNLOAD feature // for disk label stats. 
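  // A sketch of how compiler code typically consults one of these keyword
  // switches (the CmpCommon::getDefault()/DF_ON idiom; exact call sites
  // vary, and some code uses the NADEFAULT macro shown in NOTE 2 instead):
  //
  //   if (CmpCommon::getDefault(COMP_BOOL_226) == DF_ON)
  //     {
  //       // take the experimental code path guarded by this switch
  //     }
  //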
DDkwd__(COMP_BOOL_23, "ON"), DDkwd__(COMP_BOOL_24, "OFF"), // AS enhancement to adjust maxDoP DDkwd__(COMP_BOOL_25, "OFF"), // Being used in Cardinality Estimation DDkwd__(COMP_BOOL_26, "OFF"), DDkwd__(COMP_BOOL_27, "OFF"), DDkwd__(COMP_BOOL_28, "OFF"), DDkwd__(COMP_BOOL_29, "OFF"), DDkwd__(COMP_BOOL_3, "OFF"), DDkwd__(COMP_BOOL_30, "ON"), DDkwd__(COMP_BOOL_31, "OFF"), DDkwd__(COMP_BOOL_32, "OFF"), DDkwd__(COMP_BOOL_33, "OFF"), DDkwd__(COMP_BOOL_34, "OFF"), DDkwd__(COMP_BOOL_35, "OFF"), DDkwd__(COMP_BOOL_36, "OFF"), DDkwd__(COMP_BOOL_37, "OFF"), DDkwd__(COMP_BOOL_38, "OFF"), DDkwd__(COMP_BOOL_39, "OFF"), DDkwd__(COMP_BOOL_4, "OFF"), DDkwd__(COMP_BOOL_40, "ON"), DDkwd__(COMP_BOOL_41, "OFF"), DDkwd__(COMP_BOOL_42, "ON"), DDkwd__(COMP_BOOL_43, "OFF"), DDkwd__(COMP_BOOL_44, "OFF"), DDkwd__(COMP_BOOL_45, "ON"), DDkwd__(COMP_BOOL_46, "OFF"), DDkwd__(COMP_BOOL_47, "ON"), DDkwd__(COMP_BOOL_48, "ON"), // Turned "Off" because of Regression failure DDkwd__(COMP_BOOL_49, "OFF"), DDkwd__(COMP_BOOL_5, "ON"), DDkwd__(COMP_BOOL_50, "OFF"), DDkwd__(COMP_BOOL_51, "OFF"), DDkwd__(COMP_BOOL_52, "OFF"), DDkwd__(COMP_BOOL_53, "ON"), //Turned "ON" for OCB Cost DDkwd__(COMP_BOOL_54, "OFF"), DDkwd__(COMP_BOOL_55, "OFF"), DDkwd__(COMP_BOOL_56, "OFF"), DDkwd__(COMP_BOOL_57, "ON"), DDkwd__(COMP_BOOL_58, "OFF"), DDkwd__(COMP_BOOL_59, "OFF"), DDkwd__(COMP_BOOL_6, "OFF"), // comp_bool_60 is used in costing of an exchange operator. This is // used in deciding to use Nodemap decoupling and other exchange // costing logic. DDkwd__(COMP_BOOL_60, "ON"), DDkwd__(COMP_BOOL_61, "OFF"), DDkwd__(COMP_BOOL_62, "OFF"), DDkwd__(COMP_BOOL_63, "OFF"), DDkwd__(COMP_BOOL_64, "OFF"), DDkwd__(COMP_BOOL_65, "OFF"), DDkwd__(COMP_BOOL_66, "OFF"), DDkwd__(COMP_BOOL_67, "ON"), // Being used in Cardinality Estimation DDkwd__(COMP_BOOL_68, "ON"), DDkwd__(COMP_BOOL_69, "OFF"), DDkwd__(COMP_BOOL_7, "OFF"), DDkwd__(COMP_BOOL_70, "ON"), DDkwd__(COMP_BOOL_71, "OFF"), DDkwd__(COMP_BOOL_72, "OFF"), DDkwd__(COMP_BOOL_73, "OFF"), DDkwd__(COMP_BOOL_74, "ON"), DDkwd__(COMP_BOOL_75, "ON"), DDkwd__(COMP_BOOL_76, "ON"), DDkwd__(COMP_BOOL_77, "OFF"), DDkwd__(COMP_BOOL_78, "OFF"), DDkwd__(COMP_BOOL_79, "ON"), DDkwd__(COMP_BOOL_8, "OFF"), DDkwd__(COMP_BOOL_80, "OFF"), DDkwd__(COMP_BOOL_81, "OFF"), DDkwd__(COMP_BOOL_82, "OFF"), DDkwd__(COMP_BOOL_83, "ON"), DDkwd__(COMP_BOOL_84, "OFF"), DDkwd__(COMP_BOOL_85, "OFF"), DDkwd__(COMP_BOOL_86, "OFF"), DDkwd__(COMP_BOOL_87, "OFF"), DDkwd__(COMP_BOOL_88, "OFF"), DDkwd__(COMP_BOOL_89, "OFF"), DDkwd__(COMP_BOOL_9, "OFF"), DDkwd__(COMP_BOOL_90, "ON"), DDkwd__(COMP_BOOL_91, "OFF"), DDkwd__(COMP_BOOL_92, "OFF"), // used by generator. DDkwd__(COMP_BOOL_93, "ON"), // turn on pushdown for IUDs involving MVs. 
Default is off DDkwd__(COMP_BOOL_94, "OFF"), DDkwd__(COMP_BOOL_95, "OFF"), DDkwd__(COMP_BOOL_96, "OFF"), DDkwd__(COMP_BOOL_97, "OFF"), DDkwd__(COMP_BOOL_98, "ON"), DDkwd__(COMP_BOOL_99, "OFF"), DDflt0_(COMP_FLOAT_0, "0.002"), DDflt0_(COMP_FLOAT_1, "0.00002"), DDflt0_(COMP_FLOAT_2, "0"), DDflt0_(COMP_FLOAT_3, "0.01"), DDflt0_(COMP_FLOAT_4, "1.1"), DDflt__(COMP_FLOAT_5, "0.01"), // For Split Top cost adjustments : 0.25 DDflt__(COMP_FLOAT_6, "0.67"), // used to set the fudge factor which // is used to estimate cardinality of an // aggregate function in an equi-join expression DDflt__(COMP_FLOAT_7, "1.5"), DDflt__(COMP_FLOAT_8, "0.8"), // min expected #groups when HGB under right side of NLJ DDflt__(COMP_FLOAT_9, "1002.0"), DDint__(COMP_INT_0, "5000"), DDint__(COMP_INT_1, "0"), DDint__(COMP_INT_10, "3"), DDint__(COMP_INT_11, "-1"), DDint__(COMP_INT_12, "0"), DDint__(COMP_INT_13, "0"), DDint__(COMP_INT_14, "0"), DDint__(COMP_INT_15, "7"), DDint__(COMP_INT_16, "1000000"), DDint__(COMP_INT_17, "1000000"), DDint__(COMP_INT_18, "1"), DDint__(COMP_INT_19, "2"), DDint__(COMP_INT_2, "1"), DDint__(COMP_INT_20, "4"), DDint__(COMP_INT_21, "0"), DDint__(COMP_INT_22, "0"), // used to control old parser based INLIST transformation // 0 ==> OFF, positive value implies ON and has the effect of implicitly shutting down much of OR_PRED transformations // this cqd has been retained as a fallback in case OR_PRED has bugs. DDint__(COMP_INT_23, "22"), DDint__(COMP_INT_24, "1000000000"), DDint__(COMP_INT_25, "0"), DDint__(COMP_INT_26, "1"), DDint__(COMP_INT_27, "0"), DDint__(COMP_INT_28, "0"), DDint__(COMP_INT_29, "0"), DDint__(COMP_INT_3, "5"), DDint__(COMP_INT_30, "5"), DDint__(COMP_INT_31, "5"), DDint__(COMP_INT_32, "100"), DDint__(COMP_INT_33, "0"), DDint__(COMP_INT_34, "10000"), // lower bound: 10000 DDint__(COMP_INT_35, "500000"), // upper bound: 200000 DDint__(COMP_INT_36, "128"), // Bounds for producer for OCB DDint__(COMP_INT_37, "0"), DDint__(COMP_INT_38, "0"), // test master's abend DDint__(COMP_INT_39, "0"), // test esp's abend DDint__(COMP_INT_4, "400"), DDint__(COMP_INT_40, "10"), // this defines the percentage of selectivity after applying equality predicates on single column histograms // beyond which the optimizer should use MC stats DDint__(COMP_INT_41, "0"), DDint__(COMP_INT_42, "0"), DDint__(COMP_INT_43, "3"), // this is only for testing purposes. Once HIST_USE_SAMPLE_FOR_CARDINALITY_ESTIMATION is set to ON by default, the value of this CQD should be adjusted DDint__(COMP_INT_44, "1000000"), // frequency threshold above which // a boundary value will be inclded // in the frequentValueList (stats) DDint__(COMP_INT_45, "300"), DDint__(COMP_INT_46, "10"), DDint__(COMP_INT_47, "0"), DDint__(COMP_INT_48, "32"), // # trips thru scheduler task list before eval of CPU time limit. DDint__(COMP_INT_49, "0"), DDint__(COMP_INT_5, "0"), DDint__(COMP_INT_50, "0"), DDint__(COMP_INT_51, "0"), DDint__(COMP_INT_52, "0"), DDint__(COMP_INT_53, "0"), DDint__(COMP_INT_54, "0"), DDint__(COMP_INT_55, "0"), DDint__(COMP_INT_56, "0"), DDint__(COMP_INT_57, "0"), DDint__(COMP_INT_58, "0"), DDint__(COMP_INT_59, "0"), DDint__(COMP_INT_6, "400"), // comp_int_60 is used in costing of an exchnage operator. It is // used to indicate buffer size of a DP2 exchange when sending // messages down. 
DDint__(COMP_INT_60, "4"), DDint__(COMP_INT_61, "0"), // Exchange operator default value DDint__(COMP_INT_62, "10000"), DDint__(COMP_INT_63, "10000"), // SG Insert issue DDint__(COMP_INT_64, "0"), DDint__(COMP_INT_65, "0"), DDint__(COMP_INT_66, "0"), // to change #buffers per flushed cluster DDint__(COMP_INT_67, "8"), // to test #outer-buffers per a batch DDint__(COMP_INT_68, "0"), DDint__(COMP_INT_69, "0"), DDint__(COMP_INT_7, "10000000"), DDint__(COMP_INT_70, "1000000"), DDint__(COMP_INT_71, "0"), DDint__(COMP_INT_72, "0"), // if set to 1, allows keyPredicate to be inserted without passing key col. DDint__(COMP_INT_73, "1"), // if set to 1, disables cursor_delete plan if there are no alternate indexes. DDint__(COMP_INT_74, "0"), DDint__(COMP_INT_75, "0"), DDint__(COMP_INT_76, "0"), DDint__(COMP_INT_77, "0"), DDint__(COMP_INT_78, "0"), DDint__(COMP_INT_79, "0"), // this is used temporaraly as value for parallel threshold // in case ATTEMPT_ESP_PARALLELISM is set to MAXIMUM DDint__(COMP_INT_8, "20"), DDint__(COMP_INT_80, "3"), DDint__(COMP_INT_81, "0"), DDint__(COMP_INT_82, "0"), DDint__(COMP_INT_83, "0"), // max num of retries after parl purgedata open/control call errs.Default 25. DDint__(COMP_INT_84, "25"), // delay between each paral pd error retry. Default is 2 seconds. DDint__(COMP_INT_85, "2"), DDint__(COMP_INT_86, "0"), DDint__(COMP_INT_87, "0"), DDint__(COMP_INT_88, "0"), DDint__(COMP_INT_89, "2"), DDint__(COMP_INT_9, "0"), DDint__(COMP_INT_90, "0"), DDint__(COMP_INT_91, "0"), DDint__(COMP_INT_92, "0"), DDint__(COMP_INT_93, "0"), DDint__(COMP_INT_94, "0"), DDint__(COMP_INT_95, "0"), DDint__(COMP_INT_96, "0"), DDint__(COMP_INT_97, "0"), DDint__(COMP_INT_98, "512"), DDint__(COMP_INT_99, "10"), DD_____(COMP_STRING_1, "NONE"), DD_____(COMP_STRING_2, ""), DD_____(COMP_STRING_3, ""), DD_____(COMP_STRING_4, ""), DD_____(COMP_STRING_5, ""), DD_____(COMP_STRING_6, ""), // Configured_memory_for defaults are all measured in KB DDui___(CONFIGURED_MEMORY_FOR_BASE, "16384"), DDui___(CONFIGURED_MEMORY_FOR_DAM, "20480"), DDui___(CONFIGURED_MEMORY_FOR_MINIMUM_HASH, "20480"), DDui___(CONFIGURED_MEMORY_FOR_MXESP, "8192"), DDkwd__(CONSTANT_FOLDING, "OFF"), DDkwd__(COSTING_SHORTCUT_GROUPBY_FIX, "ON"), DDflt0_(COST_PROBE_DENSITY_THRESHOLD, ".25"), // As of 3/23/98 the tupp desc. length is 12 bytes. Change when executor // changes. DDflt0_(COST_TUPP_DESC_LENGTH_IN_KB, "0.01171875"), DDflt0_(CPUCOST_COMPARE_COMPLEX_DATA_TYPE_OVERHEAD, "10."), DDflt0_(CPUCOST_COMPARE_COMPLEX_DATA_TYPE_PER_BYTE, ".1"), // Same as CPUCOST_PREDICATE_COMPARISON // Change HH_OP_PROBE_HASH_TABLE when you change this value: DDflt0_(CPUCOST_COMPARE_SIMPLE_DATA_TYPE, ".200"), // no cost overhead assumed: DDflt0_(CPUCOST_COPY_ROW_OVERHEAD, "0."), // change CPUCOST_HASH_PER_KEY when changing this value DDflt0_(CPUCOST_COPY_ROW_PER_BYTE, ".0007"), DDflt0_(CPUCOST_COPY_SIMPLE_DATA_TYPE, ".005"), // This is a per data request overhead cost paid by the cpu DDflt0_(CPUCOST_DATARQST_OVHD, ".01"), DDflt0_(CPUCOST_DM_GET, ".001"), DDflt0_(CPUCOST_DM_UPDATE, ".001"), DDflt0_(CPUCOST_ENCODE_PER_BYTE, ".002"), DDflt0_(CPUCOST_ESP_INITIALIZATION, "10"), // The previous observation had calculated the number of seconds to // aggregate incorrectly. 
Now: // Number of seconds to scan 100,000 rows @ 208 bytes: 4 // Number of seconds to scan 100,000 rows @ 208 bytes and aggregate // 15 aggregates: 17 // Thus, number of seconds per aggregate = (17-4)/15 = 0.866667 // CPUCOST_PER_ROW = 1.13333/(0.00005*100,000) = 0.1733 // previous observation // It takes 13.96 seconds to aggregate 99,999 rows using // 15 expressions, thus at 0.00005 et_cpu, we have that // the cost to eval an arith op is: // 6.14 / (0.00005 * 99,9999 * 15) = 0.0819 DDflt0_(CPUCOST_EVAL_ARITH_OP, ".0305"), DDflt0_(CPUCOST_EVAL_FUNC_DEFAULT, "10."), DDflt0_(CPUCOST_EVAL_LOGICAL_OP, "1."), DDflt0_(CPUCOST_EVAL_SIMPLE_PREDICATE, "1."), DDflt0_(CPUCOST_EXCHANGE_COST_PER_BYTE, ".002"), DDflt0_(CPUCOST_EXCHANGE_COST_PER_ROW, ".002"), DDflt0_(CPUCOST_EXCHANGE_INTERNODE_COST_PER_BYTE, ".008"), DDflt0_(CPUCOST_EXCHANGE_MAPPING_FUNCTION, ".01"), // was 0.1, but now 0.011 // XDDflt0_(CPUCOST_EXCHANGE_REMOTENODE_COST_PER_BYTE, ".011"), // Set the additional cost of copying a byte to message buffer for // remote node to be the same as for inter node, 0.01 // Also change it to be internalized DDflt0_(CPUCOST_EXCHANGE_REMOTENODE_COST_PER_BYTE, ".01"), DDflt0_(CPUCOST_EXCHANGE_SPLIT_FUNCTION, ".01"), // Assume // CPUCOST_HASH_PER_KEY = 4 * CPUCOST_HASH_PER_BYTE // History: // Before 01/06/98: 0.005 DDflt0_(CPUCOST_HASH_PER_BYTE, ".057325"), // Assume // CPUCOST_HASH_PER_KEY = 4 * CPUCOST_HASH_PER_BYTE // From observation: // For a case when all the hash table fits into memory: // 01/05/98: 42,105 rows inserted per second @ 0.00005 seconds // per thousand of instructions, give: // seconds to insert one row = 1/42105 = 0.00002375 // thd. of instructions per row inserted = 1/42105/0.00005 = 0.4750 // The cost is distributed as follows: // CPUCOST_HASH_PER_KEY + CPUCOST_HASH_PER_BYTE*4 + // HH_OP_INSERT_ROW_TO_CHAIN + CPUCOST_COPY_ROW_PER_BYTE * 4 // = 0.4750 // Thus we have: // 2* CPUCOST_HASH_PER_KEY + 0.01 + 0.0016*4 = 0.4750 // -> CPUCOST_HASH_PER_KEY = 0.4586/2 = 0.2293 // History: // Before 01/06/98: 0.02 // Change // CPUCOST_HASH_PER_BYTE // when changing this value DDflt0_(CPUCOST_HASH_PER_KEY, "1.29"), DDflt0_(CPUCOST_LIKE_COMPARE_OVERHEAD, "10."), DDflt0_(CPUCOST_LIKE_COMPARE_PER_BYTE, ".1"), DDflt0_(CPUCOST_LOCK_ROW, ".01"), DDflt0_(CPUCOST_NJ_TUPLST_FF, "10."), // Observation (A971125_1): // CPU time to scan 100,000 rows with no exe pred: 10 // CPU time to scan 100,000 rows with an exe pred like // nonkeycol < K: 11 // CPU time spend in every row: 1/100,000 = .00001 // Thus, at 0.00005 th. inst. per sec we have: 0.00001/0.00005 = // 0.2 thousand inst. to evaluate every row: // // Predicate comparison is very expensive right now (10/08/97) // (cost it that it takes like 1000 instruction for one comparison) // 10/08/97: 1. // Change // CPUCOST_COMPARE_SIMPLE_DATA_TYPE // when you change this value: // History // Before 04/30/98: .2 DDflt0_(CPUCOST_PREDICATE_COMPARISON, ".08"), // Cost of copying the data from disk to the DP2 Cache: DDflt0_(CPUCOST_SCAN_DSK_TO_DP2_PER_KB, "2.5"), DDflt0_(CPUCOST_SCAN_DSK_TO_DP2_PER_SEEK, "0.0"), // The communication between DP2 and ExeInDp2 requires to encode // and decode the key. DDflt0_(CPUCOST_SCAN_KEY_LENGTH, "0."), // The communication between DP2 and ExeInDp2 is complex and // ever changing. 
The following factor is introduced to // make the costing of scan fit observed CPU time for the scan: DDflt0_(CPUCOST_SCAN_OVH_PER_KB, "0.984215"), DDflt0_(CPUCOST_SCAN_OVH_PER_ROW, "0.0"), // It takes about 1/3 of a second to open a table, thus with a // 0.00005 ff for cpu elapsed time we get: // 1/3/0.00005 = 7000 thousands instructions // CPUCOST_SUBSET_OPEN lumps together all the overhead needed // to set-up the access to each partition. Thus it is a blocking // cost, nothing can overlap with it. DDflt0_(CPUCOST_SUBSET_OPEN, "7000"), DDflt0_(CPUCOST_SUBSET_OPEN_AFTER_FIRST, "1250"), DDflt0_(CPUCOST_TUPLE_REFERENCE, ".001"), DDui___(CREATE_DEFINITION_SCHEMA_VERSION, "0"), DDkwd__(CREATE_EXTERNAL_USER_NAME_INDEX, "OFF"), DDkwd__(CREATE_FOR_NO_RDF_REPLICATE, "OFF"), DDkwd__(CREATE_METADATA_TABLE, "OFF"), DDkwd__(CREATE_OBJECTS_IN_METADATA_ONLY, "OFF"), DDkwd__(CROSS_PRODUCT_CONTROL, "ON"), SDDui___(CYCLIC_ESP_PLACEMENT, "1"), // if this one is "ON" it overwrites optimizer heuristics 4 & 5 as "ON" // if it's "OFF" then the defaults of the two heuristics will be used DDkwd__(DATA_FLOW_OPTIMIZATION, "ON"), // DDL Default location support DDdskNS(DDL_DEFAULT_LOCATIONS, ""), DDkwd__(DDL_EXPLAIN, "OFF"), DDkwd__(DDL_TRANSACTIONS, "OFF"), // We ignore this setting for the first (SYSTEM_DEFAULTS) table open+read. DDkwd__(DEFAULTS_TABLE_ACCESS_WARNINGS, "OFF"), SDDkwd__(DEFAULT_CHARSET, (char *)SQLCHARSETSTRING_ISO88591), XDDui1__(DEFAULT_DEGREE_OF_PARALLELISM, "2"), SDDkwd__(DEFAULT_SCHEMA_ACCESS_ONLY, "OFF"), SDDkwd__(DEFAULT_SCHEMA_NAMETYPE, "SYSTEM"), // These DEF_xxx values of "" get filled in by updateSystemParameters(). #define def_DEF_CHUNK_SIZE 5000000.0 #define str_DEF_CHUNK_SIZE "5000000.0" // DDui2__(DEF_CHUNK_SIZE, str_DEF_CHUNK_SIZE), DD_____(DEF_CPU_ARCHITECTURE, ""), DDui1__(DEF_DISCS_ON_CLUSTER, ""), DDui1__(DEF_INSTRUCTIONS_SECOND, ""), DDui___(DEF_LOCAL_CLUSTER_NUMBER, ""), DDui___(DEF_LOCAL_SMP_NODE_NUMBER, ""), //DEF_MAX_HISTORY_ROWS made external RV 06/21/01 CR 10-010425-2440 XDDui1__(DEF_MAX_HISTORY_ROWS, "1024"), DDui___(DEF_NUM_BM_CHUNKS, ""), DDui1__(DEF_NUM_NODES_IN_ACTIVE_CLUSTERS, ""), DDui1__(DEF_NUM_SMP_CPUS, ""), DDui2__(DEF_PAGE_SIZE, ""), DDui1__(DEF_PHYSICAL_MEMORY_AVAILABLE, ""), DDui1__(DEF_TOTAL_MEMORY_AVAILABLE, ""), DDui1__(DEF_VIRTUAL_MEMORY_AVAILABLE, ""), DDkwd__(DESTROY_ORDER_AFTER_REPARTITIONING, "OFF"), // detailed executor statistics DDkwd__(DETAILED_STATISTICS, "OPERATOR"), DDkwd__(DIMENSIONAL_QUERY_OPTIMIZATION, "OFF"), DDkwd__(DISABLE_BUFFERED_INSERTS, "OFF"), DDkwd__(DISABLE_READ_ONLY, "OFF"), DD_____(DISPLAY_DATA_FLOW_GRAPH, "OFF"), XDDkwd__(DISPLAY_DIVISION_BY_COLUMNS, "OFF"), // opens are distributed among all partitions instead of just root. // 0: no distribution, only use root. // -1: max distribution, all partitions // <number>: num of partitions per segment DDint__(DISTRIBUTE_OPENS, "-1"), // temp. disable dop reduction logic DDflt0_(DOP_REDUCTION_ROWCOUNT_THRESHOLD, "0.0"), DDkwd__(DO_MINIMAL_RENAME, "OFF"), // if set, then space needed for executor structures at runtime is // optimized such that the allocation starts with a low number and then // is allocated on a need basis. This means that we may have to allocate // more smaller chunks if much space is needed. But it helps in the case // where many plans are being used and each one only takes a small amount // of space. This optimization especially helps in case of Dp2 fragments // as there is only a finite amount of space available there. 
Once that // limit is reached, and a new plan is shipped, it means that an existing // eid plan from dp2 memory need to be swapped out and then refixed up. // By reducing space utilization, we end up with more eid sessions in // use inside of dp2. DDkwd__(DO_RUNTIME_EID_SPACE_COMPUTATION, "OFF"), DDkwd__(DO_RUNTIME_SPACE_OPTIMIZATION, "OFF"), DDui2__(DP2_BLOCK_HEADER_SIZE, "96"), // DP2 Cache defaults as of 06/08/98. DDui1__(DP2_CACHE_1024_BLOCKS, "152"), DDui1__(DP2_CACHE_16K_BLOCKS, "1024"), DDui1__(DP2_CACHE_2048_BLOCKS, "150"), DDui1__(DP2_CACHE_32K_BLOCKS, "512"), DDui1__(DP2_CACHE_4096_BLOCKS, "4096"), DDui1__(DP2_CACHE_512_BLOCKS, "152"), DDui1__(DP2_CACHE_8K_BLOCKS, "2048"), // The cache size is about 2000 pages @ 4k each page DDui1__(DP2_CACHE_SIZE_IN_KB, "8000"), // Exchange Costing // 6/12/98. // End of buffer header is 32 bytes or .0313 KB. // Each Exchange->DP2 request is 48 bytes or .0469 KB. DDflte_(DP2_END_OF_BUFFER_HEADER_SIZE, ".0313"), DDflte_(DP2_EXCHANGE_REQUEST_SIZE, ".0469"), DDpct__(DP2_FRACTION_SEEK_FROM_RANDOM_TO_INORDER, "25"), DDui2__(DP2_MAX_READ_PER_ACCESS_IN_KB, "256"), // The buffer size, as of 10/07/97 is 32K DDui2__(DP2_MESSAGE_BUFFER_SIZE, "56"), // Exchange Costing // 6/12/98. // Message header for Exchange->DP2 is 18 bytes or .0176 KB DDflte_(DP2_MESSAGE_HEADER_SIZE, ".0176"), DDui2__(DP2_MESSAGE_HEADER_SIZE_BYTES, "18"), DDui1__(DP2_MINIMUM_FILE_SIZE_FOR_SEEK_IN_BLOCKS, "256"), DDint__(DP2_PRIORITY, "-1001"), DDint__(DP2_PRIORITY_DELTA, "-1001"), DDui1__(DP2_SEQ_READS_WITHOUT_SEEKS, "100"), DDkwd__(DYNAMIC_HISTOGRAM_COMPRESSION, "ON"), DDui2__(DYN_PA_QUEUE_RESIZE_INIT_DOWN, "1024"), DDui2__(DYN_PA_QUEUE_RESIZE_INIT_UP, "1024"), DDui2__(DYN_QUEUE_RESIZE_FACTOR, "4"), DDui2__(DYN_QUEUE_RESIZE_INIT_DOWN, "4"), DDui2__(DYN_QUEUE_RESIZE_INIT_UP, "4"), DDui1__(DYN_QUEUE_RESIZE_LIMIT, "9"), DDkwd__(EID_SPACE_USAGE_OPT, "OFF"), // For both of these CQDs see executor/ExDp2Trace.h for values. 
DDint__(EID_TRACE_STATES, "0"), DDtp___(EID_TRACE_STR, ""), DDkwd__(ELIMINATE_REDUNDANT_JOINS, "ON"), DDkwd__(ENABLE_DP2_XNS, "OFF"), DDSint__(ESP_ASSIGN_DEPTH, "0"), DDSint__(ESP_FIXUP_PRIORITY_DELTA, "0"), DDSint__(ESP_IDLE_TIMEOUT, "0"), DDkwd__(ESP_MULTI_FRAGMENTS, "ON"), DDkwd__(ESP_MULTI_FRAGMENT_QUOTAS, "ON"), DDui1500_4000(ESP_MULTI_FRAGMENT_QUOTA_VM, "4000"), DDui1_6(ESP_NUM_FRAGMENTS, "3"), DDui1_6(ESP_NUM_FRAGMENTS_WITH_QUOTAS, "6"), DDkwd__(ESP_ON_AGGREGATION_NODES_ONLY, "OFF"), DDSint__(ESP_PRIORITY, "0"), DDSint__(ESP_PRIORITY_DELTA, "0"), DDkwd__(ESTIMATE_HBASE_ROW_COUNT, "ON"), // Disable hints - if SYSTEM, enable on SSD, and disable only on HDD DDkwd__(EXE_BMO_DISABLE_CMP_HINTS_OVERFLOW_HASH, "SYSTEM"), DDkwd__(EXE_BMO_DISABLE_CMP_HINTS_OVERFLOW_SORT, "SYSTEM"), DDkwd__(EXE_BMO_DISABLE_OVERFLOW, "OFF"), DDui___(EXE_BMO_MIN_SIZE_BEFORE_PRESSURE_CHECK_IN_MB, "50"), DDkwd__(EXE_BMO_SET_BUFFERED_WRITES, "OFF"), SDDkwd__(EXE_DIAGNOSTIC_EVENTS, "OFF"), DDui1__(EXE_HGB_INITIAL_HT_SIZE, "262144"), // == hash buffer DDflt__(EXE_HJ_MIN_NUM_CLUSTERS, "4"), DDkwd__(EXE_LOG_RETRY_IPC, "OFF"), // Total size of memory (in MB) available to BMOs (e.g., 1200 MB) SDDui___(EXE_MEMORY_AVAILABLE_IN_MB, "1200"), SDDui___(EXE_MEMORY_FOR_PARTIALHGB_IN_MB, "100"), // lower-bound memory limit for BMOs/nbmos (in MB) DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_EXCHANGE, "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_HASHGROUPBY , "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_HASHJOIN, "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_MERGEJOIN, "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_PA , "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_PROBE_CACHE , "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_SEQUENCE , "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_SORT , "10"), // total memory limit per CPU per query in MB DDpct1_50(EXE_MEMORY_LIMIT_NONBMOS_PERCENT, "15"), XDDui___(EXE_MEMORY_LIMIT_PER_CPU, "0"), // Memory not available for BMOs in master fragment in mxosrvr // (mostly due to QIO). DDui___(EXE_MEMORY_RESERVED_FOR_MXOSRVR_IN_MB,"544"), // Override the memory quota system; set limit per each and every BMO SDDflt__(EXE_MEM_LIMIT_PER_BMO_IN_MB, "0"), DDui1__(EXE_NUM_CONCURRENT_SCRATCH_IOS, "4"), // DDkwd__(EXE_PARALLEL_DDL, "ON"), DDkwd__(EXE_PARALLEL_PURGEDATA, "MINIMUM"), DDkwd__(EXE_PARALLEL_PURGEDATA_WARNINGS, "OFF"), DDui___(EXE_PA_DP2_STATIC_AFFINITY, "1"), DDkwd__(EXE_SINGLE_BMO_QUOTA, "ON"), // The following 3 are only for testing overflow; zero value means: ignore DDui___(EXE_TEST_FORCE_CLUSTER_SPLIT_AFTER_MB, "0"), DDui___(EXE_TEST_FORCE_HASH_LOOP_AFTER_NUM_BUFFERS, "0"), DDui___(EXE_TEST_HASH_FORCE_OVERFLOW_EVERY, "0"), DDkwd__(EXE_UTIL_RWRS, "OFF"), DDkwd__(EXPAND_DP2_SHORT_ROWS, "ON"), XDDui___(EXPLAIN_DESCRIPTION_COLUMN_SIZE, "-1"), DDkwd__(EXPLAIN_DETAIL_COST_FOR_CALIBRATION, "FALSE"), DDkwd__(EXPLAIN_DISPLAY_FORMAT, "EXTERNAL"), DDkwd__(EXPLAIN_IN_RMS, "OFF"), DDui___(EXPLAIN_OUTPUT_ROW_SIZE, "80"), DDui1__(EXPLAIN_ROOT_INPUT_VARS_MAX, "2000"), // maximum number of inputs that we can tolerate to // explain information for inputVars expression // this is needed to avoid stack overflow DDkwd__(EXPLAIN_SPACE_OPT, "ON"), DDkwd__(EXPLAIN_STRATEGIZER_PARAMETERS, "OFF"), DDflte_(EX_OP_ALLOCATE_ATP, ".02"), // Calibration // 01/23/98: 50. 
// Original: .1 DDflte_(EX_OP_ALLOCATE_BUFFER, "50."), DDflte_(EX_OP_ALLOCATE_BUFFER_POOL, ".1"), DDflte_(EX_OP_ALLOCATE_TUPLE, ".05"), // copy_atp affects the costing of NJ // History: // 08/21/98: 0.02, The previous change affected more than one operrator // 08/13/98: 1.0 // 01/08/98: 0.02 DDflte_(EX_OP_COPY_ATP, "1.1335"), DDflte_(EX_OP_DEQUEUE, ".02"), DDflte_(EX_OP_ENQUEUE, ".02"), DDkwd__(FAKE_VOLUME_ASSIGNMENTS, "OFF"), DDui1__(FAKE_VOLUME_NUM_VOLUMES, "24"), DDkwd__(FAST_DELETE, "OFF"), DDkwd__(FAST_DP2_SUBSET_OPT, "ON"), // upper and lower limit (2,10) must be in sync with error values in //ExFastTransport.cpp DDkwd__(FAST_EXTRACT_DIAGS, "OFF"), DDui2_10(FAST_EXTRACT_IO_BUFFERS, "6"), DDui___(FAST_EXTRACT_IO_TIMEOUT_SEC, "60"), DDkwd__(FAST_REPLYDATA_MOVE, "ON"), SDDkwd__(FFDC_DIALOUTS_FOR_MXCMP, "OFF"), DDkwd__(FIND_COMMON_SUBEXPRS_IN_OR, "ON"), DDkwd__(FLOATTYPE, "IEEE"), DDui___(FLOAT_ESP_RANDOM_NUM_SEED, "0"), DDkwd__(FORCE_BUSHY_CQS, "ON"), DDkwd__(FORCE_PARALLEL_CREATE_INDEX, "OFF"), DDkwd__(FORCE_PARALLEL_INSERT_SELECT, "OFF"), DDkwd__(FORCE_PASS_ONE, "OFF"), DDkwd__(FORCE_PASS_TWO, "ON"), // Control if plan fragments need to be compressed // DDui___(FRAG_COMPRESSION_THRESHOLD, "16"), // Controls FSO Tests for debug // DDui___(FSO_RUN_TESTS, "0"), // Controls use of Simple File Scan Optimizer // IF 0 - Use original "Complex" File Scan Optimizer. // (in case simple causes problems) // IF 1 - Use logic to determine FSO to use. (default) // IF 2 - Use logic to determine FSO to use, but also use new // executor predicate costing. // IF >2 - Always use new "Simple" File Scan Optimizer. // (not recommended) // DDui___(FSO_TO_USE, "1"), // Disallow/Allow full outer joins in MultiJoin framework DDkwd__(FULL_OUTER_JOINS_SPOIL_JBB, "OFF"), DDkwd__(GA_PROP_INDEXES_ARITY_1, "ON"), // this default value is filled in // NADefaults::initCurrentDefaultsWithDefaultDefaults. The default value // is ON for static compiles and OFF for dynamic queries. 
DDkwd__(GENERATE_EXPLAIN, "ON"), DDipcBu(GEN_ALIGNED_PA_DP2_BUFFER_SIZE, "31000"), DDui1__(GEN_CBUF_BUFFER_SIZE, "30000"), DDui1__(GEN_CBUF_NUM_BUFFERS, "4"), DDui1__(GEN_CBUF_SIZE_DOWN, "8"), DDui1__(GEN_CBUF_SIZE_UP, "8"), DDui___(GEN_CS_BUFFER_SIZE, "0"), DDui___(GEN_CS_NUM_BUFFERS, "0"), DDui___(GEN_CS_SIZE_DOWN, "4"), DDui___(GEN_CS_SIZE_UP, "4"), DDkwd__(GEN_DBLIMITS_LARGER_BUFSIZE, "ON"), DDui1__(GEN_DDL_BUFFER_SIZE, "30000"), DDui1__(GEN_DDL_NUM_BUFFERS, "4"), DDui1__(GEN_DDL_SIZE_DOWN, "2"), DDui1__(GEN_DDL_SIZE_UP, "32"), DDui1__(GEN_DEL_BUFFER_SIZE, "512"), DDui1__(GEN_DEL_NUM_BUFFERS, "5"), DDui1__(GEN_DEL_SIZE_DOWN, "2"), DDui1__(GEN_DEL_SIZE_UP, "2"), DDui1__(GEN_DESC_BUFFER_SIZE, "10240"), DDui1__(GEN_DESC_NUM_BUFFERS, "4"), DDui1__(GEN_DESC_SIZE_DOWN, "2"), DDui1__(GEN_DESC_SIZE_UP, "16"), DDui1__(GEN_DP2I_BUFFER_SIZE, "10000"), DDui1__(GEN_DP2I_NUM_BUFFERS, "2"), DDui1__(GEN_DP2I_SIZE_DOWN, "32"), DDui1__(GEN_DP2I_SIZE_UP, "64"), DDui1__(GEN_DPDU_BUFFER_SIZE, "2"), DDui1__(GEN_DPDU_NUM_BUFFERS, "1"), DDui1__(GEN_DPDU_SIZE_DOWN, "2"), DDui1__(GEN_DPDU_SIZE_UP, "2"), DDui1__(GEN_DPRO_BUFFER_SIZE, "10240"), DDui1__(GEN_DPRO_NUM_BUFFERS, "1"), DDui1__(GEN_DPRO_SIZE_DOWN, "16"), DDui1__(GEN_DPRO_SIZE_UP, "16"), DDui1__(GEN_DPSO_BUFFER_SIZE, "10240"), DDui1__(GEN_DPSO_NUM_BUFFERS, "4"), DDui1__(GEN_DPSO_SIZE_DOWN, "2048"), DDui1__(GEN_DPSO_SIZE_UP, "2048"), DDui1__(GEN_DPUO_BUFFER_SIZE, "10000"), DDui1__(GEN_DPUO_NUM_BUFFERS, "4"), DDui1__(GEN_DPUO_SIZE_DOWN, "2048"), DDui1__(GEN_DPUO_SIZE_UP, "2048"), DDui1__(GEN_DPVI_BUFFER_SIZE, "10000"), DDui1__(GEN_DPVI_NUM_BUFFERS, "2"), DDui1__(GEN_DPVI_SIZE_DOWN, "32"), DDui1__(GEN_DPVI_SIZE_UP, "64"), DDui___(GEN_EIDR_BROKEN_TREE_CHECK_INTERVAL, "128"), DDipcBu(GEN_EIDR_BUFFER_SIZE, "31000"), DDui1__(GEN_EIDR_NUM_BUFFERS, "3"), DDui1__(GEN_EIDR_SIZE_DOWN, "2"), DDui1__(GEN_EIDR_SIZE_UP, "2"), DDui___(GEN_EIDR_STATS_REPLY_INTERVAL, "3000"), DDint__(GEN_EXCHANGE_MAX_MEM_IN_KB, "4000"), DDint__(GEN_EXCHANGE_MSG_COUNT, "80"), // Fast extract settings are for UDR method invocations DDui1__(GEN_FE_BUFFER_SIZE, "31000"), DDui1__(GEN_FE_NUM_BUFFERS, "2"), DDui1__(GEN_FE_SIZE_DOWN, "4"), DDui1__(GEN_FE_SIZE_UP, "4"), DDui1__(GEN_FSRT_BUFFER_SIZE, "5120"), DDui1__(GEN_FSRT_NUM_BUFFERS, "5"), DDui1__(GEN_FSRT_SIZE_DOWN, "2"), DDui1__(GEN_FSRT_SIZE_UP, "8"), // Do not alter the buffer size; it must be 56K for SCRATCH_MGMT_OPTION == 5 DDui1__(GEN_HGBY_BUFFER_SIZE, "262144"), DDui1__(GEN_HGBY_NUM_BUFFERS , "5"), DDui1__(GEN_HGBY_PARTIAL_GROUP_FLUSH_THRESHOLD, "100"), DDui___(GEN_HGBY_PARTIAL_GROUP_ROWS_PER_CLUSTER, "0"), DDui1__(GEN_HGBY_SIZE_DOWN, "2048"), DDui1__(GEN_HGBY_SIZE_UP, "2048"), // Do not alter the buffer size; it must be 56K for SCRATCH_MGMT_OPTION == 5 DDui1__(GEN_HSHJ_BUFFER_SIZE, "262144"), // Controls use of the hash join min/max optimization. 
DDkwd__(GEN_HSHJ_MIN_MAX_OPT, "OFF"), DDui1__(GEN_HSHJ_NUM_BUFFERS, "1"), DDui1__(GEN_HSHJ_SIZE_DOWN, "2048"), DDui1__(GEN_HSHJ_SIZE_UP, "2048"), DDui1__(GEN_IAR_BUFFER_SIZE, "10240"), DDui1__(GEN_IAR_NUM_BUFFERS, "1"), DDui1__(GEN_IAR_SIZE_DOWN, "2"), DDui1__(GEN_IAR_SIZE_UP, "4"), DDui1__(GEN_IMDT_BUFFER_SIZE, "2"), DDui1__(GEN_IMDT_NUM_BUFFERS, "1"), DDui1__(GEN_IMDT_SIZE_DOWN, "2"), DDui1__(GEN_IMDT_SIZE_UP, "2"), DDui1__(GEN_INS_BUFFER_SIZE, "10240"), DDui1__(GEN_INS_NUM_BUFFERS, "3"), DDui1__(GEN_INS_SIZE_DOWN, "4"), DDui1__(GEN_INS_SIZE_UP, "128"), // Controls LeanEr Expression generation DDkwd__(GEN_LEANER_EXPRESSIONS, "ON"), DDui1__(GEN_LOCK_BUFFER_SIZE, "1024"), DDui1__(GEN_LOCK_NUM_BUFFERS, "1"), DDui1__(GEN_LOCK_SIZE_DOWN, "4"), DDui1__(GEN_LOCK_SIZE_UP, "4"), DDui1__(GEN_MATR_BUFFER_SIZE, "2"), DDui1__(GEN_MATR_NUM_BUFFERS, "1"), DDui1__(GEN_MATR_SIZE_DOWN, "2"), DDui1__(GEN_MATR_SIZE_UP, "8"), DDui___(GEN_MAX_NUM_PART_DISK_ENTRIES, "3"), DDui___(GEN_MAX_NUM_PART_NODE_ENTRIES, "255"), DDui1__(GEN_MEM_PRESSURE_THRESHOLD, "100"), DDui1__(GEN_MJ_BUFFER_SIZE, "32768"), DDui1__(GEN_MJ_NUM_BUFFERS, "1"), DDui1__(GEN_MJ_SIZE_DOWN, "2"), DDui1__(GEN_MJ_SIZE_UP, "1024"), DDui1__(GEN_ONLJ_BUFFER_SIZE, "5120"), DDui1__(GEN_ONLJ_LEFT_CHILD_QUEUE_DOWN, "4"), DDui1__(GEN_ONLJ_LEFT_CHILD_QUEUE_UP, "2048"), DDui1__(GEN_ONLJ_NUM_BUFFERS, "5"), DDui1__(GEN_ONLJ_RIGHT_SIDE_QUEUE_DOWN, "2048"), DDui1__(GEN_ONLJ_RIGHT_SIDE_QUEUE_UP, "2048"), DDkwd__(GEN_ONLJ_SET_QUEUE_LEFT, "ON"), DDkwd__(GEN_ONLJ_SET_QUEUE_RIGHT, "ON"), DDui1__(GEN_ONLJ_SIZE_DOWN, "2048"), DDui1__(GEN_ONLJ_SIZE_UP, "2048"), DDui1__(GEN_PAR_LAB_OP_BUFFER_SIZE, "1024"), DDui1__(GEN_PAR_LAB_OP_NUM_BUFFERS, "1"), DDui1__(GEN_PAR_LAB_OP_SIZE_DOWN, "2"), DDui1__(GEN_PAR_LAB_OP_SIZE_UP, "4"), DDipcBu(GEN_PA_BUFFER_SIZE, "31000"), DDui1__(GEN_PA_NUM_BUFFERS, "5"), DDui1__(GEN_PA_SIZE_DOWN, "2048"), DDui1__(GEN_PA_SIZE_UP, "2048"), DDui1__(GEN_PROBE_CACHE_NUM_ENTRIES, "16384"),// number of entries DDui___(GEN_PROBE_CACHE_NUM_INNER, "0"), //0 means compiler decides DDui1__(GEN_PROBE_CACHE_SIZE_DOWN, "2048"), DDui1__(GEN_PROBE_CACHE_SIZE_UP, "2048"), DDui1__(GEN_RCRS_BUFFER_SIZE, "2"), DDui1__(GEN_RCRS_NUM_BUFFERS, "1"), DDui1__(GEN_RCRS_SIZE_DOWN, "8"), DDui1__(GEN_RCRS_SIZE_UP, "16"), DDkwd__(GEN_RESET_ACCESS_COUNTER, "OFF"), DDui1__(GEN_ROOT_BUFFER_SIZE, "2"), DDui1__(GEN_ROOT_NUM_BUFFERS, "1"), DDui1__(GEN_ROOT_SIZE_DOWN, "2"), DDui1__(GEN_ROOT_SIZE_UP, "2"), DDui1__(GEN_SAMPLE_BUFFER_SIZE, "5120"), DDui1__(GEN_SAMPLE_NUM_BUFFERS, "5"), DDui1__(GEN_SAMPLE_SIZE_DOWN, "16"), DDui1__(GEN_SAMPLE_SIZE_UP, "16"), DDui1__(GEN_SCAN_BUFFER_SIZE, "10240"), DDui1__(GEN_SCAN_NUM_BUFFERS, "10"), DDui1__(GEN_SCAN_SIZE_DOWN, "16"), DDui1__(GEN_SCAN_SIZE_UP, "32"), DDui1__(GEN_SEQFUNC_BUFFER_SIZE, "5120"), DDui1__(GEN_SEQFUNC_NUM_BUFFERS, "5"), DDui1__(GEN_SEQFUNC_SIZE_DOWN, "16"), DDui1__(GEN_SEQFUNC_SIZE_UP, "16"), DDkwd__(GEN_SEQFUNC_UNLIMITED_HISTORY, "OFF"), DDui1__(GEN_SEQ_BUFFER_SIZE, "512"), DDui1__(GEN_SEQ_NUM_BUFFERS, "5"), DDui1__(GEN_SEQ_SIZE_DOWN, "2"), DDui1__(GEN_SEQ_SIZE_UP, "2"), DDui1__(GEN_SGBY_BUFFER_SIZE, "5120"), DDui1__(GEN_SGBY_NUM_BUFFERS, "5"), DDui1__(GEN_SGBY_SIZE_DOWN, "2048"), DDui1__(GEN_SGBY_SIZE_UP, "2048"), DDui1__(GEN_SID_BUFFER_SIZE, "1024"), DDui1__(GEN_SID_NUM_BUFFERS, "4"), DDui1__(GEN_SNDB_BUFFER_SIZE, "2"), DDui1__(GEN_SNDB_NUM_BUFFERS, "4"), DDui1__(GEN_SNDB_SIZE_DOWN, "4"), DDui1__(GEN_SNDB_SIZE_UP, "128"), DDui___(GEN_SNDT_BUFFER_SIZE_DOWN, "0"), DDui___(GEN_SNDT_BUFFER_SIZE_UP, "0"), 
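  // The GEN_xxx_* knobs above and below follow a common per-operator
  // pattern: BUFFER_SIZE and NUM_BUFFERS size the operator's SQL buffer
  // pool, while SIZE_DOWN and SIZE_UP are (as far as the naming goes;
  // the authoritative reading is the generator code that consumes each
  // knob) the initial lengths of the operator's down and up queues.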
DDui1__(GEN_SNDT_NUM_BUFFERS, "2"), DDkwd__(GEN_SNDT_RESTRICT_SEND_BUFFERS, "ON"), DDui1__(GEN_SNDT_SIZE_DOWN, "4"), DDui1__(GEN_SNDT_SIZE_UP, "128"), DDui1__(GEN_SORT_MAX_BUFFER_SIZE, "5242880"), DDui1__(GEN_SORT_MAX_NUM_BUFFERS, "160"), DDui___(GEN_SORT_MIN_BUFFER_SIZE, "0"), DDui1__(GEN_SORT_NUM_BUFFERS, "4"), DDui1__(GEN_SORT_SIZE_DOWN, "2"), DDui1__(GEN_SORT_SIZE_UP, "1024"), DDui1__(GEN_SPLB_BUFFER_SIZE, "2"), DDui1__(GEN_SPLB_NUM_BUFFERS, "1"), DDui1__(GEN_SPLB_SIZE_DOWN, "2"), DDui1__(GEN_SPLB_SIZE_UP, "2"), DDui1__(GEN_SPLT_BUFFER_SIZE, "2"), DDui1__(GEN_SPLT_NUM_BUFFERS, "1"), DDui1__(GEN_SPLT_SIZE_DOWN, "2048"), DDui1__(GEN_SPLT_SIZE_UP, "2048"), DDui1__(GEN_STPR_BUFFER_SIZE, "1024"), DDui1__(GEN_STPR_NUM_BUFFERS, "3"), DDui1__(GEN_STPR_SIZE_DOWN, "2"), DDui1__(GEN_STPR_SIZE_UP, "2"), DDui1__(GEN_TFLO_BUFFER_SIZE, "5120"), DDui1__(GEN_TFLO_NUM_BUFFERS, "2"), DDui1__(GEN_TFLO_SIZE_DOWN, "8"), DDui1__(GEN_TFLO_SIZE_UP, "16"), DDui512(GEN_TIMEOUT_BUFFER_SIZE, "4096"), DDui1__(GEN_TIMEOUT_NUM_BUFFERS, "1"), DDui2__(GEN_TIMEOUT_SIZE_DOWN, "2"), DDui2__(GEN_TIMEOUT_SIZE_UP, "4"), DDui1__(GEN_TRAN_BUFFER_SIZE, "4096"), DDui1__(GEN_TRAN_NUM_BUFFERS, "1"), DDui1__(GEN_TRAN_SIZE_DOWN, "2"), DDui1__(GEN_TRAN_SIZE_UP, "4"), DDui1__(GEN_TRSP_BUFFER_SIZE, "10240"), DDui1__(GEN_TRSP_NUM_BUFFERS, "5"), DDui1__(GEN_TRSP_SIZE_DOWN, "16"), DDui1__(GEN_TRSP_SIZE_UP, "16"), DDui1__(GEN_TUPL_BUFFER_SIZE, "1024"), DDui1__(GEN_TUPL_NUM_BUFFERS, "4"), DDui1__(GEN_TUPL_SIZE_DOWN, "2048"), DDui1__(GEN_TUPL_SIZE_UP, "2048"), // GEN_UDRRS_ settings are for stored procedure result // set proxy plans DDui1__(GEN_UDRRS_BUFFER_SIZE, "31000"), DDui1__(GEN_UDRRS_NUM_BUFFERS, "2"), DDui1__(GEN_UDRRS_SIZE_DOWN, "4"), DDui1__(GEN_UDRRS_SIZE_UP, "128"), // GEN_UDR_ settings are for UDR method invocations DDui1__(GEN_UDR_BUFFER_SIZE, "31000"), DDui1__(GEN_UDR_NUM_BUFFERS, "2"), DDui1__(GEN_UDR_SIZE_DOWN, "4"), DDui1__(GEN_UDR_SIZE_UP, "4"), DDui1__(GEN_UNLJ_BUFFER_SIZE, "5120"), DDui1__(GEN_UNLJ_NUM_BUFFERS, "5"), DDui1__(GEN_UNLJ_SIZE_DOWN, "8"), DDui1__(GEN_UNLJ_SIZE_UP, "16"), DDui1__(GEN_UN_BUFFER_SIZE, "10240"), DDui1__(GEN_UN_NUM_BUFFERS, "5"), DDui1__(GEN_UN_SIZE_DOWN, "8"), DDui1__(GEN_UN_SIZE_UP, "16"), DDui1__(GEN_UPD_BUFFER_SIZE, "5120"), DDui1__(GEN_UPD_NUM_BUFFERS, "5"), DDui1__(GEN_UPD_SIZE_DOWN, "2"), DDui1__(GEN_UPD_SIZE_UP, "2"), // Used when Compressed_Internal_Format is on to reduce space in the // hash buffers (Hash Join and Hash Groupby) and sort buffers. DDkwd__(GEN_VARIABLE_LENGTH_BUFFERS, "OFF"), DDui1__(GEN_XPLN_BUFFER_SIZE, "4096"), DDui1__(GEN_XPLN_NUM_BUFFERS, "3"), DDui1__(GEN_XPLN_SIZE_DOWN, "8"), DDui1__(GEN_XPLN_SIZE_UP, "16"), // When less or equal to this CQD (5000 rows by default), a partial root // will be running in the Master. Set to 0 to disable the feature. DDint__(GROUP_BY_PARTIAL_ROOT_THRESHOLD, "5000"), DDkwd__(GROUP_BY_USING_ORDINAL, "MINIMUM"), // HASH_JOINS ON means do HASH_JOINS XDDkwd__(HASH_JOINS, "ON"), DDkwd__(HASH_JOINS_TYPE1_PLAN1, "ON"), DDkwd__(HASH_JOINS_TYPE1_PLAN2, "ON"), // HBase defaults // Some of the more important ones: // HBASE_CATALOG: Catalog of "_ROW_" and "_CELL_" schemas // HBASE_COPROCESSORS: Enable use of co-processors for aggregates. 
// need to set the coprocessor in HBase config file // HBASE_INTERFACE: JNI or JNI_TRX (transactional interface) // HBASE_MAX_COLUMN_xxx_LENGTH: Max length of some // string columns in the "_ROW_" and "_CELL_" schemas // HBASE_SQL_IUD_SEMANTICS: Off: Don't check for existing rows for insert/update DDkwd__(HBASE_ASYNC_DROP_TABLE, "OFF"), DDkwd__(HBASE_ASYNC_OPERATIONS, "OFF"), // HBASE_CACHE_BLOCKS, ON => cache every scan, OFF => cache no scan // SYSTEM => cache scans which take less than 1 RS block cache mem. DDui___(HBASE_BLOCK_SIZE, "65536"), DDkwd__(HBASE_CACHE_BLOCKS, "SYSTEM"), DD_____(HBASE_CATALOG, "HBASE"), DDkwd__(HBASE_CHECK_AND_UPDEL_OPT, "ON"), DDkwd__(HBASE_COMPRESSION_OPTION, ""), DDkwd__(HBASE_COPROCESSORS, "ON"), DDkwd__(HBASE_CREATE_OLD_MD_FOR_UPGRADE_TESTING, "OFF"), DDkwd__(HBASE_DATA_BLOCK_ENCODING_OPTION, ""), DDkwd__(HBASE_FILTER_PREDS, "OFF"), DDkwd__(HBASE_HASH2_PARTITIONING, "ON"), DDui___(HBASE_INDEX_LEVEL, "0"), DDui___(HBASE_MAX_COLUMN_INFO_LENGTH, "10000"), DDui___(HBASE_MAX_COLUMN_NAME_LENGTH, "100"), DDui___(HBASE_MAX_COLUMN_VAL_LENGTH, "1000"), DDui___(HBASE_MAX_ESPS, "9999"), DDui___(HBASE_MAX_NUM_SEARCH_KEYS, "512"), DDui1__(HBASE_MIN_BYTES_PER_ESP_PARTITION, "67108864"), DDkwd__(HBASE_NATIVE_IUD, "ON"), DDui1__(HBASE_NUM_CACHE_ROWS_MAX, "10000"), DDui1__(HBASE_NUM_CACHE_ROWS_MIN, "100"), DDkwd__(HBASE_RANGE_PARTITIONING, "ON"), DDkwd__(HBASE_RANGE_PARTITIONING_MC_SPLIT, "ON"), DDui___(HBASE_REGION_SERVER_MAX_HEAP_SIZE, "1024"), // in units of MB DDkwd__(HBASE_ROWSET_VSBB_OPT, "ON"), DDusht_(HBASE_ROWSET_VSBB_SIZE, "1000"), DDflt0_(HBASE_SALTED_TABLE_MAX_FILE_SIZE, "0"), DDkwd__(HBASE_SALTED_TABLE_SET_SPLIT_POLICY, "ON"), DD_____(HBASE_SCHEMA, "HBASE"), DDkwd__(HBASE_SERIALIZATION, "OFF"), DD_____(HBASE_SERVER, ""), DDkwd__(HBASE_SQL_IUD_SEMANTICS, "ON"), DDkwd__(HBASE_STATS_PARTITIONING, "ON"), DDkwd__(HBASE_TRANSFORM_UPDATE_TO_DELETE_INSERT, "OFF"), DDkwd__(HBASE_UPDEL_CURSOR_OPT, "ON"), DDui___(HBASE_USE_FAKED_REGIONS, "0"), DD_____(HBASE_ZOOKEEPER_PORT, ""), DDui1__(HDFS_IO_BUFFERSIZE, "65536"), DDui___(HDFS_IO_BUFFERSIZE_BYTES, "0"), DDui1__(HDFS_IO_RANGE_TAIL, "16384"), DDkwd__(HDFS_PREFETCH, "ON"), DDkwd__(HDFS_READ_CONTINUE_ON_ERROR, "OFF"), DDui1__(HDFS_REPLICATION, "1"), DDkwd__(HDFS_USE_CURSOR_MULTI, "OFF"), DDkwd__(HGB_BITMUX, "OFF"), DDflt0_(HGB_CPUCOST_INITIALIZE, "1."), DDflt0_(HGB_DP2_MEMORY_LIMIT, "10000."), DDflte_(HGB_GROUPING_FACTOR_FOR_SPILLED_CLUSTERS, ".5"), DDflte_(HGB_MAX_TABLE_SIZE_FOR_CLUSTERS, "4E5"), DDflte_(HGB_MEMORY_AVAILABLE_FOR_CLUSTERS, "10"), DDflte_(HH_OP_ALLOCATE_BUCKET_ARRAY, ".1"), DDflte_(HH_OP_ALLOCATE_CLUSTER, ".1"), DDflte_(HH_OP_ALLOCATE_CLUSTERDB, ".1"), DDflte_(HH_OP_ALLOCATE_HASH_TABLE, ".05"), DDflt1_(HH_OP_HASHED_ROW_OVERHEAD, "8."), // From observation: // 03/11/98: probing the hash table is very inexpensive, // thus reduce this to almost zero. // change // CPUCOST_HASH_PER_KEY // when changing this value // It takes around 2 seconds to insert 100,000 rows into the chain: // @ 0.00005 secs per k instr: // k instr= 2/0.00005/100000 = 0.4 // History: // Before 03/11/98: 0.4 // Initially: 0.01 DDflte_(HH_OP_INSERT_ROW_TO_CHAIN, "0.51"), // From observation: // 03/11/98: probing the hash table is very inexpensive, // thus reduce this to almost zero. // 01/05/98: 15,433 rows probed per second @ 0.00005 seconds // per thousand of instructions, give: // seconds to probe one row = 1/15,433 = 0.000064796 // This time includes: time to position and to compare. 
  // Thus subtract the time to compare to arrive at the proper number:
  // thd. of instructions per row inserted =
  // 1/15,433/0.00005 - CPUCOST_COMPARE_SIMPLE_DATA_TYPE =
  // 1.2959 - 0.2 = 1.0959
  // History:
  // Before 03/11/98: 1.0959
  // Before 01/05/98: 0.01
  DDflt0_(HH_OP_PROBE_HASH_TABLE, "0.011"),
  DDflt0_(HH_OP_READ_HASH_BUFFER, "0."),
  DDflt0_(HH_OP_WRITE_HASH_BUFFER, "0."),
  // Added 10/16/02
  DDkwd__(HIDE_INDEXES, "NONE"),
  DDansi_(HISTOGRAMS_SCHEMA, ""),
  // -------------------------------------------------------------------------
  // Histogram fudge factors
  // -------------------------------------------------------------------------
  // HIST_BASE_REDUCTION and HIST_PREFETCH externalized 08/21/01 CR 10-010713-3895
  DDkwd__(HIST_ASSUME_INDEPENDENT_REDUCTION, "ON"),
  XDDkwd__(HIST_AUTO_GENERATION_OF_SAMPLE, "OFF"),
  DDkwd__(HIST_BASE_REDUCTION, "ON"),
  DDflt0_(HIST_BASE_REDUCTION_FUDGE_FACTOR, "0.1"),
  DDflt0_(HIST_CONSTANT_ALPHA, "0.5"),
  DDflt_0_1(HIST_DEFAULT_BASE_SEL_FOR_LIKE_WILDCARD, "0.50"),
  DDui1__(HIST_DEFAULT_NUMBER_OF_INTERVALS, "50"),
  DDui1__(HIST_DEFAULT_SAMPLE_MAX, "1000000"),
  DDui1__(HIST_DEFAULT_SAMPLE_MIN, "10000"),
  DDflt_0_1(HIST_DEFAULT_SAMPLE_RATIO, "0.01"),
  DDflte_(HIST_DEFAULT_SEL_FOR_BOOLEAN, "0.3333"),
  DDflt_0_1(HIST_DEFAULT_SEL_FOR_IS_NULL, "0.01"),
  DDflt_0_1(HIST_DEFAULT_SEL_FOR_JOIN_EQUAL, "0.3333"),
  DDflt_0_1(HIST_DEFAULT_SEL_FOR_JOIN_RANGE, "0.3333"),
  DDflt_0_1(HIST_DEFAULT_SEL_FOR_LIKE_NO_WILDCARD, "1.0"),
  DDflt_0_1(HIST_DEFAULT_SEL_FOR_LIKE_WILDCARD, "0.10"),
  DDflt_0_1(HIST_DEFAULT_SEL_FOR_PRED_EQUAL, "0.01"),
  DDflt_0_1(HIST_DEFAULT_SEL_FOR_PRED_RANGE, "0.3333"),
  DDflt1_(HIST_FETCHCOUNT_SCRATCH_VOL_THRESHOLD, "10240000"),
  DDkwd__(HIST_FREQ_VALS_NULL_FIX, "ON"),
  DDkwd__(HIST_INCLUDE_SKEW_FOR_NON_INNER_JOIN, "ON"),
  DDkwd__(HIST_INTERMEDIATE_REDUCTION, "OFF"),
  DDflt0_(HIST_INTERMEDIATE_REDUCTION_FUDGE_FACTOR, "0.25"),
  DDflt_0_1(HIST_JOIN_CARD_LOWBOUND, "1.0"),
  DDui1__(HIST_LOW_UEC_THRESHOLD, "55"),
  DDui1__(HIST_MAX_NUMBER_OF_INTERVALS, "10000"),
  DDkwd__(HIST_MC_STATS_NEEDED, "ON"),
  DDkwd__(HIST_MERGE_FREQ_VALS_FIX, "ON"),
  // Histogram min/max optimization: when the predicate is of form
  // T.A = MIN/MAX(S.B), replace the histogram(T.A) with
  // single_int_histogram(MIN/MAX(S.B)). Do this only when
  // there is no local predicate on S and there exists a frequent
  // value that is equal to MIN/MAX(S.B).
  DDkwd__(HIST_MIN_MAX_OPTIMIZATION, "ON"),
  // This CQD is used to control the number of missing stats warnings
  // that should be generated.
  // 0 - Display no warnings.
  // 1 - Display only missing single column stats warnings. These include 6008 and 6011
  // 2 - Display all single column missing stats warnings and
  //     multi-column missing stats warnings for Scans only.
  // 3 - Display all missing single column stats warnings and missing
  //     multi-column stats warnings for Scans and Join operators only.
  // 4 - Display all missing single column stats and missing multi-column
  //     stats warnings for all operators including Scans, Joins and GroupBys.
  // The CQD also does not have an impact on the auto update stats behavior. The stats will
  // still be automatically generated even if the warnings have been suppressed.
  // (See USTAT_AUTO_MISSING_STATS_LEVEL.)
  // Default behavior is to generate all warnings
  XDDui___(HIST_MISSING_STATS_WARNING_LEVEL, "4"),
  // This specifies the time interval after which the fake statistics
  // should be refreshed. This was done primarily for users
  // who did not want to update statistics on temporary tables.
  // If these statistics are cached, this results in bad plans.
  // Such users can have this default set to 0, in which case histograms
  // with fake statistics will never be cached. Note that when ustat
  // automation is on, this value divided by 360 is used.
  XDDui___(HIST_NO_STATS_REFRESH_INTERVAL, "3600"),
  DDflt1_(HIST_NO_STATS_ROWCOUNT, "100"),
  DDflt1_(HIST_NO_STATS_UEC, "2"),
  DDflt1_(HIST_NO_STATS_UEC_CHAR1, "10"),
  DDui1__(HIST_NUM_ADDITIONAL_DAYS_TO_EXTRAPOLATE, "4"),
  DDintN1__(HIST_ON_DEMAND_STATS_SIZE, "0"),
  DDui___(HIST_OPTIMISTIC_CARD_OPTIMIZATION, "1"),
  XDDkwd__(HIST_PREFETCH, "ON"),
  XDDkwd__(HIST_REMOVE_TRAILING_BLANKS, "ON"), // should remove after verifying code is solid
  DDansi_(HIST_ROOT_NODE, ""),
  XDDflt1_(HIST_ROWCOUNT_REQUIRING_STATS, "50000"),
  DDflt0_(HIST_SAME_TABLE_PRED_REDUCTION, "0.0"),
  DDvol__(HIST_SCRATCH_VOL, ""),
  DDflt1_(HIST_SCRATCH_VOL_THRESHOLD, "104857600"),
  DDflt_0_1(HIST_SKEW_COST_ADJUSTMENT, "0.2"),
  DDkwd__(HIST_SKIP_MC_FOR_NONKEY_JOIN_COLUMNS, "OFF"),
  DDui___(HIST_TUPLE_FREQVAL_LIST_THRESHOLD, "40"),
  DDkwd__(HIST_USE_HIGH_FREQUENCY_INFO, "ON"),
  XDDkwd__(HIST_USE_SAMPLE_FOR_CARDINALITY_ESTIMATION , "ON"),
  // CQDs for Trafodion on Hive
  // Main ones to use:
  // HIVE_MAX_STRING_LENGTH: Hive "string" data type gets converted
  //                         into a VARCHAR with this length
  // HIVE_MIN_BYTES_PER_ESP_PARTITION: Make one ESP for this many bytes
  // HIVE_NUM_ESPS_PER_DATANODE: Equivalent of MAX_ESPS_PER_CPU_PER_OP
  //                             Note that this is really per SeaQuest node
  DD_____(HIVE_CATALOG, ""),
  DDkwd__(HIVE_DEFAULT_CHARSET, (char *)SQLCHARSETSTRING_UTF8),
  DD_____(HIVE_DEFAULT_SCHEMA, "HIVE"),
  DD_____(HIVE_FILE_NAME, "/hive/tpcds/customer/customer.dat" ),
  DD_____(HIVE_HDFS_STATS_LOG_FILE, ""),
  DDint__(HIVE_LIB_HDFS_PORT_OVERRIDE, "-1"),
  DDint__(HIVE_LOCALITY_BALANCE_LEVEL, "0"),
  DDui___(HIVE_MAX_ESPS, "9999"),
  DDui___(HIVE_MAX_STRING_LENGTH, "32000"),
  DDkwd__(HIVE_METADATA_JAVA_ACCESS, "ON"),
  DDint__(HIVE_METADATA_REFRESH_INTERVAL, "0"),
  DDflt0_(HIVE_MIN_BYTES_PER_ESP_PARTITION, "67108864"),
  DDui___(HIVE_NUM_ESPS_PER_DATANODE, "2"),
  DDpct__(HIVE_NUM_ESPS_ROUND_DEVIATION, "34"),
  DDkwd__(HIVE_SORT_HDFS_HOSTS, "ON"),
  DD_____(HIVE_USE_FAKE_SQ_NODE_NAMES, "" ),
  DDkwd__(HIVE_USE_FAKE_TABLE_DESC, "OFF"),
  DDkwd__(HIVE_USE_HASH2_AS_PARTFUNCION, "ON"),
  // -------------------------------------------------------------------------
  DDui2__(HJ_BUFFER_SIZE, "32"),
  DDflt0_(HJ_CPUCOST_INITIALIZE, "1."),
  DDui1__(HJ_INITIAL_BUCKETS_PER_CLUSTER, "4."),
  DDkwd__(HJ_NEW_MCSB_PLAN, "OFF"),
  DDint__(HJ_SCAN_TO_NJ_PROBE_SPEED_RATIO, "2000"),
  DDkwd__(HJ_TYPE, "HYBRID"),
  DD_____(HP_ROUTINES_SCHEMA, "NEO.HP_ROUTINES"), // Must be in form <cat>.<sch>
  DDkwd__(HQC_CONVDOIT_DISABLE_NUMERIC_CHECK, "OFF"),
  DDkwd__(HQC_LOG, "OFF"),
  DD_____(HQC_LOG_FILE, ""),
  DDui1_10(HQC_MAX_VALUES_PER_KEY, "5"),
  DDkwd__(HYBRID_QUERY_CACHE, "ON"),
  DDkwd__(IF_LOCKED, "WAIT"),
  // ignore_duplicate_keys is no longer valid. It is still
  // here as a dummy for compatibility with existing scripts.
  DDkwd__(IGNORE_DUPLICATE_KEYS, "SYSTEM"),
  // in mode_special_1, duplicate rows are ignored if inserting a row in the
  // base table which has a user-defined primary key. If this default is set
  // to OFF in mode_special_1, then duplicate rows are not ignored.
  //
  // If not in mode_special_1, and this default is ON, then duplicate rows
  // are ignored.
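  // Hypothetical example of the semantics described above: given
  //   CREATE TABLE t (k INT NOT NULL PRIMARY KEY, v INT);
  //   INSERT INTO t VALUES (1, 10);
  // a second INSERT INTO t VALUES (1, 20) would normally raise a
  // duplicate-key error; with duplicate rows being ignored it silently
  // inserts nothing instead.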
  DDkwd__(IGNORE_DUPLICATE_ROWS, "SYSTEM"),
  DDkwd__(IMPLICIT_DATETIME_INTERVAL_HOSTVAR_CONVERSION, "FALSE"),
  DDkwd__(IMPLICIT_HOSTVAR_CONVERSION, "FALSE"),
  // threshold for the number of rows inserted into a volatile/temp
  // table which will cause an automatic update stats.
  // -1 indicates do not upd stats. 0 indicates always upd stats.
  DDint__(IMPLICIT_UPD_STATS_THRESHOLD, "-1"), //"10000"),
  DDkwd__(INCORPORATE_SKEW_IN_COSTING, "ON"),
  DDkwd__(INDEX_ELIMINATION_LEVEL, "AGGRESSIVE"),
  DDui1__(INDEX_ELIMINATION_THRESHOLD, "50"),
  SDDkwd__(INFER_CHARSET, "OFF"),
  // UDF initial row cost CQDs
  DDui___(INITIAL_UDF_CPU_COST, "100"),
  DDui___(INITIAL_UDF_IO_COST, "1"),
  DDui___(INITIAL_UDF_MSG_COST, "2"),
  DDkwd__(INPUT_CHARSET, (char *)SQLCHARSETSTRING_ISO88591), // SQLCHARSETSTRING_UTF8
  XDDkwd__(INSERT_VSBB, "SYSTEM"),
  // 10-040621-7139-begin
  // This CQD will allow the user to force the compiler to
  // choose an interactive access path, i.e., prefer an access path with
  // an index in it. If no such path is found, whichever access path is
  // available is chosen.
  DDkwd__(INTERACTIVE_ACCESS, "OFF"),
  // 10-040621-7139-end
  DDkwd__(IN_MEMORY_OBJECT_DEFN, "OFF"),
  DDflte_(IO_SEEKS_INORDER_FACTOR, "0.10"),
  // History:
  // 3/11/99 Changed to zero because in large tables the read-ahead
  // seems negligible (and/or hard to simulate)
  // Before 3/11/99: 0.58
  DDflt0_(IO_TRANSFER_COST_PREFETCH_MISSES_FRACTION, "0."),
  XDDkwd__(ISOLATION_LEVEL, "READ_COMMITTED"),
  XDDkwd__(ISOLATION_LEVEL_FOR_UPDATES, "NONE"),
  SDDkwd__(ISO_MAPPING, (char *)SQLCHARSETSTRING_ISO88591),
  DDkwd__(IS_DB_TRANSPORTER, "OFF"),
  DDkwd__(IS_SQLCI, "FALSE"),
  DDkwd__(IUD_NONAUDITED_INDEX_MAINT, "OFF"),
  DDkwd__(JDBC_PROCESS, "FALSE"),
  // Force the join order given by the user
  XDDkwd__(JOIN_ORDER_BY_USER, "OFF"),
  DDkwd__(KEYLESS_NESTED_JOINS, "OFF"),
  XDDkwd__(LAST0_MODE, "OFF"),
  DDansi_(LDAP_USERNAME, ""),
  // Disallow/Allow left joins in MultiJoin framework
  DDkwd__(LEFT_JOINS_SPOIL_JBB, "OFF"),
  DDkwd__(LIMIT_HBASE_SCAN_DOP, "OFF"),
  // if this default is set to ON, then the max precision of a numeric
  // expression (arithmetic, aggregate) is limited to MAX_NUMERIC_PRECISION
  // (= 18). If this is set to OFF, the default value, then the max precision
  // is computed based on the operands and the operation, which could make the
  // result a software datatype (BIGNUM). Software datatypes give better
  // precision but degraded performance.
  SDDkwd__(LIMIT_MAX_NUMERIC_PRECISION, "SYSTEM"),
  DDint__(LOB_HDFS_PORT, "0"),
  DD_____(LOB_HDFS_SERVER, "default"),
  // default size is 1 G (1000 M)
  DDint__(LOB_MAX_SIZE, "1000"),
  // default size is 32000. Change this to extract more data into memory.
  DDui___(LOB_OUTPUT_SIZE, "32000"),
  DD_____(LOB_STORAGE_FILE_DIR, "/lobs"),
  // storage types defined in exp/ExpLOBenum.h.
  // Default is hdfs_file (value = 1)
  DDint__(LOB_STORAGE_TYPE, "2"),
  // New default size for buffer size for local node
  DDui2__(LOCAL_MESSAGE_BUFFER_SIZE, "50"),
  DDansi_(MAINTAIN_CATALOG, "NEO"),
  // Set the maintain control table timeout to 5 minutes
  DDint__(MAINTAIN_CONTROL_TABLE_TIMEOUT, "30000"),
  DDint__(MAINTAIN_REORG_PRIORITY, "-1"),
  DDint__(MAINTAIN_REORG_PRIORITY_DELTA, "0"),
  DDint__(MAINTAIN_REORG_RATE, "40"),
  DDint__(MAINTAIN_REORG_SLACK, "0"),
  DDint__(MAINTAIN_UPD_STATS_SAMPLE, "-1"),
  DDkwd__(MARIAQUEST_PROCESS, "OFF"),
  DDSint__(MASTER_PRIORITY, "0"),
  DDSint__(MASTER_PRIORITY_DELTA, "0"),
  DDint__(MATCH_CONSTANTS_OF_EQUALITY_PREDICATES, "2"),
  DDui1__(MAX_ACCESS_NODES_PER_ESP, "1024"),
  // this is the default length of a param which is typed as a VARCHAR.
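  // e.g. (illustrative): in "SELECT * FROM t WHERE c = ?", an otherwise
  // untyped parameter is typed as VARCHAR(32) under the default below;
  // raise the CQD if longer parameter values are expected.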
  DDui2__(MAX_CHAR_PARAM_DEFAULT_SIZE, "32"),
  DDint__(MAX_DEPTH_TO_CHECK_FOR_CYCLIC_PLAN, "1"),
  // default value of maximum dp2 groups for a hash-groupby
  DDui1__(MAX_DP2_HASHBY_GROUPS, "1000"),
  //
  // The max number of ESPs per cpu for a given operator.
  // i.e. this number times the number of available CPUs is "max pipelines".
  //
  // On Linux, "CPU" means cores.
  //
  DDflt__(MAX_ESPS_PER_CPU_PER_OP, "0.5"),
  DDui1__(MAX_EXPRS_USED_FOR_CONST_FOLDING, "1000"),
  // used in hash groupby costing in esp/master
  DDui1__(MAX_HEADER_ENTREIS_PER_HASH_TABLE, "250000"),
  DDui1__(MAX_LONG_VARCHAR_DEFAULT_SIZE, "2000"),
  DDui1__(MAX_LONG_WVARCHAR_DEFAULT_SIZE, "2000"),
  DD18_128(MAX_NUMERIC_PRECISION_ALLOWED, "128"),
  // The max number of vertical partitions for optimization to be done under
  // a VPJoin.
  DDui___(MAX_NUM_VERT_PARTS_FOR_OPT, "20"),
  DDui1__(MAX_ROWS_LOCKED_FOR_STABLE_ACCESS, "1"),
  // The max number of skewed values detected - skew buster
  DDui1__(MAX_SKEW_VALUES_DETECTED, "10000"),
  // multi-column skew inner table broadcast threshold in bytes (=1 MB)
  DDui___(MC_SKEW_INNER_BROADCAST_THRESHOLD, "1000000"),
  // multi-column skew sensitivity threshold
  //
  // For new MCSB (that is, we utilize MC skews directly),
  // apply the MC skew buster when
  // frequency of MC skews > MC_SKEW_SENSITIVITY_THRESHOLD / count_of_cpus
  //
  // For old MCSB (that is, we guess MC skews from SC skews),
  // apply the MC skew buster when
  // SFa,b... * countOfPipeline > MC_SKEW_SENSITIVITY_THRESHOLD
  // SFa,b ... is the skew factor for multi column a,b,...
  //
  XDDflt__(MC_SKEW_SENSITIVITY_THRESHOLD, "0.1"),
  DDflt0_(MDAM_CPUCOST_NET_OVH, "2000."),
  // The cost it takes to build the mdam network per predicate:
  // (we assume that the cost to build the mdam network is a linear function
  // of the key predicates)
  DDflt0_(MDAM_CPUCOST_NET_PER_PRED, ".5"),
  // controls the max. number of seek positions under which MDAM will be
  // allowed. Setting it to 0 turns off the feature.
  XDDui___(MDAM_NO_STATS_POSITIONS_THRESHOLD, "10"),
  // MDAM_SCAN_METHOD ON means MDAM is enabled,
  // OFF means MDAM is disabled. MDAM is enabled by default
  // externalized 06/21/01 RV
  // mdam off on open source at this point
  XDDkwd__(MDAM_SCAN_METHOD, "ON"),
  DDflt0_(MDAM_SELECTION_DEFAULT, "0.5"),
  DDkwd__(MDAM_TRACING, "OFF"),
  // controls the max. number of probes at which MDAM under NJ plan will be
  // generated. Setting it to 0 turns off the feature.
  XDDui___(MDAM_UNDER_NJ_PROBES_THRESHOLD, "0"),
  // controls the amount of penalty for CPU resource required that is
  // beyond the value specified by MDOP_CPUS_SOFT_LIMIT. The number of extra CPUs
  // actually allocated is computed as the original value divided by the CQD.
  // If the CQD is set to 1 (default), then there is no penalty.
  DDflt1_(MDOP_CPUS_PENALTY, "70"),
  // specify the limit beyond which the number of CPUs will be limited.
  DDui1__(MDOP_CPUS_SOFT_LIMIT, "64"),
  // controls the amount of penalty for CPU resource per memory unit
  // required that is beyond the value specified by MDOP_CPUS_SOFT_LIMIT.
  // The number of extra CPUs actually allocated is computed as the
  // original value divided by the CQD.
  DDflt1_(MDOP_MEMORY_PENALTY, "70"),
  // CQD to test/enforce heap memory upper limits
  // values are in KB
  DDui___(MEMORY_LIMIT_CMPCTXT_UPPER_KB, "0"),
  DDui___(MEMORY_LIMIT_CMPSTMT_UPPER_KB, "0"),
  DDui___(MEMORY_LIMIT_HISTCACHE_UPPER_KB, "0"),
  DDui___(MEMORY_LIMIT_NATABLECACHE_UPPER_KB, "0"),
  DDui___(MEMORY_LIMIT_QCACHE_UPPER_KB, "0"),
  // SQL/MX Compiler/Optimizer Memory Monitor.
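  // A minimal way to exercise the monitor (assumed usage, inferred from the
  // CQD names below rather than documented here):
  //   CONTROL QUERY DEFAULT MEMORY_MONITOR 'ON';
  //   CONTROL QUERY DEFAULT MEMORY_MONITOR_LOGFILE '/tmp/memmon.log';
  // which samples compiler heap usage every MEMORY_MONITOR_TASK_INTERVAL
  // optimizer tasks and writes to the named log file.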
DDkwd__(MEMORY_MONITOR, "OFF"), DDui1__(MEMORY_MONITOR_AFTER_TASKS, "30000"), DDkwd__(MEMORY_MONITOR_IN_DETAIL, "OFF"), DD_____(MEMORY_MONITOR_LOGFILE, "NONE"), DDkwd__(MEMORY_MONITOR_LOG_INSTANTLY, "OFF"), DDui1__(MEMORY_MONITOR_TASK_INTERVAL, "5000"), // Hash join currently uses 20 Mb before it overflows, use this // as the limit DDui1__(MEMORY_UNITS_SIZE, "20480"), // amount of memory available per CPU for any query SDDflte_(MEMORY_UNIT_ESP, "300"), DDflt1_(MEMORY_USAGE_NICE_CONTEXT_FACTOR, "1"), DDflt1_(MEMORY_USAGE_OPT_PASS_FACTOR, "1.5"), DDui1__(MEMORY_USAGE_SAFETY_NET, "500"), // MERGE_JOINS ON means do MERGE_JOINS XDDkwd__(MERGE_JOINS, "ON"), DDkwd__(MERGE_JOIN_ACCEPT_MULTIPLE_NJ_PROBES, "ON"), DDkwd__(MERGE_JOIN_CONTROL, "OFF"), DDkwd__(MERGE_JOIN_WITH_POSSIBLE_DEADLOCK, "OFF"), SDDui___(METADATA_CACHE_SIZE, "20"), DDkwd__(METADATA_STABLE_ACCESS, "OFF"), //------------------------------------------------------------------- // Minimum ESP parallelism. If the user does not specify this value // (default value 0 does not change) then the number of segments // (totalNumCPUs/16, where totalNumCPUs=gpClusterInfo->numOfSMPs()) // will be used as the value of minimum ESP parallelism. If user sets // this value it should be integer between 1 and totalNumCPUs. In // this case actual value of minimum ESP parallelism will be // min(CDQ value, MDOP), where MDOP (maximum degree of parallelism) // is defined by adaptive segmentation //------------------------------------------------------------------- DDui___(MINIMUM_ESP_PARALLELISM, "0"), DDui1__(MIN_LONG_VARCHAR_DEFAULT_SIZE, "1"), DDui1__(MIN_LONG_WVARCHAR_DEFAULT_SIZE, "1"), DDkwd__(MIN_MAX_OPTIMIZATION, "ON"), DDpct__(MJ_BMO_QUOTA_PERCENT, "0"), DDflt0_(MJ_CPUCOST_ALLOCATE_LIST, ".05"), DDflt0_(MJ_CPUCOST_CLEAR_LIST, ".01"), DDflt0_(MJ_CPUCOST_GET_NEXT_ROW_FROM_LIST, ".01"), // calibrated 01/16/98: // 01/13/98 40000., this did not work with small tables // Before 01/13/98: 0.5 DDflt0_(MJ_CPUCOST_INITIALIZE, "1."), // Before 03/12/98: 0.4 // Before 01/13/98: 0.01 DDflt0_(MJ_CPUCOST_INSERT_ROW_TO_LIST, ".0001"), DDflt0_(MJ_CPUCOST_REWIND_LIST, ".01"), DDflte_(MJ_LIST_NODE_SIZE, ".01"), DDkwd__(MJ_OVERFLOW, "ON"), DDkwd__(MODE_SEABASE, "ON"), DDkwd__(MODE_SEAHIVE, "ON"), SDDkwd__(MODE_SPECIAL_1, "OFF"), SDDkwd__(MODE_SPECIAL_2, "OFF"), // enable special features in R2.93 DDkwd__(MODE_SPECIAL_3, "OFF"), DDkwd__(MODE_SPECIAL_4, "OFF"), DDkwd__(MODE_SPECIAL_5, "OFF"), DDnsklo(MP_CATALOG, "$SYSTEM.SQL"), DDnsksv(MP_SUBVOLUME, "SUBVOL"), DDnsksy(MP_SYSTEM, ""), DDnskv_(MP_VOLUME, "$VOL"), DDflt0_(MSCF_CONCURRENCY_IO, "0.10"), DDflt0_(MSCF_CONCURRENCY_MSG, "0.10"), // Tests suggest that RELEASE is about 2.5 times faster than DEBUG // RELEASE is always faster than DEBUG code so this default must be // at least one. DDflt1_(MSCF_DEBUG_TO_RELEASE_MULTIPLIER, "2.5"), // MSCF_ET_CPU units are seconds/thousand of CPU instructions // History: // Before 02/01/99, the speed was calibrated for debug, now its is for // release: 0.00005 DDflte_(MSCF_ET_CPU, "0.000014"), // was 0.00002 12/2k // MSCF_ET_IO_TRANSFER units are seconds/Kb // History // Changed to '0.000455' to reflect new calibration data // Before 03/11/99 "0.000283" DDflte_(MSCF_ET_IO_TRANSFER, "0.00002"), // Assume time to transfer a KB of local message is 5 times // faster than the time to transfer a KB from disk // Units of MSCF_ET_LOCAL_MSG_TRANSFER are seconds/Kb DDflte_(MSCF_ET_LOCAL_MSG_TRANSFER, "0.000046"), // $$$ This should be removed. 
It is only used by preliminary costing // for the materialize operator, which should not be using it. DDflte_(MSCF_ET_NM_PAGE_FAULTS, "1"), // "?" used? // : for calibration on 04/08/2004 // Seek time will be derived from disk type. // MSCF_ET_NUM_IO_SEEKS units are seconds DDflte_(MSCF_ET_NUM_IO_SEEKS, "0.0038"), // Assume sending a local message takes 1000 cpu instructions DDflte_(MSCF_ET_NUM_LOCAL_MSGS, "0.000125"), // Assume sending a remote message takes 10000 cpu instructions // DDflte_(MSCF_ET_NUM_REMOTE_MSGS, "0.00125"), // Change the number of instructions to encode a remote message to be // the same as the local message DDflte_(MSCF_ET_NUM_REMOTE_MSGS, "0.000125"), // Assume 1MB/second transfer rate for transferring remote message bytes // (Based on 10 Megabit/second Ethernet transfer rate) // MSCF_ET_REMOTE_MSG_TRANSFER units are kb/Sec // DDflte_(MSCF_ET_REMOTE_MSG_TRANSFER, "0.001"), // the remote msg are 10% more costly than the local transfer // but also may depend on the physical link, so externalize it DDflte_(MSCF_ET_REMOTE_MSG_TRANSFER, "0.00005"), // ------------------------------------------------------------------------- // Factors used for estimating overlappability of I/O and messaging used // in the calculation for overlapped addition // Assume 50% overlap for now. // ------------------------------------------------------------------------- DDflte_(MSCF_OV_IO, "0.5"), DDflte_(MSCF_OV_IO_TRANSFER, "0.5"), DDflte_(MSCF_OV_LOCAL_MSG_TRANSFER, "0.5"), DDflte_(MSCF_OV_MSG, "0.5"), DDflte_(MSCF_OV_NUM_IO_SEEKS, "0.5"), DDflte_(MSCF_OV_NUM_LOCAL_MSGS, "0.5"), DDflte_(MSCF_OV_NUM_REMOTE_MSGS, "0.5"), DDflte_(MSCF_OV_REMOTE_MSG_TRANSFER, "0.5"), DDui___(MSCF_SYS_DISKS, "16"), // "?" used? DDui___(MSCF_SYS_MEMORY_PER_CPU, "1"), // "?" used? DDui___(MSCF_SYS_TEMP_SPACE_PER_DISK, "50"), // "?" used? DDkwd__(MTD_GENERATE_CC_PREDS, "ON"), DDint__(MTD_MDAM_NJ_UEC_THRESHOLD, "100"), // Allow for the setting of the row count in a long running operation XDDui1__(MULTI_COMMIT_SIZE, "10000"), // try the join order specified in the queries, this will cause the // enumeration of the initial join order specified by the user // among the join orders enumerated // ** This is currently OFF by default ** DDkwd__(MULTI_JOIN_CONSIDER_INITIAL_JOIN_ORDER, "OFF"), // used in JBBSubsetAnalysis::isAStarPattern for finding lowest cost // outer subtree for NJ into fact table. 
DDflt0_(MULTI_JOIN_PROBE_HASH_TABLE, "0.000001"), // threshold above which a query is considered complex // this only applies to queries that can be rewritten // as Multi Joins DDint__(MULTI_JOIN_QUERY_COMPLEXITY_THRESHOLD, "5120"), // threshold above which a query is considered to do // a lot of work his only applies to queries that can be // rewritten as Multi Joins DDflt__(MULTI_JOIN_QUERY_WORK_THRESHOLD, "0"), SDDint__(MULTI_JOIN_THRESHOLD, "3"), DDint__(MULTI_PASS_JOIN_ELIM_LIMIT, "5"), DDflt0_(MU_CPUCOST_INITIALIZE, ".05"), DDui___(MU_INITIAL_BUFFER_COUNT, "5."), DDflte_(MU_INITIAL_BUFFER_SIZE, "1033.7891"), //-------------------------------------------------------------------------- //++ MV XDDkwd__(MVGROUP_AUTOMATIC_CREATION, "ON"), DDkwd__(MVQR_ALL_JBBS_IN_QD, "OFF"), #ifdef NDEBUG DDkwd__(MVQR_ENABLE_LOGGING, "OFF"), // No logging by default for release #else DDkwd__(MVQR_ENABLE_LOGGING, "ON"), #endif DD_____(MVQR_FILENAME_PREFIX, "/usr/tandem/sqlmx/log"), DDkwd__(MVQR_LOG_QUERY_DESCRIPTORS, "OFF"), DDint__(MVQR_MAX_EXPR_DEPTH, "20"), DDint__(MVQR_MAX_EXPR_SIZE, "100"), DDint__(MVQR_MAX_MV_JOIN_SIZE, "10"), DDkwd__(MVQR_PARAMETERIZE_EQ_PRED, "ON"), DDkwd__(MVQR_PRIVATE_QMS_INIT, "SMD"), DDansi_(MVQR_PUBLISH_TABLE_LOCATION, ""), DDkwd__(MVQR_PUBLISH_TO, "BOTH"), DDansi_(MVQR_REWRITE_CANDIDATES, ""), XDDkwd__(MVQR_REWRITE_ENABLED_OPTION, "OFF"), // @ZX -- change to ON later XDDui0_5(MVQR_REWRITE_LEVEL, "0"), XDDkwd__(MVQR_REWRITE_SINGLE_TABLE_QUERIES, "ON"), DDkwd__(MVQR_USE_EXTRA_HUB_TABLES, "ON"), DDkwd__(MVQR_USE_RI_FOR_EXTRA_HUB_TABLES, "OFF"), DD_____(MVQR_WORKLOAD_ANALYSIS_MV_NAME, ""), XDDMVA__(MV_AGE, ""), XDDkwd__(MV_ALLOW_SELECT_SYSTEM_ADDED_COLUMNS, "OFF"), DDkwd__(MV_AS_ROW_TRIGGER, "OFF"), DDkwd__(MV_AUTOMATIC_LOGGABLE_COLUMN_MAINTENANCE, "ON"), DDkwd__(MV_DUMP_DEBUG_INFO, "OFF"), DDkwd__(MV_ENABLE_INTERNAL_REFRESH_SHOWPLAN, "OFF"), DDui___(MV_LOG_CLEANUP_SAFETY_FACTOR, "200"), DDui___(MV_LOG_CLEANUP_USE_MULTI_COMMIT, "1"), SDDkwd__(MV_LOG_PUSH_DOWN_DP2_DELETE, "OFF"), // push down mv logging tp dp2 for delete SDDkwd__(MV_LOG_PUSH_DOWN_DP2_INSERT, "OFF"), // push down mv logging tp dp2 for insert SDDkwd__(MV_LOG_PUSH_DOWN_DP2_UPDATE, "ON"), // push down mv logging tp dp2 for update SDDui___(MV_REFRESH_MAX_PARALLELISM, "0"), DDui___(MV_REFRESH_MAX_PIPELINING, "0"), DDint__(MV_REFRESH_MDELTA_MAX_DELTAS_THRESHOLD, "31"), DDint__(MV_REFRESH_MDELTA_MAX_JOIN_SIZE_FOR_SINGLE_PHASE, "3"), DDint__(MV_REFRESH_MDELTA_MIN_JOIN_SIZE_FOR_SINGLE_PRODUCT_PHASE, "8"), DDint__(MV_REFRESH_MDELTA_PHASE_SIZE_FOR_MID_RANGE, "6"), DDkwd__(MV_TRACE_INCONSISTENCY, "OFF"), DDSint__(MXCMP_PRIORITY, "0"), DDSint__(MXCMP_PRIORITY_DELTA, "0"), DDkwd__(NAMETYPE, "ANSI"), DDkwd__(NAR_DEPOBJ_ENABLE, "ON"), DDkwd__(NAR_DEPOBJ_ENABLE2, "ON"), // NATIONAL_CHARSET reuses the "kwd" logic here, w/o having to add any // DF_ token constants (this can be considered either clever or kludgy coding). DDkwd__(NATIONAL_CHARSET, (char *)SQLCHARSETSTRING_UNICODE), // These CQDs are reserved for NCM. These are mostly used for // internal testing, turning on/off features for debugging, and for tuning. // In normal situations, these will not be externalized in keeping // with the very few CQDs philosophy of NCM. // These are applicable only in conjunction with SIMPLE_COST_MODEL 'on'. 
DDflt__(NCM_CACHE_SIZE_IN_BLOCKS, "52"), DDflt__(NCM_COSTLIMIT_FACTOR, "0.05"), //change to 0.05 DDint__(NCM_ESP_FIXUP_WEIGHT, "300"), DDkwd__(NCM_ESP_STARTUP_FIX, "ON"), DDflt__(NCM_EXCH_MERGE_FACTOR, "0.10"), // change to 0.10 DDkwd__(NCM_EXCH_NDCS_FIX, "ON"), // change to ON DDkwd__(NCM_HBASE_COSTING, "ON"), // change to ON DDkwd__(NCM_HGB_OVERFLOW_COSTING, "ON"), DDkwd__(NCM_HJ_OVERFLOW_COSTING, "ON"), DDflt__(NCM_IND_JOIN_COST_ADJ_FACTOR, "1.0"), DDflt__(NCM_IND_JOIN_SELECTIVITY, "1.0"), DDflt__(NCM_IND_SCAN_COST_ADJ_FACTOR, "1.0"), DDflt__(NCM_IND_SCAN_SELECTIVITY, "1.0"), DDflt__(NCM_MAP_CPU_FACTOR, "4.0"), DDflt__(NCM_MAP_MSG_FACTOR, "4.0"), DDflt__(NCM_MAP_RANDIO_FACTOR, "4.0"), DDflt__(NCM_MAP_SEQIO_FACTOR, "4.0"), DDflt__(NCM_MDAM_COST_ADJ_FACTOR, "1.0"), DDflt__(NCM_MJ_TO_HJ_FACTOR, "0.6"), DDflt__(NCM_NJ_PC_THRESHOLD, "1.0"), DDflt0_(NCM_NJ_PROBES_MAXCARD_FACTOR, "10000"), DDkwd__(NCM_NJ_SEQIO_FIX, "ON"), // change to ON DDint__(NCM_NUM_SORT_RUNS, "4"), DDflt__(NCM_OLTP_ET_THRESHOLD, "60.0"), DDflt__(NCM_PAR_ADJ_FACTOR, "0.10"), DDkwd__(NCM_PAR_GRPBY_ADJ, "ON"), DDkwd__(NCM_PRINT_ROWSIZE, "OFF"), DDflt__(NCM_RAND_IO_ROWSIZE_FACTOR, "0"), DDflt__(NCM_RAND_IO_WEIGHT, "3258"), DDflt__(NCM_SEQ_IO_ROWSIZE_FACTOR, "0"), DDflt__(NCM_SEQ_IO_WEIGHT, "543"), DDflt__(NCM_SERIAL_NJ_FACTOR, "2"), DDflt__(NCM_SGB_TO_HGB_FACTOR, "0.8"), DDkwd__(NCM_SKEW_COST_ADJ_FOR_PROBES, "OFF"), DDkwd__(NCM_SORT_OVERFLOW_COSTING, "ON"), DDflt__(NCM_TUPLES_ROWSIZE_FACTOR, "0.5"), DDflt__(NCM_UDR_NANOSEC_FACTOR, "0.01"), DDkwd__(NCM_USE_HBASE_REGIONS, "ON"), // NESTED_JOINS ON means do NESTED_JOINS XDDkwd__(NESTED_JOINS, "ON"), // max. number of ESPs that will deal with skews for OCR // 0 means to turn off the feature DDintN1__(NESTED_JOINS_ANTISKEW_ESPS , "16"), DDkwd__(NESTED_JOINS_CHECK_LEADING_KEY_SKEW, "OFF"), DDkwd__(NESTED_JOINS_FULL_INNER_KEY, "OFF"), DDkwd__(NESTED_JOINS_KEYLESS_INNERJOINS, "ON"), DDui1__(NESTED_JOINS_LEADING_KEY_SKEW_THRESHOLD, "15"), DDkwd__(NESTED_JOINS_NO_NSQUARE_OPENS, "ON"), DDkwd__(NESTED_JOINS_OCR_GROUPING, "OFF"), // 128X32 being the default threshold for OCR. // 128 partitions per table and 32 ESPs per NJ operator SDDint__(NESTED_JOINS_OCR_MAXOPEN_THRESHOLD, "4096"), // PLAN0 is solely controlled by OCR. If this CQD is off, then // PLAN0 is off unconditionally. This CQD is used by OCR unit test. DDkwd__(NESTED_JOINS_PLAN0, "ON"), // try the explicit sort plan when plan2 produces a non-sort plan DDkwd__(NESTED_JOINS_PLAN3_TRY_SORT, "ON"), // Enable caching for eligible nested joins - see NestedJoin::preCodeGen. DDkwd__(NESTED_JOIN_CACHE, "ON"), // Enable pulling up of predicates into probe cache DDkwd__(NESTED_JOIN_CACHE_PREDS, "ON"), // Nested Join Heuristic DDkwd__(NESTED_JOIN_CONTROL, "ON"), // Allow nested join for cross products DDkwd__(NESTED_JOIN_FOR_CROSS_PRODUCTS, "ON"), DDkwd__(NEW_MDAM, "ON"), DDkwd__(NEW_OPT_DRIVER, "ON"), // Ansi name of the next DEFAULTS table to read in. // Contains blanks, or the name of a DEFAULTS table to read values from next, // after reading all values from this DEFAULTS table. The name may contain // format strings of '%d' and '%u', which are replaced with the domain name // and user name, respectively, of the current user. The name may begin with // '$', in which it is replaced by its value as a SYSTEM environment variable. // This value in turn may contain '%d' and '%u' formats. 
When these // replacements are complete, the resulting name is qualified by the current // default catalog and schema, if necessary, and the resulting three-part ANSI // table's default values are read in. This table may contain another // NEXT_DEFAULTS_TABLE value, and different default CATALOG and // SCHEMA values to qualify the resulting table name, and so on, allowing a // chain of tables to be read; combined with the format and environment // variable replacements, this allows per-domain, per-system, and per-user // customization of SQL/MX default values. DDansi_(NEXT_DEFAULTS_TABLE, ""), DDui1__(NEXT_VALUE_FOR_BUFFER_SIZE, "10240"), DDui1__(NEXT_VALUE_FOR_NUM_BUFFERS, "3"), DDui1__(NEXT_VALUE_FOR_SIZE_DOWN, "4"), DDui1__(NEXT_VALUE_FOR_SIZE_UP, "2048"), DDflt0_(NJ_CPUCOST_INITIALIZE, ".1"), DDflt0_(NJ_CPUCOST_PASS_ROW, ".02"), DDflte_(NJ_INC_AFTERLIMIT, "0.0055"), DDflte_(NJ_INC_MOVEROWS, "0.0015"), DDflte_(NJ_INC_UPTOLIMIT, "0.0225"), DDui___(NJ_INITIAL_BUFFER_COUNT, "5"), DDui1__(NJ_INITIAL_BUFFER_SIZE, "5"), DDui1__(NJ_MAX_SEEK_DISTANCE, "5000"), // UDF costing CQDs for processing a steady state row DDui___(NORMAL_UDF_CPU_COST, "100"), DDui___(NORMAL_UDF_IO_COST, "0"), DDui___(NORMAL_UDF_MSG_COST, "2"), XDDui30_32000(NOT_ATOMIC_FAILURE_LIMIT, "32000"), //NOT IN ANSI NULL semantics rule DDkwd__(NOT_IN_ANSI_NULL_SEMANTICS, "ON"), //NOT IN optimization DDkwd__(NOT_IN_OPTIMIZATION, "ON"), //NOT IN outer column optimization DDkwd__(NOT_IN_OUTER_OPTIMIZATION, "ON"), // NOT IN skew buster optimization DDkwd__(NOT_IN_SKEW_BUSTER_OPTIMIZATION, "ON"), DDkwd__(NOT_NULL_CONSTRAINT_DROPPABLE_OPTION, "OFF"), DDkwd__(NOWAITED_FIXUP_MESSAGE_TO_DP2, "OFF"), // NSK DEBUG defaults DDansi_(NSK_DBG, "OFF"), DDansi_(NSK_DBG_COMPILE_INSTANCE, "USER"), DDkwd__(NSK_DBG_GENERIC, "OFF"), DDansi_(NSK_DBG_LOG_FILE, ""), DDkwd__(NSK_DBG_MJRULES_TRACKING, "OFF"), DDkwd__(NSK_DBG_PRINT_CHAR_INPUT, "OFF"), DDkwd__(NSK_DBG_PRINT_CHAR_OUTPUT, "OFF"), DDkwd__(NSK_DBG_PRINT_CONSTRAINT, "OFF"), DDkwd__(NSK_DBG_PRINT_CONTEXT, "OFF"), DDkwd__(NSK_DBG_PRINT_CONTEXT_POINTER, "OFF"), DDkwd__(NSK_DBG_PRINT_COST, "OFF"), DDkwd__(NSK_DBG_PRINT_COST_LIMIT, "OFF"), DDkwd__(NSK_DBG_PRINT_INDEX_ELIMINATION, "OFF"), DDkwd__(NSK_DBG_PRINT_ITEM_EXPR, "OFF"), DDkwd__(NSK_DBG_PRINT_LOG_PROP, "OFF"), DDkwd__(NSK_DBG_PRINT_PHYS_PROP, "OFF"), DDkwd__(NSK_DBG_PRINT_TASK, "OFF"), DDkwd__(NSK_DBG_PRINT_TASK_STACK, "OFF"), DDkwd__(NSK_DBG_QUERY_LOGGING_ONLY, "OFF"), DDansi_(NSK_DBG_QUERY_PREFIX, ""), DDkwd__(NSK_DBG_SHOW_PASS1_PLAN, "OFF"), DDkwd__(NSK_DBG_SHOW_PASS2_PLAN, "OFF"), DDkwd__(NSK_DBG_SHOW_PLAN_LOG, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_ANALYSIS, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_BINDING, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_CODEGEN, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_NORMALIZATION, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_PARSING, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_PRE_CODEGEN, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_SEMANTIC_QUERY_OPTIMIZATION, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_TRANSFORMATION, "OFF"), DDkwd__(NSK_DBG_STRATEGIZER, "OFF"), DDflt0_(NUMBER_OF_PARTITIONS_DEVIATION, "0.25"), DDui1__(NUMBER_OF_ROWS_PARALLEL_THRESHOLD, "5000"), DDui1__(NUMBER_OF_USERS, "1"), DDui1__(NUM_OF_BLOCKS_PER_ACCESS, "SYSTEM"), DDflt0_(NUM_OF_PARTS_DEVIATION_TYPE2_JOINS, "SYSTEM"), DDkwd__(NVCI_PROCESS, "FALSE"), DDflt0_(OCB_COST_ADJSTFCTR, "0.996"), DDui___(OCR_FOR_SIDETREE_INSERT, "1"), DDkwd__(ODBC_METADATA_PROCESS, "FALSE"), DDkwd__(ODBC_PROCESS, "FALSE"), DDflte_(OHJ_BMO_REUSE_SORTED_BMOFACTOR_LIMIT, "3.0"), 
  DDflte_(OHJ_BMO_REUSE_SORTED_UECRATIO_UPPERLIMIT, "0.7"),
  DDflte_(OHJ_BMO_REUSE_UNSORTED_UECRATIO_UPPERLIMIT, "0.01"),
  DDflte_(OHJ_VBMOLIMIT, "5.0"),
  DDui1__(OLAP_BUFFER_SIZE, "262144"), // Do not alter (goes to DP2)
  DDkwd__(OLAP_CAN_INVERSE_ORDER, "ON"),
  DDui1__(OLAP_MAX_FIXED_WINDOW_EXTRA_BUFFERS, "2"),
  DDui1__(OLAP_MAX_FIXED_WINDOW_FRAME, "50000"),
  DDui1__(OLAP_MAX_NUMBER_OF_BUFFERS, "100000"),
  DDui___(OLAP_MAX_ROWS_IN_OLAP_BUFFER, "0"),
  // applies for fixed windows -- number of additional OLAP buffers
  // to allocate on top of the minimum number
  DDkwd__(OLD_HASH2_GROUPING, "FALSE"),
  DDkwd__(OLT_QUERY_OPT, "ON"),
  DDkwd__(OLT_QUERY_OPT_LEAN, "OFF"),
  // -----------------------------------------------------------------------
  // Optimizer pruning heuristics.
  // -----------------------------------------------------------------------
  DDkwd__(OPH_EXITHJCRCONTCHILOOP, "ON"),
  DDkwd__(OPH_EXITMJCRCONTCHILOOP, "ON"),
  DDkwd__(OPH_EXITNJCRCONTCHILOOP, "OFF"),
  DDkwd__(OPH_PRUNE_WHEN_COST_LIMIT_EXCEEDED, "OFF"),
  DDflt__(OPH_PRUNING_COMPLEXITY_THRESHOLD, "10.0"),
  DDflt__(OPH_PRUNING_PASS2_COST_LIMIT, "-1.0"),
  DDkwd__(OPH_REDUCE_COST_LIMIT_FROM_CANDIDATES, "OFF"),
  DDkwd__(OPH_REDUCE_COST_LIMIT_FROM_PASS1_SOLUTION, "ON"),
  DDkwd__(OPH_REUSE_FAILED_PLAN, "ON"),
  DDkwd__(OPH_REUSE_OPERATOR_COST, "OFF"),
  DDkwd__(OPH_SKIP_OGT_FOR_SHARED_GC_FAILED_CL, "OFF"),
  DDkwd__(OPH_USE_CACHED_ELAPSED_TIME, "ON"),
  DDkwd__(OPH_USE_CANDIDATE_PLANS, "OFF"),
  DDkwd__(OPH_USE_COMPARE_COST_THRESHOLD, "ON"),
  DDkwd__(OPH_USE_CONSERVATIVE_COST_LIMIT, "OFF"),
  DDkwd__(OPH_USE_ENFORCER_PLAN_PROMOTION, "OFF"),
  DDkwd__(OPH_USE_FAILED_PLAN_COST, "ON"),
  DDkwd__(OPH_USE_NICE_CONTEXT, "OFF"),
  DDkwd__(OPH_USE_ORDERED_MJ_PRED, "OFF"),
  DDkwd__(OPH_USE_PWS_FLAG_FOR_CONTEXT, "OFF"),
  XDDui___(OPI_ERROR73_RETRIES, "10"),
  DDflt__(OPTIMIZATION_BUDGET_FACTOR, "5000"),
  DDkwd__(OPTIMIZATION_GOAL, "LASTROW"),
  XDDkwd__(OPTIMIZATION_LEVEL, "3"),
  DDpct__(OPTIMIZATION_LEVEL_1_CONSTANT_1, "50"),
  DDpct__(OPTIMIZATION_LEVEL_1_CONSTANT_2, "0"),
  DDui1__(OPTIMIZATION_LEVEL_1_IMMUNITY_LIMIT, "5000"),
  DDui1__(OPTIMIZATION_LEVEL_1_MJENUM_LIMIT, "20"),
  DDui1__(OPTIMIZATION_LEVEL_1_SAFETY_NET, "30000"),
  DDflt__(OPTIMIZATION_LEVEL_1_SAFETY_NET_MULTIPLE, "3.0"),
  DDui1__(OPTIMIZATION_LEVEL_1_THRESHOLD, "1000"),
  DDui1__(OPTIMIZATION_TASKS_LIMIT, "2000000000"),
  DDui1__(OPTIMIZATION_TASK_CAP, "30000"),
  // Optimizer Graceful Termination:
  // 1 => randomProbabilistic pruning
  // > 1 pruning based on potential
  DDui1__(OPTIMIZER_GRACEFUL_TERMINATION, "2"),
  DDkwd__(OPTIMIZER_HEURISTIC_1, "OFF"),
  DDkwd__(OPTIMIZER_HEURISTIC_2, "OFF"),
  DDkwd__(OPTIMIZER_HEURISTIC_3, "OFF"),
  DDkwd__(OPTIMIZER_HEURISTIC_4, "OFF"),
  DDkwd__(OPTIMIZER_HEURISTIC_5, "OFF"),
  // Tells the compiler to print costing information
  DDkwd__(OPTIMIZER_PRINT_COST, "OFF"),
  // Tells the compiler to issue a warning with its internal counters
  DDkwd__(OPTIMIZER_PRINT_INTERNAL_COUNTERS, "OFF"),
  // Pruning is OFF because of bugs, turn to ON when bugs are fixed
  // (03/03/98)
  SDDkwd__(OPTIMIZER_PRUNING, "ON"),
  DDkwd__(OPTIMIZER_PRUNING_FIX_1, "ON"), // change to ON
  DDkwd__(OPTIMIZER_SYNTH_FUNC_DEPENDENCIES, "ON"),
  // OPTS_PUSH_DOWN_DAM made external RV 06/21/01 CR 10-010425-2440
  DDui___(OPTS_PUSH_DOWN_DAM, "0"),
  DDkwd__(ORDERED_HASH_JOIN_CONTROL, "ON"),
  SDDkwd__(OR_OPTIMIZATION, "ON"),
  DDkwd__(OR_PRED_ADD_BLOCK_TO_IN_LIST, "ON"),
  DDkwd__(OR_PRED_KEEP_CAST_VC_UCS2, "ON"),
  // controls the jump table method of evaluating an or pred.
in a scan node // 0 => feature is OFF, positive integer denotes max OR pred that will be // processed through a jump table. DDint__(OR_PRED_TO_JUMPTABLE, "2000"), // controls semijoin method of evaluating an or pred. // 0 => feature is OFF, positive number means if pred do not cover key cols // and jump table is not available, then the transformation is done if // inlist is larger than this value. DDint__(OR_PRED_TO_SEMIJOIN, "100"), // Ratio of tablesize (without application of any preds)to probes below // which semijoin trans. is favoured. DDflt0_(OR_PRED_TO_SEMIJOIN_PROBES_MAX_RATIO, "0.001"), // Minimum table size beyond which semijoin trans. is considered DDint__(OR_PRED_TO_SEMIJOIN_TABLE_MIN_SIZE, "10000"), // The Optimizer Simulator (OSIM) CQDs DDkwd__(OSIM_USE_POS, "OFF"), DDint__(OSIM_USE_POS_DISK_SIZE_GB, "0"), DD_____(OSIM_USE_POS_NODE_NAMES, ""), DDui2__(OS_MESSAGE_BUFFER_SIZE, "32"), // if set to "ansi", datetime output is in ansi format. Currently only // used in special_1 mode if the caller needs datetime value in // ansi format (like, during upd stats). DDansi_(OUTPUT_DATE_FORMAT, ""), // Overflow mode for scratch files DDkwd__(OVERFLOW_MODE, "MMAP"), // Sequence generator override identity values DDkwd__(OVERRIDE_GENERATED_IDENTITY_VALUES, "OFF"), // allow users to specify a source schema to be // replaced by a target schema SDDosch_(OVERRIDE_SCHEMA, ""), // Allows users to specify their own SYSKEY value. In other words // the system does not generate one for them. // Prior to this CQD, pm_regenerate_syskey_for_insert was being used // to preserve the syskey. Carrying over these comments from // pm_regenerate_syskey_for_insert // For audited target partition, PM does the copy in multiple transactions // In each transaction PM does a insert/select from the source to the target // partition. The clustering key values from the last row of a transaction // is used as begin key value for the next transaction. If the table // has a syskey then it gets regenerated and last row contains the new // value for the syskey. This obviously causes us to start at a different // place then we intended to start from. The following default when set // to off forces the engine to not regenerate syskey. DDkwd__(OVERRIDE_SYSKEY, "OFF"), DDui___(PARALLEL_ESP_NODEMASK, "0"), // by default all parallelism heuristics are switched ON. DDkwd__(PARALLEL_HEURISTIC_1, "ON"), DDkwd__(PARALLEL_HEURISTIC_2, "ON"), DDkwd__(PARALLEL_HEURISTIC_3, "ON"), DDkwd__(PARALLEL_HEURISTIC_4, "ON"), // If PARALLEL_NUM_ESPS is "SYSTEM", // optimizer will compute the number of ESPs. XDDui1__(PARALLEL_NUM_ESPS, "SYSTEM"), // If PARALLEL_NUM_ESPS is "SYSTEM", // optimizer will compute the number of ESPs to be used for parallel ddl // operations. DDui1__(PARALLEL_NUM_ESPS_DDL, "SYSTEM"), // If PARALLEL_NUM_ESPS is "SYSTEM", // optimizer will compute the number of ESPs to be used for parallel purgedata // operation. DDui1__(PARALLEL_NUM_ESPS_PD, "SYSTEM"), // is partial sort applicable; if so adjust sort cost accordingly DDflt0_(PARTIAL_SORT_ADJST_FCTR, "1"), DDint__(PARTITIONING_SCHEME_SHARING, "1"), // The optimal number of partition access nodes for a process. // NOTE: Setting this to anything other than 1 will cause problems // with Cascades plan stealing! Don't do it unless you have to! 
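  // (Context, assumed from the name rather than stated here: a partition
  // access node is the executor operator that opens and drives one table
  // partition, so this CQD caps how many partitions a single ESP serves.
  // Per the warning above, leave it at its default of 1.)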
DDui1__(PARTITION_ACCESS_NODES_PER_ESP, "1"), DD_____(PCODE_DEBUG_LOGDIR, "" ), // Pathname of log directory for PCode work DDint__(PCODE_EXPR_CACHE_CMP_ONLY, "0" ), // PCode Expr Cache compare-only mode DDint__(PCODE_EXPR_CACHE_DEBUG, "0" ), // PCode Expr Cache debug (set to 1 to enable dbg logging) DDint__(PCODE_EXPR_CACHE_ENABLED, "1" ), // PCode Expr Cache Enabled (set to 0 to disable the cache) DD0_10485760(PCODE_EXPR_CACHE_SIZE,"2000000"), // PCode Expr Cache Max Size // Maximum number of PCODE Branch Instructions in an Expr // for which we will attempt PCODE optimizations. // NOTE: Default value reduced to 12000 for Trafodion to avoid stack // overflow in PCODE optimization where recursion is used. DDint__(PCODE_MAX_OPT_BRANCH_CNT, "12000"), // Maximum number of PCODE Instructions in an Expr // for which we will attempt PCODE optimizations. DDint__(PCODE_MAX_OPT_INST_CNT, "50000"), DDint__(PCODE_NE_DBG_LEVEL, "-1"), // Native Expression Debug Level DDint__(PCODE_NE_ENABLED, "1" ), // Native Expressions Enabled DDkwd__(PCODE_NE_IN_SHOWPLAN, "ON"), // Native Expression in Showplan output // This PCODE_NE_LOG_PATH cqd is now obsolete. Use PCODE_DEBUG_LOGDIR instead. // Would delete the following line except that would also mean deleting the // corresponding line in DefaultConstants.h which would change the values for // the following definitions in the same enum. DD_____(PCODE_NE_LOG_PATH, "" ), // Pathname of log file for Native Expression work - OBSOLETE DDint__(PCODE_OPT_FLAGS, "60"), DDkwd__(PCODE_OPT_LEVEL, "MAXIMUM"), DDint__(PHY_MEM_CONTINGENCY_MB, "3072"), DDkwd__(PLAN_STEALING, "ON"), DDui50_4194303(PM_OFFLINE_TRANSACTION_GRANULARITY, "5000"), DDui50_4194303(PM_ONLINE_TRANSACTION_GRANULARITY, "400"), // Not in use anymore. OVERRIDE_SYSKEY is used instead. 
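  // Illustrative consequence of OVERRIDE_SYSKEY 'ON' (restating the
  // explanation above, not new behavior): an INSERT ... SELECT that copies a
  // SYSKEY table preserves the source SYSKEY values instead of generating
  // new ones, so a copy restarted from the last committed clustering-key
  // value resumes exactly where it left off.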
DDkwd__(PM_REGENERATE_SYSKEY_FOR_INSERT, "ON"), // Partition OVerlay Support (POS) options SDDkwd__(POS, "DISK_POOL"), XDDpos__(POS_ABSOLUTE_MAX_TABLE_SIZE, ""), DDkwd__(POS_ALLOW_NON_PK_TABLES, "OFF"), DDui___(POS_CPUS_PER_SEGMENT, "16"), // default to 300 GB DDui___(POS_DEFAULT_LARGEST_DISK_SIZE_GB, "300"), // default to 72GB DDui___(POS_DEFAULT_SMALLEST_DISK_SIZE_GB, "72"), DDdskNS(POS_DISKS_IN_SEGMENT, ""), SDDui___(POS_DISK_POOL, "0"), DD_____(POS_FILE_OPTIONS, ""), SDDdskNS(POS_LOCATIONS, ""), DDkwd__(POS_MAP_HASH_TO_HASH2, "ON"), DDpos__(POS_MAX_EXTENTS, ""), SDDui___(POS_NUM_DISK_POOLS, "0"), DDui___(POS_NUM_OF_PARTNS, "SYSTEM"), SDDint__(POS_NUM_OF_TEMP_TABLE_PARTNS, "SYSTEM"), SDDpos__(POS_PRI_EXT_SIZE, "25"), DDkwd__(POS_RAISE_ERROR, "OFF"), SDDpos__(POS_SEC_EXT_SIZE, ""), SDDpos__(POS_TABLE_SIZE, ""), SDDpct__(POS_TEMP_TABLE_FREESPACE_THRESHOLD_PERCENT, "0"), SDDdskNS(POS_TEMP_TABLE_LOCATIONS, ""), SDDpos__(POS_TEMP_TABLE_SIZE, ""), DDkwd__(POS_TEST_MODE, "OFF"), DDui___(POS_TEST_NUM_NODES, "0"), DDui___(POS_TEST_NUM_VOLUMES_PER_NODE, "0"), // Use info from right child to require order on left child of NJ //PREFERRED_PROBING_ORDER_FOR_NESTED_JOIN made external RV 06/21/01 CR 10-010425-2440 DDkwd__(PREFERRED_PROBING_ORDER_FOR_NESTED_JOIN, "OFF"), DD0_18(PRESERVE_MIN_SCALE, "0"), DDkwd__(PRIMARY_KEY_CONSTRAINT_DROPPABLE_OPTION, "OFF"), DDkwd__(PSHOLD_CLOSE_ON_ROLLBACK, "OFF"), DDkwd__(PSHOLD_UPDATE_BEFORE_FETCH, "OFF"), SDDpsch_(PUBLIC_SCHEMA_NAME, ""), XDDrlis_(PUBLISHING_ROLES, ""), DDkwd__(PURGEDATA_WITH_OFFLINE_TABLE, "OFF"), // Query Invalidation - Debug/Regression test CQDs -- DO NOT externalize these DD_____(QI_PATH, "" ), // Specifies cat.sch.object path for object to have cache entries removed DD0_255(QI_PRIV, "0"), // Note: 0 disables the Debug Mechanism. Set non-zero to kick out cache entries. // Then set back to 0 *before* setting to a non-zero value again. // Do the query analysis phase DDkwd__(QUERY_ANALYSIS, "ON"), // query_cache max should be 200 MB. Set it 0 to turn off query cache //XDD0_200000(QUERY_CACHE, "0"), XDD0_200000(QUERY_CACHE, "16384"), // the initial average plan size (in kbytes) to use for configuring the // number of hash buckets to use for mxcmp's hash table of cached plans DD1_200000(QUERY_CACHE_AVERAGE_PLAN_SIZE, "30"), // literals longer than this are not parameterized DDui___(QUERY_CACHE_MAX_CHAR_LEN, "32000"), // a query with more than QUERY_CACHE_MAX_EXPRS ExprNodes is not cacheable DDint__(QUERY_CACHE_MAX_EXPRS, "1000"), // the largest number of cache entries that an unusually large cache // entry is allowed to displace from mxcmp's cache of query plans DD0_200000(QUERY_CACHE_MAX_VICTIMS, "10"), DDkwd__(QUERY_CACHE_MPALIAS, "OFF"), DD0_255(QUERY_CACHE_REQUIRED_PREFIX_KEYS, "255"), DDkwd__(QUERY_CACHE_RUNTIME, "ON"), SDDflt0_(QUERY_CACHE_SELECTIVITY_TOLERANCE, "0"), // query cache statement pinning is off by default DDkwd__(QUERY_CACHE_STATEMENT_PINNING, "OFF"), DDkwd__(QUERY_CACHE_STATISTICS, "OFF"), DD_____(QUERY_CACHE_STATISTICS_FILE, "qcachsts"), DDkwd__(QUERY_CACHE_TABLENAME, "OFF"), DDkwd__(QUERY_CACHE_USE_CONVDOIT_FOR_BACKPATCH, "ON"), // Limit CPU time a query can use in master or any ESP. Unit is seconds. XDDint__(QUERY_LIMIT_SQL_PROCESS_CPU, "0"), // Extra debugging info for QUERY_LIMIT feature. DDkwd__(QUERY_LIMIT_SQL_PROCESS_CPU_DEBUG, "OFF"), // How many iterations in scheduler subtask list before evaluating limits. DDint__(QUERY_LIMIT_SQL_PROCESS_CPU_DP2_FREQ, "16"), // For X-prod HJ: (# of rows joined * LIMIT) before preempt. 
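  // Example (illustrative): per the description above,
  //   CONTROL QUERY DEFAULT QUERY_LIMIT_SQL_PROCESS_CPU '60';
  // makes any query step that consumes more than 60 seconds of CPU in the
  // master or any ESP subject to the limit; the *_DP2_FREQ and *_XPROD knobs
  // only control how often that limit is checked.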
  DDint__(QUERY_LIMIT_SQL_PROCESS_CPU_XPROD, "10000"),
  // controls various expr optimizations based on bit flags.
  // see enum QueryOptimizationOptions in DefaultConstants.h
  DDint__(QUERY_OPTIMIZATION_OPTIONS, "3"),
  DDkwd__(QUERY_STRATEGIZER, "ON"),
  DDflt0_(QUERY_STRATEGIZER_2N_COMPLEXITY_FACTOR, "1"),
  DDflt0_(QUERY_STRATEGIZER_EXHAUSTIVE_COMPLEXITY_FACTOR, "1"),
  DDflt0_(QUERY_STRATEGIZER_N2_COMPLEXITY_FACTOR, "1"),
  DDflt0_(QUERY_STRATEGIZER_N3_COMPLEXITY_FACTOR, "1"),
  DDflt0_(QUERY_STRATEGIZER_N4_COMPLEXITY_FACTOR, "1"),
  DDflt0_(QUERY_STRATEGIZER_N_COMPLEXITY_FACTOR, "1"),
  DDkwd__(QUERY_TEMPLATE_CACHE, "ON"),
  DDkwd__(QUERY_TEXT_CACHE, "SYSTEM"),
  DDkwd__(R2_HALLOWEEN_SUPPORT, "OFF"),
  DDkwd__(RANGESPEC_TRANSFORMATION, "ON"), // RangeSpec Transformation CQD.
  // To be ANSI compliant you would have to set this default to 'FALSE'
  DDkwd__(READONLY_CURSOR, "TRUE"),
  // ReadTableDef compares transactional identifiers during endTransaction() processing
  DDkwd__(READTABLEDEF_TRANSACTION_ASSERT, "OFF"),
  DDkwd__(READTABLEDEF_TRANSACTION_ENABLE_WARNINGS, "OFF"),
  DDint__(READTABLEDEF_TRANSACTION_TESTPOINT, "0"),
  DDflt0_(READ_AHEAD_MAX_BLOCKS, "16.0"),
  // OFF means Ansi/NIST setting, ON is more similar to the SQL/MP behavior
  DDkwd__(RECOMPILATION_WARNINGS, "OFF"),
  // CLI caller to redrive CTAS (create table as) for child query monitoring
  DDkwd__(REDRIVE_CTAS, "OFF"),
  // The group by reduction for pushing a partial group by past the
  // right side of the TSJ must be at least this much. If 0.0, then
  // pushing it will always be tried.
  DDflt0_(REDUCTION_TO_PUSH_GB_PAST_TSJ, "0.0000000001"),
  // This is the code base for the calibration machine. It must be either
  // "DEBUG" or "RELEASE"
  // History:
  // Before 02/01/99: DEBUG
  DDkwd__(REFERENCE_CODE, "RELEASE"),
  // This is the frequency of the representative CPU of the base calibration
  // cluster.
  // REFERENCE_CPU_FREQUENCY units are MHz
  DDflte_(REFERENCE_CPU_FREQUENCY, "199."),
  // This is the seek time of the representative disk of the base
  // calibration cluster.
  // REFERENCE_IO_SEEK_TIME units are seconds
  DDflte_(REFERENCE_IO_SEEK_TIME, "0.0038"),
  // This is the sequential transfer rate for the representative
  // disk of the base calibration cluster.
  // REFERENCE_IO_SEQ_READ_RATE units are Mb/Sec
  DDflte_(REFERENCE_IO_SEQ_READ_RATE, "50.0"),
  // This is the transfer rate for the fast speed connection of
  // nodes in the base calibration cluster.
  // REFERENCE_MSG_LOCAL_RATE units are Mb/Sec
  DDflte_(REFERENCE_MSG_LOCAL_RATE, "10."),
  // This is the time per local msg for the fast speed connection of
  // nodes in the base calibration cluster.
  // REFERENCE_MSG_LOCAL_TIME units are seconds
  DDflte_(REFERENCE_MSG_LOCAL_TIME, "0.000125"),
  // This is the transfer rate for the connection among clusters
  // in the base calibration cluster (this only applies to NSK)
  // REFERENCE_MSG_REMOTE_RATE units are Mb/Sec
  DDflte_(REFERENCE_MSG_REMOTE_RATE, "1."),
  // This is the time per remote msg for the fast speed connection of
  // nodes in the base calibration cluster.
// REFERENCE_MSG_REMOTE_TIME units are seconds DDflte_(REFERENCE_MSG_REMOTE_TIME, "0.00125"), DDkwd__(REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT, "SYSTEM"), DDkwd__(REMOTE_ESP_ALLOCATION, "SYSTEM"), DDkwd__(REORG_IF_NEEDED, "OFF"), DDkwd__(REORG_VERIFY, "OFF"), DDrlis_(REPLICATE_ALLOW_ROLES, ""), // Determines the compression type to be used with DDL when replicating DDkwd__(REPLICATE_COMPRESSION_TYPE, "SYSTEM"), // Determines if DISK POOL setting should be passed with DDL when replicating DDkwd__(REPLICATE_DISK_POOL, "ON"), // Display a BDR-internally-generated command before executing it DDkwd__(REPLICATE_DISPLAY_INTERNAL_CMD, "OFF"), // Executing commands generated internally by BDR DDkwd__(REPLICATE_EXEC_INTERNAL_CMD, "OFF"), // VERSION of the message from the source system to maintain compatibility // This version should be same as REPL_IO_VERSION_CURR in executor/ExeReplInterface.h // Make changes accordingly in validataorReplIoVersion validator DDrver_(REPLICATE_IO_VERSION, "17"), DDansi_(REPLICATE_MANAGEABILITY_CATALOG, "MANAGEABILITY"), // max num of retries after replicate server(mxbdrdrc) returns an error DDui___(REPLICATE_NUM_RETRIES, "0"), DDansi_(REPLICATE_TEST_TARGET_CATALOG, ""), DDansi_(REPLICATE_TEST_TARGET_MANAGEABILITY_CATALOG, ""), DDkwd__(REPLICATE_WARNINGS, "OFF"), DDkwd__(RETURN_AVG_STREAM_WAIT, "OFF"), DDkwd__(REUSE_BASIC_COST, "ON"), // if set, tables are not closed at the end of a query. This allows // the same open to be reused for the next query which accesses that // table. // If the table is shared opened by multiple openers from the same // process, then the share count is decremented until it reaches 1. // At that time, the last open is preserved so it could be reused. // Tables are closed if user id changes. DDkwd__(REUSE_OPENS, "ON"), // multiplicative factor used to inflate cost of risky operators. // = 1.0 means do not demand an insurance premium from risky operators. // = 1.2 means demand a 20% insurance premium that cost of risky operators // must overcome before they will be chosen over less-risky operators. 
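  // Worked example of the premium arithmetic described above: with
  // RISK_PREMIUM_MJ at 1.15, a merge join whose estimated cost is 100
  // competes as if it cost 115, so a less risky alternative estimated at,
  // say, 110 wins even though its raw estimate is higher.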
DDflt0_(RISK_PREMIUM_MJ, "1.15"), XDDflt0_(RISK_PREMIUM_NJ, "1.0"), XDDflt0_(RISK_PREMIUM_SERIAL, "1.0"), XDDui___(RISK_PREMIUM_SERIAL_SCALEBACK_MAXCARD_THRESHOLD, "10000"), DDflt0_(ROBUST_HJ_TO_NJ_FUDGE_FACTOR, "0.0"), DDflt0_(ROBUST_PAR_GRPBY_EXCHANGE_FCTR, "0.25"), DDflt0_(ROBUST_PAR_GRPBY_LEAF_FCTR, "0.25"), // external master CQD that sets following internal CQDs // robust_query_optimization // MINIMUM SYSTEM HIGH MAXIMUM // risk_premium_NJ 1.0 system 2.5 5.0 // risk_premium_SERIAL 1.0 system 1.5 2.0 // partitioning_scheme_sharing 0 system 2 2 // robust_hj_to_nj_fudge_factor 0.0 system 3.0 1.0 // robust_sortgroupby 0 system 2 2 // risk_premium_MJ 1.0 system 1.5 2.0 // see optimizer/ControlDB.cpp ControlDB::doRobustQueryOptimizationCQDs // for the actual cqds that set these values XDDkwd__(ROBUST_QUERY_OPTIMIZATION, "SYSTEM"), // 0: allow sort group by in all // 1: disallow sort group by from partial grpByRoot if no order requirement // 2: disallow sort group by from partial grpByRoot // 3: disallow sort group by in ESP DDint__(ROBUST_SORTGROUPBY, "1"), SDDui___(ROUNDING_MODE, "0"), DDui___(ROUTINE_CACHE_SIZE, "20"), // UDF default Uec DDui___(ROUTINE_DEFAULT_UEC, "1"), DDkwd__(ROUTINE_JOINS_SPOIL_JBB, "OFF"), DDkwd__(ROWSET_ROW_COUNT, "OFF"), DDint__(SAP_KEY_NJ_TABLE_SIZE_THRESHOLD, "10000000"), DDkwd__(SAP_PA_DP2_AFFINITY_FOR_INSERTS, "ON"), DDkwd__(SAP_PREFER_KEY_NESTED_JOIN, "OFF"), DDint__(SAP_TUPLELIST_SIZE_THRESHOLD, "5000"), XDDkwd__(SAVE_DROPPED_TABLE_DDL, "OFF"), XDDansi_(SCHEMA, "SEABASE"), SDDdskNS(SCRATCH_DISKS, ""), SDDdskNS(SCRATCH_DISKS_EXCLUDED, "$SYSTEM"), DDdskNS(SCRATCH_DISKS_PREFERRED, ""), DDkwd__(SCRATCH_DISK_LOGGING, "OFF"), DDdskNT(SCRATCH_DRIVE_LETTERS, ""), DDdskNT(SCRATCH_DRIVE_LETTERS_EXCLUDED, ""), DDdskNT(SCRATCH_DRIVE_LETTERS_PREFERRED, ""), SDDpct__(SCRATCH_FREESPACE_THRESHOLD_PERCENT, "1"), DDui___(SCRATCH_IO_BLOCKSIZE_SORT, "524288"), //On LINUX, writev and readv calls are used to perform //scratch file IO. This CQD sets the vector size to use //in writev and readv calls. Overall IO size is affected //by this cqd. Also, related cqds that are related to //IO size are: COMP_INT_67, GEN_HGBY_BUFFER_SIZE. //GEN_HSHJ_BUFFER_SIZE, OLAP_BUFFER_SIZE, //EXE_HGB_INITIAL_HT_SIZE. Vector size is no-op on other //platforms. DDui___(SCRATCH_IO_VECTOR_SIZE_HASH, "8"), DDui___(SCRATCH_IO_VECTOR_SIZE_SORT, "1"), DDui___(SCRATCH_MAX_OPENS_HASH, "1"), DDui___(SCRATCH_MAX_OPENS_SORT, "1"), DDui___(SCRATCH_MGMT_OPTION, "11"), DDkwd__(SCRATCH_PREALLOCATE_EXTENTS, "OFF"), DD_____(SEABASE_CATALOG, TRAFODION_SYSCAT_LIT), DDkwd__(SEABASE_VOLATILE_TABLES, "ON"), // SeaMonster messaging -- the default can be ON, OFF, or SYSTEM. // When the default is SYSTEM we take the setting from env var // SQ_SEAMONSTER which will have a value of 0 or 1. 
DDkwd__(SEAMONSTER, "SYSTEM"), SDDkwd__(SEMIJOIN_TO_INNERJOIN_TRANSFORMATION, "SYSTEM"), // Disallow/Allow semi and anti-semi joins in MultiJoin framework DDkwd__(SEMI_JOINS_SPOIL_JBB, "OFF"), DDkwd__(SEQUENTIAL_BLOCKSPLIT, "SYSTEM"), DDansi_(SESSION_ID, ""), DDkwd__(SESSION_IN_USE, "OFF"), DDansi_(SESSION_USERNAME, ""), DDflt0_(SGB_CPUCOST_INITIALIZE, ".05"), DDui___(SGB_INITIAL_BUFFER_COUNT, "5."), DDui1__(SGB_INITIAL_BUFFER_SIZE, "5."), DDkwd__(SHAREOPENS_ON_REFCOUNT, "ON"), DDkwd__(SHARE_TEMPLATE_CACHED_PLANS, "ON"), DDui___(SHORT_OPTIMIZATION_PASS_THRESHOLD, "12"), SDDkwd__(SHOWCONTROL_SHOW_ALL, "OFF"), SDDkwd__(SHOWCONTROL_SHOW_SUPPORT, "OFF"), DDkwd__(SHOWDDL_DISPLAY_FORMAT, "EXTERNAL"), DDkwd__(SHOWDDL_DISPLAY_PRIVILEGE_GRANTS, "SYSTEM"), DDint__(SHOWDDL_FOR_REPLICATE, "0"), DDkwd__(SHOWLABEL_LOCKMODE, "OFF"), DDkwd__(SHOWWARN_OPT, "ON"), DDkwd__(SHOW_MEMO_STATS, "OFF"), DDkwd__(SIMILARITY_CHECK, "ON "), DDkwd__(SIMPLE_COST_MODEL, "ON"), XDDkwd__(SKEW_EXPLAIN, "ON"), XDDflt__(SKEW_ROWCOUNT_THRESHOLD, "1000000"), // Column row count // threshold below // which skew // buster is disabled. XDDflt__(SKEW_SENSITIVITY_THRESHOLD, "0.1"), DDkwd__(SKIP_METADATA_VIEWS, "OFF"), DDkwd__(SKIP_TRANSLATE_SYSCAT_DEFSCH_NAMES, "ON"), DDkwd__(SKIP_UNAVAILABLE_PARTITION, "OFF"), DDkwd__(SKIP_VCC, "OFF"), DDui0_5(SOFT_REQ_HASH_TYPE, "2"), DDkwd__(SORT_ALGO, "QS"), // Calibration // 01/23/98: 10000 // Original: 10. DDflt0_(SORT_CPUCOST_INITIALIZE, "10000."), DDui1__(SORT_EX_BUFFER_SIZE, "5."), DDkwd__(SORT_INTERMEDIATE_SCRATCH_CLEANUP, "ON"), DDui1__(SORT_IO_BUFFER_SIZE, "128."), DD1_200000(SORT_MAX_HEAP_SIZE_MB, "800"), DDkwd__(SORT_MEMORY_QUOTA_SYSTEM, "ON"), DD1_128(SORT_MERGE_BUFFER_UNIT_56KB, "1"), // Calibration // 04/06/2005: 1.5 DDflte_(SORT_QS_FACTOR, "1.5"), //Maximum records after which sort would switch over to //iterative heap sort. Most often in partial sort, we may want //do a quick sort or similar to avoid larger in-memory sort //setup. DDint__(SORT_REC_THRESHOLD, "1000"), // Calibration DDflte_(SORT_RS_FACTOR, "3.55"), // Calibration // 04/06/2005: 2.1 DDflte_(SORT_RW_FACTOR, "2.1"), DDflte_(SORT_TREE_NODE_SIZE, ".012"), DDkwd__(SQLMX_REGRESS, "OFF"), DDkwd__(SQLMX_SHOWDDL_SUPPRESS_ROW_FORMAT, "OFF"), DDansi_(SQLMX_UTIL_EXPLAIN_PLAN, "OFF"), SDDkwd__(SQLMX_UTIL_ONLINE_POPINDEX, "ON"), SDDui___(SSD_BMO_MAX_MEM_THRESHOLD_IN_MB, "1200"), // BertBert VV // Timeout for a streaming cursor to return to the fetch(), even if no // rows to return. The cursor is NOT closed, it just gives control to // the user again. // "0" means no timeout, just check instead. // "negative" means never timeout. // "positive" means the number of centiseconds to wait before timing out. XDDint__(STREAM_TIMEOUT, "-1"), XDDkwd__(SUBQUERY_UNNESTING, "ON"), DDkwd__(SUBQUERY_UNNESTING_P2, "ON"), DDkwd__(SUBSTRING_TRANSFORMATION, "OFF"), DDui___(SYNCDEPTH, "1"), XDDkwd__(TABLELOCK, "SYSTEM"), // This is the code base for the end user calibration cluster. // It must be either "DEBUG" or "RELEASE" #ifdef NDEBUG DDkwd__(TARGET_CODE, "RELEASE"), #else DDkwd__(TARGET_CODE, "DEBUG"), #endif // This is the frequency of the representative CPU of the end user // cluster. // TARGET_CPU_FREQUENCY units are MhZ. DDflte_(TARGET_CPU_FREQUENCY, "199."), // This is the seek time of the representative disk of the end user // cluster. // TARGET_IO_SEEK_TIME units are seconds DDflte_(TARGET_IO_SEEK_TIME, "0.0038"), // This is the sequential transfer rate for the representative // disk of the end user cluster. 
// TARGET_IO_SEQ_READ_RATE units are Mb/Sec
DDflte_(TARGET_IO_SEQ_READ_RATE, "50.0"),
// This is the transfer rate for the fast speed connection of
// nodes in the end user cluster.
// TARGET_MSG_LOCAL_RATE units are Mb/Sec
DDflte_(TARGET_MSG_LOCAL_RATE, "10."),
// This is the per msg time for the fast speed connection of
// nodes in the end user cluster.
// TARGET_MSG_LOCAL_TIME units are seconds
DDflte_(TARGET_MSG_LOCAL_TIME, "0.000125"),
// This is the transfer rate for the connection among clusters
// in the end user cluster (this only applies to NSK)
// TARGET_MSG_REMOTE_RATE units are Mb/Sec
DDflte_(TARGET_MSG_REMOTE_RATE, "1."),
// This is the per msg time for the connection among cluster
// nodes in the end user cluster.
// TARGET_MSG_REMOTE_TIME units are seconds
DDflte_(TARGET_MSG_REMOTE_TIME, "0.00125"),
DDvol__(TEMPORARY_TABLE_HASH_PARTITIONS, ""),
DDkwd__(TERMINAL_CHARSET, (char *)SQLCHARSETSTRING_ISO88591),
DDint__(TEST_PASS_ONE_ASSERT_TASK_NUMBER, "-1"),
DDint__(TEST_PASS_TWO_ASSERT_TASK_NUMBER, "-1"),
XDDintN2(TIMEOUT, "6000"),
DDflt0_(TMUDF_CARDINALITY_FACTOR, "1"),
DDflt0_(TMUDF_LEAF_CARDINALITY, "1"),
DDkwd__(TOTAL_RESOURCE_COSTING, "ON"),
DDint__(TRAF_ALIGNED_FORMAT_ADD_COL_METHOD, "2"),
DDkwd__(TRAF_ALIGNED_ROW_FORMAT, "OFF"),
DDkwd__(TRAF_ALLOW_ESP_COLOCATION, "OFF"),
DDkwd__(TRAF_ALLOW_SELF_REF_CONSTR, "ON"),
DDkwd__(TRAF_BLOB_AS_VARCHAR, "ON"), // set to OFF to enable LOBs support
DDkwd__(TRAF_BOOTSTRAP_MD_MODE, "OFF"),
DDkwd__(TRAF_CLOB_AS_VARCHAR, "ON"), // set to OFF to enable LOBs support
DDkwd__(TRAF_COL_LENGTH_IS_CHAR, "ON"),
DDansi_(TRAF_CREATE_TABLE_WITH_UID, ""),
DDkwd__(TRAF_DEFAULT_COL_CHARSET, (char *)SQLCHARSETSTRING_ISO88591),
DDkwd__(TRAF_ENABLE_ORC_FORMAT, "OFF"),
DDkwd__(TRAF_INDEX_CREATE_OPT, "OFF"),
DDkwd__(TRAF_LOAD_CONTINUE_ON_ERROR, "OFF"),
DD_____(TRAF_LOAD_ERROR_COUNT_ID, ""),
DD_____(TRAF_LOAD_ERROR_COUNT_TABLE, "ERRORCOUNTER"),
DD_____(TRAF_LOAD_ERROR_LOGGING_LOCATION, "/bulkload/logs/"),
DDkwd__(TRAF_LOAD_FORCE_CIF, "ON"),
DDkwd__(TRAF_LOAD_LOG_ERROR_ROWS, "OFF"),
DDint__(TRAF_LOAD_MAX_ERROR_ROWS, "0"),
DDint__(TRAF_LOAD_MAX_HFILE_SIZE, "10240"), // in MB --> 10GB by default
DDkwd__(TRAF_LOAD_PREP_ADJUST_PART_FUNC, "ON"),
DDkwd__(TRAF_LOAD_PREP_CLEANUP, "ON"),
DDkwd__(TRAF_LOAD_PREP_KEEP_HFILES, "OFF"),
DDkwd__(TRAF_LOAD_PREP_PHASE_ONLY, "OFF"),
DDkwd__(TRAF_LOAD_PREP_SKIP_DUPLICATES, "OFF"),
// need to add code to check if the folder exists or not; if not, issue
// an error and ask the user to create it
DD_____(TRAF_LOAD_PREP_TMP_LOCATION, "/bulkload/"),
DDkwd__(TRAF_LOAD_TAKE_SNAPSHOT, "OFF"),
DDkwd__(TRAF_LOAD_USE_FOR_INDEXES, "ON"),
DDkwd__(TRAF_LOAD_USE_FOR_STATS, "OFF"),
// max size in bytes of a char or varchar column.
DDui2__(TRAF_MAX_CHARACTER_COL_LENGTH, "200000"),
DDkwd__(TRAF_NO_CONSTR_VALIDATION, "OFF"),
DDkwd__(TRAF_NO_DTM_XN, "OFF"),
DDint__(TRAF_NUM_HBASE_VERSIONS, "0"),
DDint__(TRAF_NUM_OF_SALT_PARTNS, "-1"),
DDkwd__(TRAF_RELOAD_NATABLE_CACHE, "OFF"),
DD_____(TRAF_SAMPLE_TABLE_LOCATION, "/sample/"),
DDint__(TRAF_SEQUENCE_CACHE_SIZE, "-1"),
DDkwd__(TRAF_STRING_AUTO_TRUNCATE, "OFF"),
DDkwd__(TRAF_STRING_AUTO_TRUNCATE_WARNING, "OFF"),
// TRAF_TABLE_SNAPSHOT_SCAN CQD can be set to:
// NONE   --> Snapshot scan is disabled and regular scan is used,
// SUFFIX --> Snapshot scan enabled for the bulk unload (bulk unload
//            behavior is not changed)
// LATEST --> enabled for the scan independently from bulk unload;
//            the latest snapshot is used if it exists
DDkwd__(TRAF_TABLE_SNAPSHOT_SCAN, "NONE"),
DD_____(TRAF_TABLE_SNAPSHOT_SCAN_SNAP_SUFFIX, "SNAP"),
// when the estimated table size is below the threshold (in MBs)
// defined by TRAF_TABLE_SNAPSHOT_SCAN_TABLE_SIZE_THRESHOLD,
// a regular scan is used instead of a snapshot scan;
// does not apply to bulk unload, which maintains the old behavior
DDint__(TRAF_TABLE_SNAPSHOT_SCAN_TABLE_SIZE_THRESHOLD, "1000"),
// timeout before we give up when trying to create the snapshot scanner
DDint__(TRAF_TABLE_SNAPSHOT_SCAN_TIMEOUT, "6000"),
// location for temporary links and files produced by snapshot scan
DD_____(TRAF_TABLE_SNAPSHOT_SCAN_TMP_LOCATION, "/bulkload/"),
// DTM Transaction Type: MVCC, SSCC
XDDkwd__(TRAF_TRANS_TYPE, "MVCC"),
DDkwd__(TRAF_UNLOAD_BYPASS_LIBHDFS, "ON"),
DD_____(TRAF_UNLOAD_DEF_DELIMITER, "|"),
DD_____(TRAF_UNLOAD_DEF_NULL_STRING, ""),
DD_____(TRAF_UNLOAD_DEF_RECORD_SEPARATOR, "\n"),
DDint__(TRAF_UNLOAD_HDFS_COMPRESS, "0"),
DDkwd__(TRAF_UNLOAD_SKIP_WRITING_TO_FILES, "OFF"),
DDkwd__(TRAF_UPSERT_ADJUST_PARAMS, "OFF"),
DDkwd__(TRAF_UPSERT_AUTO_FLUSH, "OFF"),
DDint__(TRAF_UPSERT_WB_SIZE, "2097152"),
DDkwd__(TRAF_UPSERT_WRITE_TO_WAL, "OFF"),
DDkwd__(TRAF_USE_RWRS_FOR_MD_INSERT, "ON"),
DDkwd__(TRY_DP2_REPARTITION_ALWAYS, "OFF"),
SDDkwd__(TRY_PASS_ONE_IF_PASS_TWO_FAILS, "OFF"),
// Disallow/Allow TSJs in MultiJoin framework
DDkwd__(TSJS_SPOIL_JBB, "OFF"),
// type a CASE expression or ValueIdUnion as varchar if its leaves
// are of type CHAR of unequal length
DDkwd__(TYPE_UNIONED_CHAR_AS_VARCHAR, "ON"),
// UDF scalar indicating maximum number of rows out for each row in.
DDui___(UDF_FANOUT, "1"),
// Must be in form <cat>.<sch>. Delimited catalog names not allowed.
DD_____(UDF_METADATA_SCHEMA, "TRAFODION.\"_UDF_\""),
DDkwd__(UDF_SUBQ_IN_AGGS_AND_GBYS, "SYSTEM"),
XDDui___(UDR_DEBUG_FLAGS, "0"), // see sqludr/sqludr.h for values
SDD_____(UDR_JAVA_OPTIONS, "OFF"),
DD_____(UDR_JAVA_OPTION_DELIMITERS, " "),
DDkwd__(UNAVAILABLE_PARTITION, "STOP"), // "?" used?
DDkwd__(UNC_PROCESS, "OFF"),
SDDkwd__(UNIQUE_HASH_JOINS, "SYSTEM"),
SDDui___(UNIQUE_HASH_JOIN_MAX_INNER_SIZE, "1000"),
SDDui___(UNIQUE_HASH_JOIN_MAX_INNER_SIZE_PER_INSTANCE, "100"),
SDDui___(UNIQUE_HASH_JOIN_MAX_INNER_TABLES, "2"),
DDui___(UNOPTIMIZED_ESP_BUFFER_SIZE_DOWN, "31000"),
DDui___(UNOPTIMIZED_ESP_BUFFER_SIZE_UP, "31000"),
DDui1__(UPDATED_BYTES_PER_ESP, "400000"),
DDkwd__(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY, "ON"),
DDkwd__(UPD_ABORT_ON_ERROR, "OFF"),
XDDkwd__(UPD_ORDERED, "ON"),
DDkwd__(UPD_PARTIAL_ON_ERROR, "OFF"),
DDkwd__(UPD_SAVEPOINT_ON_ERROR, "ON"),
DDkwd__(USER_EXPERIENCE_LEVEL, "BEGINNER"),
// ------------------------------------------------------------------------
// This default will use a new type of an ASSERT, CCMPASSERT as a CMPASSERT
// when ON, else use that as a DCMPASSERT.
Changed this default to OFF
// just before the final build for R2 07/23/2004 RV
// -------------------------------------------------------------------------
DDkwd__(USE_CCMPASSERT_AS_CMPASSERT, "OFF"),
DDkwd__(USE_DENSE_BUFFERS, "ON"),
// Use Hive tables as source for traf ustat and popindex
DDkwd__(USE_HIVE_SOURCE, ""),
// Use large queues on RHS of Flow/Nested Join when appropriate
DDkwd__(USE_LARGE_QUEUES, "ON"),
DDkwd__(USE_MAINTAIN_CONTROL_TABLE, "OFF"),
// Adaptive segmentation, use operator max to determine degree of parallelism
DDui___(USE_OPERATOR_MAX_FOR_DOP, "1"),
// Specify the number of partitions before invoking parallel label operations
DDui1__(USE_PARALLEL_FOR_NUM_PARTITIONS, "32"),
DDkwd__(USTAT_ADD_SALTED_KEY_PREFIXES_FOR_MC, "OFF"),
// When ON, generate MCs for primary key prefixes as well as full key
// of salted table when ON EVERY KEY or ON EVERY COLUMN is specified.
DDkwd__(USTAT_ATTEMPT_ESP_PARALLELISM, "ON"), // for reading column values
DDui___(USTAT_AUTOMATION_INTERVAL, "0"),
XDDflt0_(USTAT_AUTO_CV_SAMPLE_SLOPE, "0.5"), // CV multiplier for sampling %.
DDkwd__(USTAT_AUTO_EMPTYHIST_TWO_TRANS, "OFF"), // When ON empty hist insert will be 2 trans.
DDkwd__(USTAT_AUTO_FOR_VOLATILE_TABLES, "OFF"), // Toggle for vol tbl histogram usage
DDui___(USTAT_AUTO_MAX_HIST_AGE, "0"), // Age of oldest unused histogram - only applies when automation is on.
DDui1__(USTAT_AUTO_MC_MAX_WIDTH, "10"), // The max columns in an MC histogram for automation.
DDui___(USTAT_AUTO_MISSING_STATS_LEVEL, "4"),
// Similar to HIST_MISSING_STATS_WARNING_LEVEL, but controls
// if automation inserts missing stats to HISTOGRAMS table.
// 0 - insert no stats,
// 1 - insert single col hists,
// 2 - insert all single col hists and MC hists for scans,
// 3 - insert all single col hists and MC stats for scans and joins.
// 4 - insert all single col hists and MC stats for scans, joins, and groupbys.
XDDui___(USTAT_AUTO_PRIORITY, "150"), // Priority of ustats under USAS.
DDui1__(USTAT_AUTO_READTIME_UPDATE_INTERVAL, "86400"),
// Seconds between updates of READ_COUNT.
// Should be > CACHE_HISTOGRAMS_REFRESH_INTERVAL.
DDkwd__(USTAT_CHECK_HIST_ACCURACY, "OFF"),
DDui1__(USTAT_CLUSTER_SAMPLE_BLOCKS, "1"),
DDkwd__(USTAT_COLLECT_FILE_STATS, "ON"), // do we collect file stats
DDkwd__(USTAT_COLLECT_MC_SKEW_VALUES, "OFF"),
DD_____(USTAT_CQDS_ALLOWED_FOR_SPAWNED_COMPILERS, ""),
// list of CQDs that can be pushed to secondary compilers
// CQDs are delimited by ","
DDkwd__(USTAT_DEBUG_FORCE_FETCHCOUNT, "OFF"),
DD_____(USTAT_DEBUG_TEST, ""),
DDflte_(USTAT_DSHMAX, "50.0"),
DDkwd__(USTAT_ESTIMATE_HBASE_ROW_COUNT, "OFF"),
DDkwd__(USTAT_FETCHCOUNT_ACTIVE, "OFF"),
DDkwd__(USTAT_FORCE_MOM_ESTIMATOR, "OFF"),
DDkwd__(USTAT_FORCE_TEMP, "OFF"),
DDflt0_(USTAT_FREQ_SIZE_PERCENT, "0.5"), // >100 effectively disables
DDflt0_(USTAT_GAP_PERCENT, "10.0"),
DDflt0_(USTAT_GAP_SIZE_MULTIPLIER, "1.5"),
DDui___(USTAT_HBASE_SAMPLE_RETURN_INTERVAL, "10000000"),
// Avoid scanner timeout by including on average at
// least one row per this many when sampling within HBase.
DDflt0_(USTAT_INCREMENTAL_FALSE_PROBABILITY, "0.01"),
DDkwd__(USTAT_INCREMENTAL_UPDATE_STATISTICS, "ON"),
DDkwd__(USTAT_INSERT_TO_NONAUDITED_TABLE, "OFF"),
// Used internally to overcome a problem in which insert
// to the non-audited sample table must be done on the same
// process it was created on. This CQD is NOT externalized.
DDkwd__(USTAT_INTERNAL_SORT, "HYBRID"), DDkwd__(USTAT_IS_IGNORE_UEC_FOR_MC, "OFF"), // if MCIS is ON, use IS to compute SC stats DDflt_0_1(USTAT_IS_MEMORY_FRACTION, "0.6"), DDflt0_(USTAT_IUS_INTERVAL_ROWCOUNT_CHANGE_THRESHOLD, "0.05"), DDflt0_(USTAT_IUS_INTERVAL_UEC_CHANGE_THRESHOLD, "0.05"), DDui1_6(USTAT_IUS_MAX_NUM_HASH_FUNCS, "5"), // the max disk space IUS CBFs can use is // MINOF(USTAT_IUS_MAX_PERSISTENT_DATA_IN_MB, // TtotalSpace * USTAT_IUS_MAX_PERSISTENT_DATA_IN_PERCENTAGE) DDui___(USTAT_IUS_MAX_PERSISTENT_DATA_IN_MB, "50000"), // 50GB DDflt0_(USTAT_IUS_MAX_PERSISTENT_DATA_IN_PERCENTAGE, "0.20"), // 20% of the total DDui1_6(USTAT_IUS_MAX_TRANSACTION_DURATION, "20"), // in minutes DDkwd__(USTAT_IUS_NO_BLOCK, "OFF"), DDansi_(USTAT_IUS_PERSISTENT_CBF_PATH, "SYSTEM"), // if turned on, IUS incremental statements will not take any "on existing" or // "on necessary" clause DDkwd__(USTAT_IUS_SIMPLE_SYNTAX, "OFF"), DDflt0_(USTAT_IUS_TOTAL_ROWCOUNT_CHANGE_THRESHOLD, "0.05"), DDflt0_(USTAT_IUS_TOTAL_UEC_CHANGE_THRESHOLD, "0.05"), DDkwd__(USTAT_IUS_USE_PERIODIC_SAMPLING, "OFF"), DDkwd__(USTAT_JIT_LOGGING, "OFF"), DDkwd__(USTAT_LOCK_HIST_TABLES, "OFF"), DD_____(USTAT_LOG, "ULOG"), DDui30_246(USTAT_MAX_CHAR_BOUNDARY_LEN, "30"), // Values can be 30-246. XDDui___(USTAT_MAX_READ_AGE_IN_MIN, "5760"), DDui___(USTAT_MAX_SAMPLE_AGE, "365"), // For R2.5 set to a year so user created samples won't be removed. DDflt0_(USTAT_MIN_CHAR_UEC_FOR_IS, "0.2"), // minimum UEC for char type to use internal sort DDflt0_(USTAT_MIN_DEC_BIN_UEC_FOR_IS, "0.03"), // minimum UEC for binary types to use internal sort DDflt0_(USTAT_MIN_ESTIMATE_FOR_ROWCOUNT, "10000000"), DDui1__(USTAT_MIN_ROWCOUNT_FOR_CTS_SAMPLE, "10000"), XDDui1__(USTAT_MIN_ROWCOUNT_FOR_LOW_SAMPLE, "1000000"), XDDui1__(USTAT_MIN_ROWCOUNT_FOR_SAMPLE, "10000"), DDflt0_(USTAT_MODIFY_DEFAULT_UEC, "0.05"), XDDui1__(USTAT_NECESSARY_SAMPLE_MAX, "5000000"), // Maximum sample size with NECESSARY DDui1__(USTAT_NUM_MC_GROUPS_FOR_KEYS, "10"), XDDpct__(USTAT_OBSOLETE_PERCENT_ROWCOUNT, "15"), DDkwd__(USTAT_PROCESS_GAPS, "ON"), DD0_255(USTAT_RETRY_DELAY, "100"), DD0_255(USTAT_RETRY_LIMIT, "3"), DD0_255(USTAT_RETRY_NEC_COLS_LIMIT, "3"), // by default, use retry for AddNecessaryColumns DDui1__(USTAT_RETRY_SECURITY_COUNT, "120"), DDpct__(USTAT_SAMPLE_PERCENT_DIFF, "10"), DDansi_(USTAT_SAMPLE_TABLE_NAME, " "), DDansi_(USTAT_SAMPLE_TABLE_NAME_CREATE, " "), DDkwd__(USTAT_SHOW_MC_INTERVAL_INFO, "OFF"), DDkwd__(USTAT_SHOW_MFV_INFO, "OFF"), DDflte_(USTAT_UEC_HI_RATIO, "0.5"), DDflte_(USTAT_UEC_LOW_RATIO, "0.1"), DDkwd__(USTAT_USE_BACKING_SAMPLE, "OFF"), DDkwd__(USTAT_USE_BULK_LOAD, "OFF"), DDkwd__(USTAT_USE_GROUPING_FOR_SAMPLING, "ON"), DDkwd__(USTAT_USE_INTERNAL_SORT_FOR_MC, "OFF"), DDkwd__(USTAT_USE_INTERNAL_SORT_FOR_MC_LOOP, "ON"), DDkwd__(USTAT_USE_INTERNAL_SORT_FOR_MC_NEW_HIST, "OFF"), // TEMP FOR TESTING -- SHOULD REMOVE DDkwd__(USTAT_USE_IS_WHEN_NO_STATS, "ON"), // use IS when no histograms exist for the column DDkwd__(USTAT_USE_SIDETREE_INSERT, "ON"), DDkwd__(USTAT_USE_SLIDING_SAMPLE_RATIO, "ON"), // Trend sampling rate down w/increasing table size, going // flat at 1%. DDkwd__(VALIDATE_RFORK_REDEF_TS, "OFF"), DDkwd__(VALIDATE_VIEWS_AT_OPEN_TIME, "OFF"), //this is the default length of a param which is typed as a VARCHAR. 
DD1_4096(VARCHAR_PARAM_DEFAULT_SIZE, "255"),
// allows pcodes for varchars
DDkwd__(VARCHAR_PCODE, "ON"),
DDansi_(VOLATILE_CATALOG, ""),
DDkwd__(VOLATILE_SCHEMA_IN_USE, "OFF"),
// if this is set to ON or SYSTEM, then find a suitable key among all the
// columns of a volatile table.
// If this is set to OFF, and there is no user specified primary key or
// store by clause, then make the first column of the volatile table
// to be the clustering key.
DDkwd__(VOLATILE_TABLE_FIND_SUITABLE_KEY, "SYSTEM"),
// if this is set, and there is no user specified primary key or
// store by clause, then make the first column of the volatile table
// to be the clustering key.
// Default is ON.
DDkwd__(VOLATILE_TABLE_FIRST_COL_IS_CLUSTERING_KEY, "ON"),
DDkwd__(VSBB_TEST_MODE, "OFF"),
XDDkwd__(WMS_CHILD_QUERY_MONITORING, "OFF"),
XDDkwd__(WMS_QUERY_MONITORING, "OFF"),
// amount of work we are willing to assign per CPU for any query
// not running at full system parallelism
SDDflte_(WORK_UNIT_ESP, "0.08"),
SDDflte_(WORK_UNIT_ESP_DATA_COPY_COST, "0.001"),
// ZIG_ZAG_TREES ON means do ZIG_ZAG_TREES
// $$$ OFF for beta
DDkwd__(ZIG_ZAG_TREES, "SYSTEM"),
DDkwd__(ZIG_ZAG_TREES_CONTROL, "OFF")
};
//
// NOTE: The defDefIx_ array is an array of integers that map
// 'enum' values to defaultDefaults[] entries.
// The defDefIx_ array could probably be made global static
// since all threads should map the same 'enum' values to the
// same defaultDefaults[] entries. Such a change is being
// left to a future round of optimizations.
//
static THREAD_P size_t defDefIx_[__NUM_DEFAULT_ATTRIBUTES];
inline static const char *getAttrName(Int32 attrEnum)
{
  return defaultDefaults[defDefIx_[attrEnum]].attrName;
}
inline static const char *getDefaultDefaultValue(Int32 attrEnum)
{
  return defaultDefaults[defDefIx_[attrEnum]].value;
}
inline static const DefaultValidator *validator(Int32 attrEnum)
{
  return defaultDefaults[defDefIx_[attrEnum]].validator;
}
inline static UInt32 getFlags(Int32 attrEnum)
{
  return defaultDefaults[defDefIx_[attrEnum]].flags;
}
inline static NABoolean isFlagOn(Int32 attrEnum, NADefaultFlags flagbit)
{
#pragma nowarn(1506)   // warning elimination
  return defaultDefaults[defDefIx_[attrEnum]].flags & (UInt32)flagbit;
#pragma warn(1506)  // warning elimination
}
inline static void setFlagOn(Int32 attrEnum, NADefaultFlags flagbit)
{
  defaultDefaults[defDefIx_[attrEnum]].flags |= (UInt32)flagbit;
}
static NABoolean isSynonymOfRESET(NAString &value)
{
  return (value == "RESET");
}
static NABoolean isSynonymOfSYSTEM(Int32 attrEnum, NAString &value)
{
  if (value == "")
    return TRUE;
  if (value == "SYSTEM")
    return !isFlagOn(attrEnum, DEFAULT_ALLOWS_SEPARATE_SYSTEM);
  if (value == "ENABLE") {
    value = "ON";
    return FALSE;
  }
  else if (value == "DISABLE") {
    value = "OFF";
    return FALSE;
  }
  // if (getDefaultDefaultValue(attrEnum) != NAString("DISABLE")) // cast reqd!!
  //   return TRUE;
  // else
  //   value = "ON";
  return FALSE;
}
size_t NADefaults::numDefaultAttributes()
{
  return (size_t)__NUM_DEFAULT_ATTRIBUTES;
}
// Returns current defaults in alphabetic order (for SHOWCONTROL listing).
const char *NADefaults::getCurrentDefaultsAttrNameAndValue(
    size_t ix, const char* &name, const char* &value,
    NABoolean userDefaultsOnly)
{
  if (ix < numDefaultAttributes()) {
    NABoolean get = FALSE;
    if (userDefaultsOnly) {
      // if this default was entered by the user, return it.
get = userDefault(defaultDefaults[ix].attrEnum); } else { // display the control if // - it is externalized or // - it is for support only and a CQD is set to show those, or // - a CQD is set to show all the controls get = (defaultDefaults[ix].flags & DEFAULT_IS_EXTERNALIZED) || // bit-AND ((defaultDefaults[ix].flags & DEFAULT_IS_FOR_SUPPORT) && (getToken(SHOWCONTROL_SHOW_SUPPORT) == DF_ON)) || (getToken(SHOWCONTROL_SHOW_ALL) == DF_ON); } if (get) { name = defaultDefaults[ix].attrName; value = currentDefaults_[defaultDefaults[ix].attrEnum]; return name; } } return name = value = NULL; } // ----------------------------------------------------------------------- // convert the default defaults into a table organized by enum values // ----------------------------------------------------------------------- void NADefaults::initCurrentDefaultsWithDefaultDefaults() { const size_t numAttrs = numDefaultAttributes(); CMPASSERT_STRING (numAttrs == sizeof(defaultDefaults) / sizeof(DefaultDefault), "Check sqlcomp/DefaultConstants.h for a gap in enum DefaultConstants or sqlcomp/nadefaults.cpp for duplicate entries in array defaultDefaults[]."); deleteMe(); SqlParser_NADefaults_Glob = SqlParser_NADefaults_ = new NADHEAP SqlParser_NADefaults(); provenances_ = new NADHEAP char [numAttrs]; // enum fits in 2 bits flags_ = new NADHEAP char [numAttrs]; resetToDefaults_ = new NADHEAP char * [numAttrs]; currentDefaults_ = new NADHEAP const char * [numAttrs]; currentFloats_ = new NADHEAP float * [numAttrs]; currentTokens_ = new NADHEAP DefaultToken * [numAttrs]; currentState_ = INIT_DEFAULT_DEFAULTS; heldDefaults_ = new NADHEAP char * [numAttrs]; // reset all entries size_t i = 0; for (i = 0; i < numAttrs; i++) { provenances_[i] = currentState_; flags_[i] = 0; defDefIx_[i] = 0; } memset( resetToDefaults_, 0, sizeof(char *) * numAttrs ); memset( currentDefaults_, 0, sizeof(char *) * numAttrs ); memset( currentFloats_, 0, sizeof(float *) * numAttrs ); memset( currentTokens_, 0, sizeof(DefaultToken *) * numAttrs ); memset( heldDefaults_, 0, sizeof(char *) * numAttrs ); #ifndef NDEBUG // This env-var turns on consistency checking of default-defaults and // other static info. The env-var does not get passed from sqlci to arkdev // until *AFTER* the initialization code runs, so you must do a static // arkcmp compile to do this checking. TEST050 does this, in fact. 
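// (For example, assuming an sh-style shell -- the exact way to run a static
//  arkcmp compile is environment-specific:
//    export NADEFAULTS_VALIDATE=1
//  before the compile enables these checks.)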
NABoolean nadval = !!getenv("NADEFAULTS_VALIDATE"); #endif // for each entry of the (alphabetically sorted) default defaults // table, enter the default default into the current default table // which is sorted by enum values for (i = 0; i < numAttrs; i++) { // the enum must be less than the max (if this assert fails // you might have made the range of constants in the enum // non-contiguous by assigning hard-coded numbers to some entries) CMPASSERT(ENUM_RANGE_CHECK(defaultDefaults[i].attrEnum)); // can't have the same enum value twice in defaultDefaults CMPASSERT(currentDefaults_[defaultDefaults[i].attrEnum] == NULL); // set currentDefaults_[enum] to the static string, // leaving the "allocated from heap" flag as FALSE char * value = new NADHEAP char[strlen(defaultDefaults[i].value) + 1]; strcpy(value,defaultDefaults[i].value); // trim trailing spaces (except UDR_JAVA_OPTION_DELIMITERS, since // trailing space is allowed for it) if (defaultDefaults[i].attrEnum != UDR_JAVA_OPTION_DELIMITERS) { Lng32 len = strlen(value); while ((len > 0) && (value[len-1] == ' ')) { value[len-1] = 0; len--; } } currentDefaults_[defaultDefaults[i].attrEnum] = value; // set up our backlink which maps [enum] to its defaultDefaults entry defDefIx_[defaultDefaults[i].attrEnum] = i; // LCOV_EXCL_START // for debugging only #ifndef NDEBUG if (nadval) { // additional sanity checking we want to do occasionally NAString v; // ensure the static table really is in alphabetic order CMPASSERT(i == 0 || strcmp(defaultDefaults[i-1].attrName, defaultDefaults[i].attrName) < 0); // ensure these names are fit and trim and in canonical form v = defaultDefaults[i].attrName; TrimNAStringSpace(v); v.toUpper(); CMPASSERT(v == defaultDefaults[i].attrName); // validate initial default default values CMPASSERT(defaultDefaults[i].validator); defaultDefaults[i].validator->validate( defaultDefaults[i].value, this, defaultDefaults[i].attrEnum, +1/*warning*/); // ensure these values are fit and trim and in canonical form v = defaultDefaults[i].value; TrimNAStringSpace(v); defaultDefaults[i].validator->applyUpper(v); CMPASSERT(v == defaultDefaults[i].value); // alert the programmer if (isSynonymOfSYSTEM(defaultDefaults[i].attrEnum, v)) if (v != "" || defaultDefaults[i].validator != &validateAnsiName) cerr << "\nWARNING: " << defaultDefaults[i].attrName << " has SYSTEM default (" << v << ");\n\t read NOTE 2 in " << __FILE__ << endl; if (isSynonymOfRESET(v)) if (v != "" || defaultDefaults[i].validator != &validateAnsiName) cerr << "\nWARNING: " << defaultDefaults[i].attrName << " has RESET default (" << v << ");\n\t this makes no sense!" << endl; if (defaultDefaults[i].validator == &validateUnknown) cerr << "\nWARNING: " << defaultDefaults[i].attrName << " has a NO-OP validator" << endl; // the token keyword array must have no missing strings, // it must also be in alphabetic order, // each entry must be canonical, and // must have no embedded spaces (see token() method, space/uscore...) if (i == 0) for (size_t j = 0; j < DF_lastToken; j++) { CMPASSERT(keywords_[j]); CMPASSERT(j == 0 || strcmp(keywords_[j-1], keywords_[j]) < 0); NAString v(keywords_[j]); TrimNAStringSpace(v); v.toUpper(); // we know keywords must be caseINsens CMPASSERT(v == keywords_[j]); CMPASSERT(v.first(' ') == NA_NPOS); } } // if env-var #endif // NDEBUG // LCOV_EXCL_STOP } // for i // set the default value for GENERATE_EXPLAIN depending on whether // this is a static compile or a dynamic compile. 
if (CmpCommon::context()->GetMode() == STMT_STATIC) {
  currentDefaults_[GENERATE_EXPLAIN] = "ON";
  currentDefaults_[DO_RUNTIME_EID_SPACE_COMPUTATION] = "ON";
  currentDefaults_[DETAILED_STATISTICS] = "MEASURE";
}
else {
  currentDefaults_[GENERATE_EXPLAIN] = "OFF";
  currentDefaults_[DO_RUNTIME_EID_SPACE_COMPUTATION] = "OFF";
  currentDefaults_[DETAILED_STATISTICS] = "OPERATOR";
}
// set the default value of hive_catalog to the hive_system_catalog
currentDefaults_[HIVE_CATALOG] = HIVE_SYSTEM_CATALOG;
// set the default value of hbase_catalog to the hbase_system_catalog
currentDefaults_[HBASE_CATALOG] = HBASE_SYSTEM_CATALOG;
currentDefaults_[SEABASE_CATALOG] = TRAFODION_SYSCAT_LIT;
// Test for TM_USE_SSCC from ms.env.
// Only a setting of TM_USE_SSCC set to 1 will change the value to SSCC.
// Otherwise, the default will remain at MVCC.
char * ev = getenv("TM_USE_SSCC");
Lng32 useValue = 0;
if (ev) {
  useValue = (Lng32)str_atoi(ev, str_len(ev));
  if (useValue == 1)
    currentDefaults_[TRAF_TRANS_TYPE] = "SSCC";
}
// Begin: Temporary workaround for SQL build regressions to pass
NABoolean resetNeoDefaults = FALSE;
// On SQ, the way to get an envvar from inside an un-attached process
// is to use the msg_getenv_str() call and set the env inside
// the SQ_PROP_ property file. In this case the property
// file is $MY_SQROOT/etc/SQ_PROP_tdm_arkcmp which contains the line
// "SQLMX_REGRESS=1". This file was generated by tools/setuplnxenv.
// resetNeoDefaults = (msg_getenv_str("SQLMX_REGRESS") != NULL);
resetNeoDefaults = (getenv("SQLMX_REGRESS") != NULL);
if (resetNeoDefaults) {
  // turn similarity check OFF during regressions run.
  currentDefaults_[SIMILARITY_CHECK] = "OFF";
  // turn on ALL stats during regressions run.
  currentDefaults_[COMP_BOOL_157] = "ON";
  // turn on INTERNAL format for SHOWDDL statements
  currentDefaults_[SHOWDDL_DISPLAY_FORMAT] = "INTERNAL";
}
// End: Temporary workaround for SQL build regressions to pass
// Cache all the default keywords up front,
// leaving other non-keyword tokens to be cached on demand.
// The "keyword" that is not cached is the kludge/clever trick that
// Matt puts in for NATIONAL_CHARSET.
NAString tmp( NADHEAP );
for ( i = 0; i < numAttrs; i++ ) {
#ifndef NDEBUG
#pragma nowarn(1506)   // warning elimination
  const DefaultValidatorType validatorType = validator(i)->getType();
#pragma warn(1506)  // warning elimination
#endif
#pragma nowarn(1506)   // warning elimination
  if ( validator(i)->getType() == VALID_KWD &&
       (i != NATIONAL_CHARSET) &&
       (i != INPUT_CHARSET) &&
       (i != ISO_MAPPING) )
#pragma warn(1506)  // warning elimination
  {
    currentTokens_[i] = new NADHEAP DefaultToken;
    // do not call 'token' method as it will return an error if FALSE
    // is to be inserted. Just directly assign DF_OFF to non-resetable defs.
if (isNonResetableAttribute(defaultDefaults[defDefIx_[i]].attrName)) *currentTokens_[i] = DF_OFF; else #pragma nowarn(1506) // warning elimination *currentTokens_[i] = token( i, tmp ); #pragma warn(1506) // warning elimination } } if (getToken(MODE_SEABASE) == DF_ON) { currentDefaults_[CATALOG] = TRAFODION_SYSCAT_LIT; if (getToken(SEABASE_VOLATILE_TABLES) == DF_ON) { NAString sbCat = getValue(SEABASE_CATALOG); CmpCommon::context()->sqlSession()->setVolatileCatalogName(sbCat, TRUE); } } SqlParser_NADefaults_->NAMETYPE_ = getToken(NAMETYPE); SqlParser_NADefaults_->NATIONAL_CHARSET_ = CharInfo::getCharSetEnum(currentDefaults_[NATIONAL_CHARSET]); SqlParser_NADefaults_->ISO_MAPPING_ = CharInfo::getCharSetEnum(currentDefaults_[ISO_MAPPING]); SqlParser_NADefaults_->DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(currentDefaults_[DEFAULT_CHARSET]); SqlParser_NADefaults_->ORIG_DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(currentDefaults_[DEFAULT_CHARSET]); // Set the NAString_isoMappingCS memory cache for use by routines // ToInternalIdentifier() and ToAnsiIdentifier[2|3]() in module // w:/common/NAString[2].cpp. These routines currently cannot // access SqlParser_ISO_MAPPING directly due to the complex // build hierarchy. NAString_setIsoMapCS((SQLCHARSET_CODE) SqlParser_NADefaults_->ISO_MAPPING_); } NADefaults::NADefaults(NAMemory * h) : provenances_(NULL) , flags_(NULL) , resetToDefaults_(NULL) , currentDefaults_(NULL) , currentFloats_(NULL) , currentTokens_(NULL) , heldDefaults_(NULL) , currentState_(UNINITIALIZED) , readFromSQDefaultsTable_(FALSE) , SqlParser_NADefaults_(NULL) , catSchSetToUserID_(NULL) , heap_(h) , resetAll_(FALSE) , defFlags_(0) { static THREAD_P NABoolean systemParamterUpdated = FALSE; // First (but only if NSK-LITE Services exist), // write system parameters (attributes DEF_*) into DefaultDefaults, if (!systemParamterUpdated && !cmpCurrentContext->isStandalone()) { updateSystemParameters(); systemParamterUpdated = TRUE; } // then copy DefaultDefaults into CurrentDefaults. initCurrentDefaultsWithDefaultDefaults(); // Set additional defaultDefaults flags: // If an attr allows ON/OFF/SYSTEM and the default-default is not SYSTEM, // then you must set this flag. Otherwise, CQD attr 'system' will revert // the value back to the default-default, which is not SYSTEM. // setFlagOn(...attr..., DEFAULT_ALLOWS_SEPARATE_SYSTEM); // // (See attESPPara in OptPhysRelExpr.cpp.) 
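// A sketch of the effect (hypothetical session, not from this file):
//   CQD ZIG_ZAG_TREES 'SYSTEM';
// With the flag set, isSynonymOfSYSTEM() above returns FALSE, so 'SYSTEM'
// is kept as a distinct token value; without the flag, the CQD would be
// treated as a synonym of the default-default and simply revert the attribute.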
setFlagOn(ATTEMPT_ESP_PARALLELISM, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(HJ_TYPE, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(ZIG_ZAG_TREES, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(COMPRESSED_INTERNAL_FORMAT, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(COMPRESSED_INTERNAL_FORMAT_BMO, DEFAULT_ALLOWS_SEPARATE_SYSTEM); } NADefaults::~NADefaults() { deleteMe(); } void NADefaults::deleteMe() { if (resetToDefaults_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(resetToDefaults_[i], NADHEAP); NADELETEBASIC(resetToDefaults_, NADHEAP); } if (currentDefaults_) { for (size_t i = numDefaultAttributes(); i--; ) if (provenances_[i] > INIT_DEFAULT_DEFAULTS) NADELETEBASIC(currentDefaults_[i], NADHEAP); NADELETEBASIC(currentDefaults_, NADHEAP); } if (currentFloats_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentFloats_[i], NADHEAP); NADELETEBASIC(currentFloats_, NADHEAP); } if (currentTokens_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentTokens_[i], NADHEAP); NADELETEBASIC(currentTokens_, NADHEAP); } if (heldDefaults_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(heldDefaults_[i], NADHEAP); NADELETEBASIC(heldDefaults_, NADHEAP); } for (CollIndex i = tablesRead_.entries(); i--; ) tablesRead_.removeAt(i); NADELETEBASIC(provenances_, NADHEAP); NADELETEBASIC(flags_, NADHEAP); NADELETE(SqlParser_NADefaults_, SqlParser_NADefaults, NADHEAP); } // ----------------------------------------------------------------------- // Find the attribute name from its enum value in the defaults table. // ----------------------------------------------------------------------- const char *NADefaults::lookupAttrName(Int32 attrEnum, Int32 errOrWarn) { if (ATTR_RANGE_CHECK) return getAttrName(attrEnum); static THREAD_P char noSuchAttr[20]; sprintf(noSuchAttr, "**%d**", attrEnum); if (errOrWarn) // $0~string0 is not the name of any DEFAULTS table attribute. *CmpCommon::diags() << DgSqlCode(ERRWARN(2050)) << DgString0(noSuchAttr); return noSuchAttr; } // ----------------------------------------------------------------------- // Find the enum value from its string representation in the defaults table. // ----------------------------------------------------------------------- enum DefaultConstants NADefaults::lookupAttrName(const char *name, Int32 errOrWarn, Int32 *position) { NAString attrName(name); TrimNAStringSpace(attrName, FALSE, TRUE); // trim trailing blanks only attrName.toUpper(); // start with the full range of defaultDefaults size_t lo = 0; size_t hi = numDefaultAttributes(); size_t split; Int32 cresult; // perform a binary search in the ordered table defaultDefaults do { // compare the token with the middle entry in the range split = (lo + hi) / 2; cresult = attrName.compareTo(defaultDefaults[split].attrName); if (cresult < 0) { // token < split value, search first half of range hi = split; } else if (cresult > 0) { if (lo == split) // been there, done that { CMPASSERT(lo == hi-1); break; } // token > split value, search second half of range lo = split; } } while (cresult != 0 && lo < hi); if (position != 0) #pragma nowarn(1506) // warning elimination *position = split; #pragma warn(1506) // warning elimination // if the last comparison result was equal, return value at "split" if (cresult == 0) return defaultDefaults[split].attrEnum; // otherwise the string has no corresponding enum value if (errOrWarn) // $0~string0 is not the name of any DEFAULTS table attribute. 
*CmpCommon::diags() << DgSqlCode(ERRWARN(2050)) << DgString0(attrName); return __INVALID_DEFAULT_ATTRIBUTE; // negative } #define WIDEST_CPUARCH_VALUE 30 // also wider than any utoa_() result static void utoa_(UInt32 val, char *buf) { sprintf(buf, "%u", val); } static void itoa_(Int32 val, char *buf) { sprintf(buf, "%d", val); } static void ftoa_(float val, char *buf) { snprintf(buf, WIDEST_CPUARCH_VALUE, "%0.2f", val); } // Updates the system parameters in the defaultDefaults table. void NADefaults::updateSystemParameters(NABoolean reInit) { static const char *arrayOfSystemParameters[] = { "DEF_CPU_ARCHITECTURE", "DEF_DISCS_ON_CLUSTER", "DEF_INSTRUCTIONS_SECOND", "DEF_PAGE_SIZE", "DEF_LOCAL_CLUSTER_NUMBER", "DEF_LOCAL_SMP_NODE_NUMBER", "DEF_NUM_SMP_CPUS", "MAX_ESPS_PER_CPU_PER_OP", "DEFAULT_DEGREE_OF_PARALLELISM", "DEF_NUM_NODES_IN_ACTIVE_CLUSTERS", // this is deliberately not in the list: "DEF_CHUNK_SIZE", "DEF_NUM_BM_CHUNKS", "DEF_PHYSICAL_MEMORY_AVAILABLE", //returned in KB not bytes "DEF_TOTAL_MEMORY_AVAILABLE", //returned in KB not bytes "DEF_VIRTUAL_MEMORY_AVAILABLE" , "GEN_MAX_NUM_PART_DISK_ENTRIES" , "USTAT_IUS_PERSISTENT_CBF_PATH" }; //returned in KB not bytes char valuestr[WIDEST_CPUARCH_VALUE]; // Set up global cluster information. setUpClusterInfo(CmpCommon::contextHeap()); // Extract SMP node number and cluster number where this arkcmp is running. short nodeNum = 0; Int32 clusterNum = 0; OSIM_getNodeAndClusterNumbers(nodeNum, clusterNum); // First (but only if NSK-LITE Services exist), // write system parameters (attributes DEF_*) into DefaultDefaults, // then copy DefaultDefaults into CurrentDefaults. if (!cmpCurrentContext->isStandalone()) { size_t numElements = sizeof(arrayOfSystemParameters) / sizeof(char *); for (size_t i = 0; i < numElements; i++) { Int32 j; // perform a lookup for the string, using a binary search lookupAttrName(arrayOfSystemParameters[i], -1, &j); CMPASSERT(j >= 0); if(reInit) NADELETEBASIC(defaultDefaults[j].value,NADHEAP); char *newValue = new (GetCliGlobals()->exCollHeap()) char[WIDEST_CPUARCH_VALUE]; newValue[0] = '\0'; defaultDefaults[j].value = newValue; switch(defaultDefaults[j].attrEnum) { case DEF_CPU_ARCHITECTURE: switch(gpClusterInfo->cpuArchitecture()) { // 123456789!1234567890@123456789 case CPU_ARCH_INTEL_80386: strcpy(newValue, "INTEL_80386"); break; case CPU_ARCH_INTEL_80486: strcpy(newValue, "INTEL_80486"); break; case CPU_ARCH_PENTIUM: strcpy(newValue, "PENTIUM"); break; case CPU_ARCH_PENTIUM_PRO: strcpy(newValue, "PENTIUM_PRO"); break; case CPU_ARCH_MIPS: strcpy(newValue, "MIPS"); break; case CPU_ARCH_ALPHA: strcpy(newValue, "ALPHA"); break; case CPU_ARCH_PPC: strcpy(newValue, "PPC"); break; default: strcpy(newValue, "UNKNOWN"); break; } if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j], FALSE); break; case DEF_DISCS_ON_CLUSTER: strcpy(newValue, "8"); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_PAGE_SIZE: utoa_(gpClusterInfo->pageSize(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_LOCAL_CLUSTER_NUMBER: utoa_(clusterNum, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_LOCAL_SMP_NODE_NUMBER: utoa_(nodeNum, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). 
updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
break;
case DEF_NUM_SMP_CPUS:
  utoa_(gpClusterInfo->numberOfCpusPerSMP(), valuestr);
  strcpy(newValue, valuestr);
  if (reInit)
    ActiveSchemaDB()->getDefaults().
      updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
  break;
case DEFAULT_DEGREE_OF_PARALLELISM:
  {
    Lng32 x = 2;
    utoa_(x, valuestr);
    strcpy(newValue, valuestr);
    if (reInit)
      ActiveSchemaDB()->getDefaults().
        updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
  }
  break;
case MAX_ESPS_PER_CPU_PER_OP:
  {
    // set 2 ESPs per node, as a starting point.
#define DEFAULT_ESPS_PER_NODE 2
    Lng32 numESPsPerNode = DEFAULT_ESPS_PER_NODE;
    Lng32 coresPerNode = 1;
    // Make sure the gpClusterInfo points at an NAClusterLinux object.
    // In osim simulation mode, the pointer can point at a NAClusterNSK
    // object, for which the method numTSEsForPOS() is not defined.
    NAClusterInfoLinux* gpLinux =
      dynamic_cast<NAClusterInfoLinux*>(gpClusterInfo);
    // number of POS TSEs
    Lng32 numTSEsPerCluster = gpLinux->numTSEsForPOS();
    // cluster nodes
    Lng32 nodesPerCluster = gpClusterInfo->getTotalNumberOfCPUs();
    // TSEs per node
    Lng32 TSEsPerNode = numTSEsPerCluster/nodesPerCluster;
    // cores per node
    coresPerNode = gpClusterInfo->numberOfCpusPerSMP();
    // For Linux/NT, we conservatively allocate ESPs per node as follows
    // - 1 ESP per 2 cpu cores if cores are equal to or less than TSEs
    // - 1 ESP per TSE if number of cores is more than double the TSEs
    // - 1 ESP per 2 TSEs if cores are more than TSEs but less than double the TSEs
    // - 1 ESP per node. Only possible on NT or workstations
    //   - number of cores less than TSEs and there are 1 or 2 cpu cores per node
    //   - number of TSEs is less than cpu cores and there are 1 or 2 TSEs per node.
    //     This case is probable if virtual nodes are used
    // TSEsPerNode is 0 for arkcmps started by the seapilot universal consumers;
    // in this case we only consider cpu cores
    if ((coresPerNode <= TSEsPerNode) || (TSEsPerNode == 0)) {
      if (coresPerNode > 1)
        numESPsPerNode = DEFAULT_ESPS_PER_NODE;
    }
    else if (coresPerNode > (TSEsPerNode*2)) {
      numESPsPerNode = TSEsPerNode;
    }
    else if (TSEsPerNode > 1) {
      numESPsPerNode = TSEsPerNode/2;
    }
    else { // not really needed since numESPsPerNode is already initialized above
      numESPsPerNode = DEFAULT_ESPS_PER_NODE;
    }
    ftoa_((float)(numESPsPerNode)/(float)(coresPerNode), valuestr);
    strcpy(newValue, valuestr);
    if (reInit)
      ActiveSchemaDB()->getDefaults().
        updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
  }
  break;
case DEF_NUM_NODES_IN_ACTIVE_CLUSTERS:
  utoa_(((NAClusterInfoLinux*)gpClusterInfo)->numLinuxNodes(), valuestr);
  strcpy(newValue, valuestr);
  if (reInit)
    ActiveSchemaDB()->getDefaults().
      updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
  break;
case DEF_PHYSICAL_MEMORY_AVAILABLE:
  utoa_(gpClusterInfo->physicalMemoryAvailable(), valuestr);
  strcpy(newValue, valuestr);
  if (reInit)
    ActiveSchemaDB()->getDefaults().
      updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
  break;
case DEF_TOTAL_MEMORY_AVAILABLE:
  utoa_(gpClusterInfo->totalMemoryAvailable(), valuestr);
  strcpy(newValue, valuestr);
  if (reInit)
    ActiveSchemaDB()->getDefaults().
      updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
  break;
case DEF_VIRTUAL_MEMORY_AVAILABLE:
  utoa_(gpClusterInfo->virtualMemoryAvailable(), valuestr);
  strcpy(newValue, valuestr);
  if (reInit)
    ActiveSchemaDB()->getDefaults().
updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
break;
case DEF_NUM_BM_CHUNKS:
  {
    UInt32 numChunks = (UInt32)
      (gpClusterInfo->physicalMemoryAvailable() / def_DEF_CHUNK_SIZE / 4);
    utoa_(numChunks, valuestr);
    strcpy(newValue, valuestr);
    if (reInit)
      ActiveSchemaDB()->getDefaults().
        updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
  }
  break;
case DEF_INSTRUCTIONS_SECOND:
  {
    Int32 frequency, speed;
    frequency = gpClusterInfo->processorFrequency();
    switch (gpClusterInfo->cpuArchitecture()) {
      case CPU_ARCH_PENTIUM_PRO: speed = (Int32) (frequency * 0.5); break;
      case CPU_ARCH_PENTIUM:     speed = (Int32) (frequency * 0.4); break;
      default:                   speed = (Int32) (frequency * 0.3); break;
    }
    itoa_(speed, valuestr);
    strcpy(newValue, valuestr);
    if (reInit)
      ActiveSchemaDB()->getDefaults().
        updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
  }
  break;
case GEN_MAX_NUM_PART_DISK_ENTRIES:
  {
    // Make sure the gpClusterInfo points at an NAClusterLinux object.
    // In osim simulation mode, the pointer can point at a
    // NAClusterNSK object, for which the method numTSEsForPOS() is not
    // defined.
    NAClusterInfoLinux* gpLinux =
      dynamic_cast<NAClusterInfoLinux*>(gpClusterInfo);
    if ( gpLinux ) {
      UInt32 numTSEs = (UInt32)gpLinux->numTSEsForPOS();
      utoa_(numTSEs, valuestr);
      strcpy(newValue, valuestr);
      if (reInit)
        ActiveSchemaDB()->
          getDefaults().updateCurrentDefaultsForOSIM(&defaultDefaults[j]);
    }
  }
  break;
case USTAT_IUS_PERSISTENT_CBF_PATH:
  {
    // set the CQD to $HOME/cbfs
    const char* home = getenv("HOME");
    if ( home ) {
      str_cat(home, "/cbfs", newValue);
    }
  }
  break;
default:
#ifndef NDEBUG
  cerr << "updateSystemParameters: no case for "
       << defaultDefaults[j].attrName << endl;
#endif
  break;
} // switch (arrayOfSystemParameters)
} // for
} // isStandalone
} // updateSystemParameters()
//==============================================================================
//  Get SMP node number and cluster number on which this arkcmp.exe is running.
//==============================================================================
void NADefaults::getNodeAndClusterNumbers(short& nodeNum, Int32& clusterNum)
{
  SB_Phandle_Type pHandle;
  Int32 error = XPROCESSHANDLE_GETMINE_(&pHandle);
  Int32 nodeNumInt;  // XPROCESSHANDLE_DECOMPOSE_ takes an integer.
  Int32 pin;
  error = XPROCESSHANDLE_DECOMPOSE_(&pHandle, &nodeNumInt, &pin, &clusterNum);
  nodeNum = nodeNumInt;  // Store 4-byte integer back to short integer
  CMPASSERT(error == 0);
}
inline static NABoolean initializeSQLdone()
{
  return FALSE;
}
// Setup for readFromSQLTable():
//
#include "SQLCLIdev.h"
const SQLMODULE_ID __SQL_mod_866668761818000 = {
  /* version */      SQLCLI_CURRENT_VERSION,
  /* module name */  "HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.READDEF_N29_000",
  /* time stamp */   866668761818000LL,
  /* char set */     "ISO88591",
  /* name length */  47
};
static const Int32 MAX_VALUE_LEN = 1000;
// Read the SQL defaults table, to layer on further defaults.
//
// [1] This is designed such that it can be called multiple times
// (a site-wide defaults table, then a user-specific one, e.g.)
// and by default it will supersede values read/computed from earlier tables.
//
// [2] It can also be called *after* CQD's have been issued
// (e.g. from the getCatalogAndSchema() method)
// and by default it will supersede values from earlier tables
// but *not* explicitly CQD-ed settings.
//
// This default behavior is governed by the overwrite* arguments in
// various methods (see the .h file). Naturally you can override such behavior,
// e.g., if you wanted to reset to an earlier state, erasing all user CQD's.
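// An illustrative (hypothetical) call sequence -- the table names here are
// invented for the example:
//   defs.readFromSQLTable("SITE_DEFAULTS", NADefaults::SET_BY_CQD, +1/*warn*/);
//   defs.readFromSQLTable("USER_DEFAULTS", NADefaults::SET_BY_CQD, +1/*warn*/);
// Values read by the second call supersede those from the first but, per [2]
// above, do not overwrite settings already made by explicit CQDs.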
// void NADefaults::readFromSQLTable(const char *tname, Provenance overwriteIfNotYet, Int32 errOrWarn) { char value[MAX_VALUE_LEN + 1]; // CMPASSERT(MAX_VALUE_LEN >= ComMAX_2_PART_EXTERNAL_UCS2_NAME_LEN_IN_NAWCHARS); // First (but only if NSK-LITE Services exist), // write system parameters (attributes DEF_*) into DefaultDefaults, // then copy DefaultDefaults into CurrentDefaults. if (!cmpCurrentContext->isStandalone()) { Lng32 initialErrCnt = CmpCommon::diags()->getNumber(); // Set this *before* doing any insert()'s ... currentState_ = READ_FROM_SQL_TABLE; Int32 loop_here=0; while (loop_here > 10) { loop_here++; if (loop_here > 1000) loop_here=100; } if (tname) { NABoolean isSQLTable = TRUE; if (*tname == ' ') { // called from NADefaults::readFromFlatFile() isSQLTable = FALSE; // -- see kludge in .h file! tname++; } char attrName[101]; // column ATTRIBUTE VARCHAR(100) UPSHIFT Int32 sqlcode; static THREAD_P struct SQLCLI_OBJ_ID __SQL_id0; FILE *flatfile = NULL; if (isSQLTable) { init_SQLCLI_OBJ_ID(&__SQL_id0, SQLCLI_CURRENT_VERSION, cursor_name, &__SQL_mod_866668761818000, "S1", 0, SQLCHARSETSTRING_ISO88591, 2); /* EXEC SQL OPEN S1; See file NADefaults.mdf for cursor declaration */ sqlcode = SQL_EXEC_ClearDiagnostics(&__SQL_id0); sqlcode = SQL_EXEC_Exec(&__SQL_id0,NULL,1,tname,NULL); } else { flatfile = fopen(tname, "r"); sqlcode = flatfile ? 0 : -ABS(arkcmpErrorFileOpenForRead); } /* EXEC SQL FETCH S1 INTO :attrName, :value; */ // Since the DEFAULTS table is PRIMARY KEY (SUBSYSTEM, ATTRIBUTE), // we'll fetch (scanning the clustering index) // CATALOG before SCHEMA; this is important if user has rows like // ('CATALOG','c1') and ('SCHEMA','c2.sn') -- // the schema setting must supersede the catalog one. // We should also put an ORDER BY into the cursor decl in the .mdf, // to handle user-created DEFAULTS tables w/o a PK. if (sqlcode >= 0) if (isSQLTable) { sqlcode = SQL_EXEC_Fetch(&__SQL_id0,NULL,2,attrName,NULL,value,NULL); if (sqlcode >= 0) readFromSQDefaultsTable_ = TRUE; } else { value[0] = 0; // NULL terminator if (fscanf(flatfile, " %100[A-Za-z0-9_#] ,", attrName) < 0) sqlcode = +100; else fgets((char *) value, sizeof(value), flatfile); } // Ignore warnings except for end-of-data while (sqlcode >= 0 && sqlcode != +100) { NAString v(value); // skip comments, indicated by a # if (attrName[0] != '#') validateAndInsert(attrName, v, FALSE, errOrWarn, overwriteIfNotYet); /* EXEC SQL FETCH S1 INTO :attrName, :value; */ if (isSQLTable) sqlcode = SQL_EXEC_Fetch(&__SQL_id0,NULL,2,attrName,NULL,value,NULL); else { value[0] = 0; // NULL terminator if (fscanf(flatfile, " %100[A-Za-z0-9_#] ,", attrName) < 0) sqlcode = +100; else fgets((char *) value, sizeof(value), flatfile); } } if (sqlcode < 0 && errOrWarn && initializeSQLdone()) { if (ABS(sqlcode) == ABS(CLI_MODULEFILE_OPEN_ERROR) && cmpCurrentContext->isInstalling()) { // Emit no warning when (re)installing, // because obviously the module will not exist before we have // (re)arkcmp'd it! } else { // 2001 Error $0 reading table $1. Using $2 values. CollIndex n = tablesRead_.entries(); const char *errtext = n ? tablesRead_[n-1].data() : "default-default"; *CmpCommon::diags() << DgSqlCode(ERRWARN(2001)) << DgInt0(sqlcode) << DgTableName(tname) << DgString0(errtext); } } if (isSQLTable) { /* EXEC SQL CLOSE S1; */ sqlcode = SQL_EXEC_ClearDiagnostics(&__SQL_id0); sqlcode = SQL_EXEC_CloseStmt(&__SQL_id0); // The above statement should not start any transactions because // it uses read uncommitted access. 
If it ever changes, then we // would need to commit it at this time. } } // tname if (initialErrCnt < CmpCommon::diags()->getNumber() && errOrWarn) *CmpCommon::diags() << DgSqlCode(ERRWARN(2059)) << DgString0(tname ? tname : ""); } // isStandalone } // NADefaults::readFromSQLTable() void NADefaults::readFromSQLTables(Provenance overwriteIfNotYet, Int32 errOrWarn) { NABoolean cat = FALSE; NABoolean sch = FALSE; if (getToken(MODE_SEABASE) == DF_ON && !readFromSQDefaultsTable()) { // Read system defaults from configuration file. // keep this name in sync with file cli/SessionDefaults.cpp NAString confFile(getenv("MY_SQROOT")); confFile += "/etc/SQSystemDefaults.conf"; readFromFlatFile(confFile, overwriteIfNotYet, errOrWarn); tablesRead_.insert(confFile); CmpSeabaseDDL cmpSBD((NAHeap *)heap_, FALSE); Lng32 hbaseErr = 0; NAString hbaseErrStr; Lng32 errNum = cmpSBD.validateVersions(this, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &hbaseErr, &hbaseErrStr); if (errNum == 0) // seabase is initialized properly { // read from seabase defaults table cmpSBD.readAndInitDefaultsFromSeabaseDefaultsTable (overwriteIfNotYet, errOrWarn, this); // set authorization state NABoolean checkAllPrivTables = FALSE; errNum = cmpSBD.isPrivMgrMetadataInitialized(this,checkAllPrivTables); CmpCommon::context()->setAuthorizationState(errNum); } else { CmpCommon::context()->setIsUninitializedSeabase(TRUE); CmpCommon::context()->uninitializedSeabaseErrNum() = errNum; CmpCommon::context()->hbaseErrNum() = hbaseErr; CmpCommon::context()->hbaseErrStr() = hbaseErrStr; } } currentState_ = SET_BY_CQD; // enter the next state... // Make self fully consistent, by executing deferred actions last of all getSqlParser_NADefaults(); } // NADefaults::readFromSQLTables() // This method is used by SchemaDB::initPerStatement const char * NADefaults::getValueWhileInitializing(Int32 attrEnum) { // We can't rely on our state_ because SQLC might have called CQD::bindNode() // which does a setState(SET_BY_CQD)... if (!tablesRead_.entries()) if (getProvenance(attrEnum) < SET_BY_CQD) readFromSQLTables(SET_BY_CQD); return getValue(attrEnum); } // This method is used by SchemaDB::initPerStatement *and* // by CmpCommon, CmpStatement, and SQLC/SQLCO. void NADefaults::getCatalogAndSchema(NAString &cat, NAString &sch) { cat = getValueWhileInitializing(CATALOG); sch = getValueWhileInitializing(SCHEMA); } // Should be called only privately and by DefaultValidator! Int32 NADefaults::validateFloat(const char *value, float &result, Int32 attrEnum, Int32 errOrWarn) const { Int32 n = -1; // NT's scanf("%n") is not quite correct; hence this code-around sscanf(value, "%g%n", &result, &n); if (n > 0 && value[n] == '\0') return TRUE; // a valid float NAString v(value); NABoolean silentIf = (errOrWarn == SilentIfSYSTEM); if (silentIf) errOrWarn = 0/*silent*/; NABoolean useSYSTEM = (token(attrEnum, v, TRUE, errOrWarn) == DF_SYSTEM); if (useSYSTEM && silentIf) // ValidateNumeric is caller return SilentIfSYSTEM; // special it-is-valid return! if (errOrWarn) *CmpCommon::diags() << DgSqlCode(ERRWARN(2055)) << DgString0(value) << DgString1(lookupAttrName(attrEnum, errOrWarn)); if (useSYSTEM) { // programmer error CMPASSERT("Numeric attr allows SYSTEM -- you need to call token() first to see if its current value is this keyword, and compute your system default value!" 
== NULL); } // ensure an out-of-range error if domainMatch or ValidateNumeric is called result = -FLT_MAX; return FALSE; // not valid } NABoolean NADefaults::insert(Int32 attrEnum, const NAString &value, Int32 errOrWarn) { // private method; callers have all already done this: ATTR_RANGE_ASSERT; assert(errOrWarn != SilentIfSYSTEM); // yeh private, but just in case // Update cache: // (Re)validate that new value is numeric. // Useful if programmer did not correctly specify the DefaultValidator for // this attr in DefaultDefaults. // if (currentFloats_[attrEnum]) { float result; if (validateFloat(value, result, attrEnum, errOrWarn)) *currentFloats_[attrEnum] = result; else return FALSE; // not a valid float } // Update cache for DefaultToken by deallocating the cached entry. if ( currentTokens_[attrEnum] ) { NADELETEBASIC( currentTokens_[attrEnum], NADHEAP ); currentTokens_[attrEnum] = NULL; } // If we're past the read-from-SQLTable phase, then // the first CQD of a given attr must first save the from-SQLTable value, // to which the user can RESET if desired. // if (currentState_ >= SET_BY_CQD && !resetToDefaults_[attrEnum]) { NAString currValStr(currentDefaults_[attrEnum]); Lng32 currValLen = str_len(currValStr) + 1; char *pCurrVal = new NADHEAP char[currValLen]; str_cpy_all(pCurrVal, currValStr, currValLen); resetToDefaults_[attrEnum] = pCurrVal; } char *newVal = NULL; Lng32 newValLen = str_len(value) + 1; if (provenances_[attrEnum] > INIT_DEFAULT_DEFAULTS) { Lng32 oldValLen = str_len(currentDefaults_[attrEnum]) + 1; if (oldValLen >= newValLen && oldValLen < newValLen + 100) newVal = const_cast<char*>(currentDefaults_[attrEnum]); // reuse, to reduce mem frag else NADELETEBASIC(currentDefaults_[attrEnum], NADHEAP); } if (!newVal) newVal = new NADHEAP char[newValLen]; str_cpy_all(newVal, value, newValLen); currentDefaults_[attrEnum] = newVal; // when the parser flag is on for a set-once CQD // set its provenance as INIT_DEFAULT_DEFAULTS, // so the user can set it once later if ( isSetOnceAttribute(attrEnum) && Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL) ) { provenances_[attrEnum] = INIT_DEFAULT_DEFAULTS; } else { provenances_[attrEnum] = currentState_; } return TRUE; } NADefaults::Provenance NADefaults::getProvenance(Int32 attrEnum) const { ATTR_RANGE_ASSERT; return (Provenance)provenances_[attrEnum]; } NABoolean NADefaults::getValue(Int32 attrEnum, NAString &result) const { ATTR_RANGE_ASSERT; result = currentDefaults_[attrEnum]; return TRUE; // we always have a STRING REPRESENTATION value } NAString NADefaults::getString(Int32 attrEnum) const { ATTR_RANGE_ASSERT; return currentDefaults_[attrEnum]; } const char * NADefaults::getValue(Int32 attrEnum) const { ATTR_RANGE_ASSERT; return currentDefaults_[attrEnum]; } NABoolean NADefaults::getFloat(Int32 attrEnum, float &result) const { ATTR_RANGE_ASSERT; if (currentFloats_[attrEnum]) { result = *currentFloats_[attrEnum]; } else if (validateFloat(currentDefaults_[attrEnum], result, attrEnum)) { currentFloats_[attrEnum] = new NADHEAP float; // cache the result *currentFloats_[attrEnum] = result; } else { return FALSE; // result is neg, from failed validateFloat() } return TRUE; } double NADefaults::getAsDouble(Int32 attrEnum) const { // No domainMatch() needed: any float or double (or int or uint) is okay; // getFloat()/validateFloat() will disallow any non-numerics. 
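// A typical caller pattern (a sketch; see getNumOfESPsPerNodeInFloat() below
// for a real use of getAsDouble()):
//   double maxEspsPerCpu =
//     ActiveSchemaDB()->getDefaults().getAsDouble(MAX_ESPS_PER_CPU_PER_OP);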
float flt; getFloat(attrEnum, flt); return double(flt); } Lng32 NADefaults::getAsLong(Int32 attrEnum) const { float flt; getFloat(attrEnum, flt); if (!domainMatch(attrEnum, VALID_INT, &flt)) { CMPBREAK; } return Lng32(flt); } ULng32 NADefaults::getAsULong(Int32 attrEnum) const { float flt; getFloat(attrEnum, flt); if (!domainMatch(attrEnum, VALID_UINT, &flt)) { CMPBREAK; } return (ULng32)(flt); } ULng32 NADefaults::getNumOfESPsPerNode() const { return (ULng32)MAXOF(ceil(getNumOfESPsPerNodeInFloat()), 1); } float NADefaults::getNumOfESPsPerNodeInFloat() const { double maxEspPerCpuPerOp = getAsDouble(MAX_ESPS_PER_CPU_PER_OP); CollIndex cores = ( (CmpCommon::context() && CURRSTMT_OPTDEFAULTS->isFakeHardware()) ) ? getAsLong(DEF_NUM_SMP_CPUS) : gpClusterInfo->numberOfCpusPerSMP(); return float(maxEspPerCpuPerOp * cores); } ULng32 NADefaults::getTotalNumOfESPsInCluster(NABoolean& fakeEnv) const { fakeEnv = FALSE; if (getToken(PARALLEL_NUM_ESPS, 0) != DF_SYSTEM ) { fakeEnv = TRUE; return getAsLong(PARALLEL_NUM_ESPS); } float espsPerNode = getNumOfESPsPerNodeInFloat(); CollIndex numOfNodes = gpClusterInfo->numOfSMPs(); if ( (CmpCommon::context() && CURRSTMT_OPTDEFAULTS->isFakeHardware())) { fakeEnv = TRUE; numOfNodes = getAsLong(DEF_NUM_NODES_IN_ACTIVE_CLUSTERS); } return MAXOF(ceil(espsPerNode * numOfNodes), 1); } NABoolean NADefaults::domainMatch(Int32 attrEnum, Int32 expectedType/*DefaultValidatorType*/, float *flt) const { if (validator(attrEnum)->getType() == expectedType) return TRUE; // yes, domains match // Emit error messages only if the value is actually out-of-range. // // Users (optimizer code) should REALLY be using 'unsigned long' fields // and calling getAsULong, instead of using 'long' fields to retrieve // unsigned(DDui*) attr values via getAsLong ... // // LCOV_EXCL_START // if we get here the compiler will crash if (flt) { DefaultValidator *validator = NULL; if (expectedType == VALID_INT) validator = (DefaultValidator *)&validateInt; else if (expectedType == VALID_UINT) validator = (DefaultValidator *)&validateUI; // Explicitly check for TRUE here -- // both FALSE/error and SilentIfSYSTEM are out-of-range/out-of-domain // from this method's point of view. if (validator) if (validator->validate( currentDefaults_[attrEnum], this, attrEnum, -1, flt) == TRUE) return TRUE; // domain mismatch, but value *is* in the domain range } // fall thru to emit additional failure info *CmpCommon::diags() << DgSqlCode(+2058) // emit a mismatch WARNING << DgString0(lookupAttrName(attrEnum)) << DgString1(validator(attrEnum)->getTypeText()) << DgString2(DefaultValidator::getTypeText( DefaultValidatorType(expectedType))); #ifndef NDEBUG cerr << "Warning[2058] " << lookupAttrName(attrEnum) << " " << validator(attrEnum)->getTypeText() << " " << DefaultValidator::getTypeText( DefaultValidatorType(expectedType)) << " " << (flt ? *flt : 123.45) << endl; #endif // LCOV_EXCL_STOP return FALSE; } // CONTROL QUERY DEFAULT attr RESET; // resets the single attr to the value it had right after we read all // the DEFAULTS tables, // or the value it had right before a CQD * RESET RESET. // CONTROL QUERY DEFAULT * RESET; // resets all attrs to the values they had by same criteria as above. // CONTROL QUERY DEFAULT * RESET RESET; // resets the "reset-to" values so that all current values become the // effective "reset-to"'s -- i.e, the current values can't be lost // on the next CQD * RESET; // Useful for apps that dynamically send startup settings that ought // to be preserved -- ODBC and SQLCI do this. 
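// For example (a sketch of the intended usage pattern):
//   CONTROL QUERY DEFAULT SCHEMA 'MYSCHEMA';  -- explicit CQD
//   CONTROL QUERY DEFAULT * RESET RESET;      -- current values become the reset-to values
//   CONTROL QUERY DEFAULT * RESET;            -- SCHEMA remains 'MYSCHEMA'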
// void NADefaults::resetAll(NAString &value, NABoolean reset, Int32 errOrWarn) { size_t i, numAttrs = numDefaultAttributes(); if (reset == 1) { // CQD * RESET; (not RESET RESET) setResetAll(TRUE); for (i = 0; i < numAttrs; i++) { const char * attributeName = defaultDefaults[i].attrName; DefaultConstants attrEnum = lookupAttrName(attributeName, errOrWarn); if (isNonResetableAttribute(attributeName)) continue; validateAndInsert(attributeName, value, TRUE, errOrWarn); } // if DEFAULT_SCHEMA_NAMETYPE=USER after CQD * RESET // set SCHEMA to LDAP_USERNAME // if SCHEMA has not been specified by user if ( (getToken(DEFAULT_SCHEMA_NAMETYPE) == DF_USER) && schSetByNametype() ) { setSchemaAsLdapUser(); } setResetAll(FALSE); } else if (reset == 2) { for (i = 0; i < numAttrs; i++) { if (resetToDefaults_[i]) { // CONTROL QUERY DEFAULT * RESET RESET; -- this code cloned below // Can't reset prov, because to which? // provenances_[i] = READ_FROM_SQL_TABLE or COMPUTED ?? NADELETEBASIC(resetToDefaults_[i], NADHEAP); resetToDefaults_[i] = NULL; } } } else { CMPASSERT(!reset); } } // Reset to default-defaults, as if readFromSQLTables() had not executed, // but setting state and provenance so no future reads will be triggered. // See StaticCompiler and Genesis 10-990204-2469 above for motivation. void NADefaults::undoReadsAndResetToDefaultDefaults() { initCurrentDefaultsWithDefaultDefaults(); } NABoolean NADefaults::isReadonlyAttribute(const char* attrName) const { if ((( stricmp(attrName, "ISO_MAPPING") == 0 ) || ( stricmp(attrName, "OVERFLOW_MODE") == 0 ) || ( stricmp(attrName, "SORT_ALGO") == 0 )) && ( CmpCommon::getDefault(DISABLE_READ_ONLY) == DF_ON )) return FALSE; // for internal development and testing purposes if (( stricmp(attrName, "ISO_MAPPING") == 0 )|| ( stricmp(attrName, "MODE_SPECIAL_1") == 0 ) || ( stricmp(attrName, "MODE_SPECIAL_2") == 0 ) || ( stricmp(attrName, "NATIONAL_CHARSET") == 0 ) || ( stricmp(attrName, "VALIDATE_VIEWS_AT_OPEN_TIME") == 0 ) || ( stricmp(attrName, "USER_EXPERIENCE_LEVEL") == 0 ) || ( stricmp(attrName, "POS_DISKS_IN_SEGMENT") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_HASHJOIN") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_MERGEJOIN") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_HASHGROUPBY") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_SORT") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_PROBE_CACHE") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_PA") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_SEQUENCE") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_EXCHANGE") == 0 ) || ( stricmp(attrName, "SORT_ALGO") == 0 ) || ( stricmp(attrName, "OVERFLOW_MODE") == 0 ) ) return TRUE; if (strlen(attrName) > 0) { DefaultConstants v = lookupAttrName(attrName, 0, 0); if ((v != __INVALID_DEFAULT_ATTRIBUTE) && (getFlags(v) & DEFAULT_IS_SSD)) return TRUE; } return FALSE; } // these defaults cannot be reset or set to FALSE through a cqd. NABoolean NADefaults::isNonResetableAttribute(const char* attrName) const { if (( stricmp(attrName, "IS_SQLCI") == 0 ) || ( stricmp(attrName, "NVCI_PROCESS") == 0 ) || ( stricmp(attrName, "SESSION_ID") == 0 ) || ( stricmp(attrName, "LDAP_USERNAME") == 0 ) || ( stricmp(attrName, "VOLATILE_SCHEMA_IN_USE") == 0 ) || ( stricmp(attrName, "SESSION_USERNAME") == 0 ) ) return TRUE; return FALSE; } // these defaults can be set only once by user. 
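// E.g. DEFAULT_SCHEMA_ACCESS_ONLY and PUBLISHING_ROLES (see
// isSetOnceAttribute() below): a later attempt to set them again raises
// error -30042 in validateAndInsert(), unless the ALLOW_SET_ONCE_DEFAULTS
// parser flag is set.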
NABoolean NADefaults::isSetOnceAttribute(Int32 attrEnum) const { if ( attrEnum == DEFAULT_SCHEMA_ACCESS_ONLY || attrEnum == PUBLISHING_ROLES ) return TRUE; return FALSE; } void NADefaults::resetSessionOnlyDefaults() { NAString value; validateAndInsert("NVCI_PROCESS", value, 3, 0); } // Parameter <reset> must not be a reference (&); // see <value = ... fall thru> below. enum DefaultConstants NADefaults::validateAndInsert(const char *attrName, NAString &value, NABoolean reset, Int32 errOrWarn, Provenance overwriteIfNotYet) { NABoolean overwrite = FALSE; NABoolean isJDBC = FALSE; NABoolean isODBC = FALSE; if (ActiveSchemaDB()) { isJDBC = (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON ? TRUE : FALSE); isODBC = (CmpCommon::getDefault(ODBC_PROCESS) == DF_ON ? TRUE : FALSE); } if (reset && !attrName[0]) { // CONTROL QUERY DEFAULT * RESET overwrite = currentState_ < overwriteIfNotYet; if (overwrite) resetAll(value, reset, errOrWarn); return (DefaultConstants)0; // success } // Perform a lookup for the string, using a binary search. DefaultConstants attrEnum = lookupAttrName(attrName, errOrWarn); if (attrEnum >= 0) { // valid attrName // ignore DEFAULT_SCHEMA_ACCESS_ONLY if it is in system defaults if ( attrEnum == DEFAULT_SCHEMA_ACCESS_ONLY && getState() < SET_BY_CQD ) return attrEnum; // do the following check when // this is the primary mxcmp // and INTERNAL_QUERY_FROM_EXEUTIL is not set if (!CmpCommon::context()->isSecondaryMxcmp() && !Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)) { // This logic will catch if the set-once CQD // is set, but the ALLOW_SET_ONCE_DEFAULTS parserflags // are not set. This is absolutely necessary for security // to ensure that the correct parserflags are set. if ((isSetOnceAttribute(attrEnum)) && (!isResetAll()) && // no error msg for cqd * reset (NOT Get_SqlParser_Flags(ALLOW_SET_ONCE_DEFAULTS))) { *CmpCommon::diags() << DgSqlCode(-30042) << DgString0(attrName); return attrEnum; } // if DEFAULT_SCHEMA_ACCESS_ONLY is on, // users cannot change the following CQDs if ( getState() >= SET_BY_CQD && getToken(DEFAULT_SCHEMA_ACCESS_ONLY) == DF_ON ) { if (attrEnum == SCHEMA || attrEnum == PUBLIC_SCHEMA_NAME || attrEnum == DEFAULT_SCHEMA_NAMETYPE || attrEnum == PUBLISHING_ROLES) { if (!isResetAll()) // no error msg for cqd * reset *CmpCommon::diags() << DgSqlCode(-30043) << DgString0(attrName); return attrEnum; } } } else { // ignore LAST0_MODE cqd if we are in secondary mxcmp or if // internal_query_from_exeutil is set. This cqd is not meant // to apply in these cases if ( attrEnum == LAST0_MODE ) return attrEnum; } overwrite = getProvenance(attrEnum) < overwriteIfNotYet; // Put value into canonical form (trimmed, upcased where pertinent). // // Possibly revert to initial default default value -- see NOTE 3 up above. // Note further that ANSI names cannot revert on values of // 'SYSTEM' or 'ENABLE', as those are legal cat/sch/tbl names, // nor can they revert on '' (empty/blank), as ANSI requires us to // emit a syntax error for this. // // Possibly RESET to read-from-table value (before any CQD value). // TrimNAStringSpace(value); if (validator(attrEnum) != &validateAnsiName && !reset) { validator(attrEnum)->applyUpper(value); if (isSynonymOfSYSTEM(attrEnum, value)) value = getDefaultDefaultValue(attrEnum); else if (isSynonymOfRESET(value)) // CQD attr 'RESET'; ... 
reset = 1; } if (reset) { // CQD attr RESET; if ((isNonResetableAttribute(attrName)) && (reset != 3)) return attrEnum; if (!resetToDefaults_[attrEnum]) { if (overwrite) value = currentDefaults_[attrEnum]; // return actual val to caller if (attrEnum == ISOLATION_LEVEL) { // reset this in the global area TransMode::IsolationLevel il; getIsolationLevel(il); CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il); } // Solution: 10-060418-5903. Do not update MXCMP global access mode // with CQD ISOLATION_LEVEL_FOR_UPDATES as it will overwrite that // set by ISOLATION_LEVE. The CQD ISOLATION_LEVEL_FOR_UPDATES is // always accessed directly when necessary. //else if (attrEnum == ISOLATION_LEVEL_FOR_UPDATES) // { // // reset this in the global area // TransMode::IsolationLevel il; // getIsolationLevel(il, getToken(attrEnum)); // CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il, // FALSE); // } return attrEnum; } value = resetToDefaults_[attrEnum]; // fall thru, REINSERT this val } if (attrEnum == CATALOG) { if (!setCatalog(value, errOrWarn, overwrite)) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; else { if (getState() == READ_FROM_SQL_TABLE) { // set the volatile catalog to be same as the catalog read from // defaults table. If there is no catalog or volatile_catalog // specified in the defaults table, then volatile catalog name // will be the default catalog in use in the session where // volatile tables are created. CmpCommon::context()->sqlSession()->setVolatileCatalogName(value); } } } else if (attrEnum == SCHEMA) { if (!setSchema(value, errOrWarn, overwrite)) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; else { if (getState() == READ_FROM_SQL_TABLE) { // set the volatile catalog to be same as the catalog read from // defaults table. If there is no catalog or volatile_catalog // specified in the defaults table, then volatile catalog name // will be the default catalog in use in the session where // volatile tables are created. 
NAString cat(getValue(CATALOG)); CmpCommon::context()->sqlSession()->setVolatileCatalogName(cat); } } } else if (attrEnum == MP_SUBVOLUME && value.first('.') != NA_NPOS) { if (!setMPLoc(value, errOrWarn, overwriteIfNotYet)) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } else { if ( attrEnum == MAX_LONG_VARCHAR_DEFAULT_SIZE || attrEnum == MAX_LONG_WVARCHAR_DEFAULT_SIZE ) { ULng32 minLength; switch (attrEnum) { case MAX_LONG_VARCHAR_DEFAULT_SIZE: minLength = (Lng32)getAsULong(MIN_LONG_VARCHAR_DEFAULT_SIZE); break; case MAX_LONG_WVARCHAR_DEFAULT_SIZE: minLength = (Lng32)getAsULong(MIN_LONG_WVARCHAR_DEFAULT_SIZE); break; default: attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } if ( attrEnum != __INVALID_DEFAULT_ATTRIBUTE ) { UInt32 newMaxLength; Int32 n = -1; sscanf(value.data(), "%u%n", &newMaxLength, &n); if ( n>0 && (UInt32)n == value.length() ) { // a valid unsigned number if ( newMaxLength < minLength ) { *CmpCommon::diags() << DgSqlCode(-2030) << DgInt0((Lng32)minLength); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } } } if ( attrEnum == MIN_LONG_VARCHAR_DEFAULT_SIZE || attrEnum == MIN_LONG_WVARCHAR_DEFAULT_SIZE ) { ULng32 maxLength; switch (attrEnum) { case MIN_LONG_VARCHAR_DEFAULT_SIZE: maxLength = getAsULong(MAX_LONG_VARCHAR_DEFAULT_SIZE); break; case MIN_LONG_WVARCHAR_DEFAULT_SIZE: maxLength = getAsULong(MAX_LONG_WVARCHAR_DEFAULT_SIZE); break; default: attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } if ( attrEnum != __INVALID_DEFAULT_ATTRIBUTE ) { UInt32 newMinLength; Int32 n = -1; sscanf(value.data(), "%u%n", &newMinLength, &n); if ( n>0 && (UInt32)n == value.length() ) { // a valid unsigned number if ( newMinLength > maxLength ) { *CmpCommon::diags() << DgSqlCode(-2029) << DgInt0((Lng32)maxLength); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } } } if (errOrWarn && (attrEnum == ROUNDING_MODE)) { if (NOT ((value.length() == 1) && ((*value.data() == '0') || (*value.data() == '1') || (*value.data() == '2')))) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } if ( attrEnum == SCRATCH_MAX_OPENS_HASH || attrEnum == SCRATCH_MAX_OPENS_SORT ) { if (NOT ((value.length() == 1) && ((*value.data() == '1') || (*value.data() == '2') || (*value.data() == '3') || (*value.data() == '4')))) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } if (attrEnum != __INVALID_DEFAULT_ATTRIBUTE) { // We know that the MP_COLLATIONS validator emits only warnings // and always returns TRUE. On the validate-but-do-not-insert step // (CQD compilation), those warnings will be seen by the user. // On the validate-AND-insert (CQD execution), there is no need // to repeat them (besides, that causes Executor to choke on the // warnings in the diags and say 'Error fetching from TCB tree'). Int32 isValid = TRUE; if (!overwrite || currentState_ < SET_BY_CQD || validator(attrEnum) != &validateCollList) isValid = validator(attrEnum)->validate(value, this, attrEnum, errOrWarn); // if an internal reset is being done, then make it a valid attr // even if the 'validate' method above returned invalid. 
if ((!isValid) && (isNonResetableAttribute(attrName)) && (reset == 3)) { isValid = TRUE; } if (!isValid) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; else if (overwrite) { if (isValid == SilentIfSYSTEM) { // defDef value was "SYSTEM" or "" // Undo any caching from getFloat() NADELETEBASIC(currentFloats_[attrEnum], NADHEAP); currentFloats_[attrEnum] = NULL; // Undo any caching from getToken() NADELETEBASIC( currentTokens_[attrEnum], NADHEAP ); currentTokens_[attrEnum] = NULL; // Now fall thru to insert the string "SYSTEM" or "" } if (attrEnum == MP_CATALOG) { // This will apply default \sys to value if only $v.sv was specified. ComMPLoc loc(value, ComMPLoc::SUBVOL); value = loc.getMPName(); } if (!insert(attrEnum, value, errOrWarn)) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } // overwrite (i.e. insert) } } // not special val/ins for CAT, SCH, or MPLOC } // valid attrName if (attrEnum >= 0) { if (overwrite) { if ((! reset) && (currentState_ == SET_BY_CQD)) { // indicate that this attribute was set by a user CQD. setUserDefault(attrEnum, TRUE); } switch (attrEnum) { case MP_SYSTEM: case MP_VOLUME: case MP_SUBVOLUME: // // Signal to reconstruct MPLOC and MPLOC_as_SchemaName // on the next query, i.e. next call to getSqlParser_NADefaults(). SqlParser_NADefaults_->MPLOC_.setUnknown(); case CATALOG: case SCHEMA: break; case ISOLATION_LEVEL: { // Ansi 14.1 SR 4. See comexe/ExControlArea::addControl(). //## I now think this implementation is wrong //## because this is setting GLOBAL state //## for something that should be CONTEXT-dependent. //## Will cause us headaches later, when we //## make arkcmp be a multi-context multi-threaded server. TransMode::IsolationLevel il; getIsolationLevel(il); CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il); } break; // Solution: 10-060418-5903. Do not update MXCMP global access mode // with CQD ISOLATION_LEVEL_FOR_UPDATES as it will overwrite that // set by ISOLATION_LEVEL. The CQD ISOLATION_LEVEL_FOR_UPDATES is // always accessed directly when necessary. //case ISOLATION_LEVEL_FOR_UPDATES: //{ // TransMode::IsolationLevel il; // getIsolationLevel(il, getToken(attrEnum)); // CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il, // FALSE); //} //break; case MODE_SPECIAL_1: { if (getToken(MODE_SPECIAL_2) == DF_ON) { // MS1 was already set by now. Reset it and return an error. insert(MODE_SPECIAL_1, "OFF", errOrWarn); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } // find_suitable_key to be turned off in this mode, unless // it has been explicitely set. if (getToken(VOLATILE_TABLE_FIND_SUITABLE_KEY) == DF_SYSTEM) { insert(VOLATILE_TABLE_FIND_SUITABLE_KEY, "OFF", errOrWarn); } } break; case MODE_SPECIAL_2: { NAString val; if (getToken(MODE_SPECIAL_1) == DF_ON) { // MS2 was already set by now. Reset it and return an error. 
insert(MODE_SPECIAL_2, "OFF", errOrWarn); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; break; } if (value == "ON") val = "ON"; else val = resetToDefaults_[LIMIT_MAX_NUMERIC_PRECISION]; if (getToken(LIMIT_MAX_NUMERIC_PRECISION) == DF_SYSTEM) { insert(LIMIT_MAX_NUMERIC_PRECISION, val, errOrWarn); } if (value == "ON") val = "2"; else val = resetToDefaults_[ROUNDING_MODE]; insert(ROUNDING_MODE, val, errOrWarn); } break; case MODE_SPECIAL_4: { NAString val; if (value == "ON") val = "ON"; else val = "OFF"; insert(ALLOW_INCOMPATIBLE_COMPARISON, val, errOrWarn); insert(ALLOW_INCOMPATIBLE_ASSIGNMENT, val, errOrWarn); insert(ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT, val, errOrWarn); insert(MODE_SPECIAL_3, val, errOrWarn); NAString csVal; if (value == "ON") csVal = SQLCHARSETSTRING_UTF8; else csVal = ""; validateAndInsert("TRAF_DEFAULT_COL_CHARSET", csVal, FALSE, errOrWarn); NAString notVal; if (value == "ON") notVal = "OFF"; else notVal = "ON"; insert(TRAF_COL_LENGTH_IS_CHAR, notVal, errOrWarn); NAString costVal1; NAString costVal2; if (value == "ON") { costVal1 = "8.0"; costVal2 = "16.0" ; } else { costVal1 = "1.0"; costVal2 = "1.0" ; } validateAndInsert("NCM_IND_JOIN_COST_ADJ_FACTOR", costVal1, FALSE, errOrWarn); validateAndInsert("NCM_IND_SCAN_COST_ADJ_FACTOR", costVal2, FALSE, errOrWarn); if (value == "ON") Set_SqlParser_Flags(IN_MODE_SPECIAL_4); else Reset_SqlParser_Flags(IN_MODE_SPECIAL_4); } break; case MODE_SPECIAL_5: { NAString val; if (value == "ON") val = "ON"; else val = "OFF"; insert(ALLOW_INCOMPATIBLE_COMPARISON, val, errOrWarn); insert(ALLOW_INCOMPATIBLE_ASSIGNMENT, val, errOrWarn); insert(ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT, val, errOrWarn); insert(TRAF_ALLOW_SELF_REF_CONSTR, val, errOrWarn); } break; case MODE_SEABASE: { if (value == "ON") { if (NOT seabaseDefaultsTableRead()) { CmpSeabaseDDL cmpSBD((NAHeap *)heap_); Lng32 errNum = cmpSBD.validateVersions(this); if (errNum == 0) // seabase is initialized properly { // read from seabase defaults table cmpSBD.readAndInitDefaultsFromSeabaseDefaultsTable (overwriteIfNotYet, errOrWarn, this); } else { CmpCommon::context()->setIsUninitializedSeabase(TRUE); CmpCommon::context()->uninitializedSeabaseErrNum() = errNum; } } NAString sbCat = getValue(SEABASE_CATALOG); insert(SEABASE_VOLATILE_TABLES, "ON", errOrWarn); CmpCommon::context()->sqlSession()->setVolatileCatalogName(sbCat, TRUE); insert(UPD_SAVEPOINT_ON_ERROR, "OFF", errOrWarn); } else { NAString defCat = getValue(CATALOG); insert(SEABASE_VOLATILE_TABLES, "OFF", errOrWarn); CmpCommon::context()->sqlSession()->setVolatileCatalogName(defCat); insert(UPD_SAVEPOINT_ON_ERROR, "ON", errOrWarn); } } break; case MEMORY_LIMIT_QCACHE_UPPER_KB: CURRENTQCACHE->setHeapUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_HISTCACHE_UPPER_KB: CURRCONTEXT_HISTCACHE->setHeapUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_CMPSTMT_UPPER_KB: STMTHEAP->setUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_CMPCTXT_UPPER_KB: CTXTHEAP->setUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_NATABLECACHE_UPPER_KB: ActiveSchemaDB()->getNATableDB()->setHeapUpperLimit((size_t) 1024 * atoi(value.data())); break; case NAMETYPE: SqlParser_NADefaults_->NAMETYPE_ = token(NAMETYPE, value, TRUE); break; case NATIONAL_CHARSET: SqlParser_NADefaults_->NATIONAL_CHARSET_ = CharInfo::getCharSetEnum(value); break; case SESSION_ID: { CmpCommon::context()->sqlSession()->setSessionId(value); } break; case SESSION_USERNAME: { 
CmpCommon::context()->sqlSession()->setSessionUsername(value); } break; case SESSION_IN_USE: { CmpCommon::context()->sqlSession()->setSessionInUse ((getToken(attrEnum) == DF_ON)); } break; case SQLMX_REGRESS: { if (value == "ON") { insert(SIMILARITY_CHECK, "OFF", errOrWarn); insert(COMP_BOOL_157, "ON", errOrWarn); insert(SHOWDDL_DISPLAY_FORMAT, "INTERNAL", errOrWarn); insert(MODE_SPECIAL_1, "OFF", errOrWarn); if (getToken(VOLATILE_TABLE_FIND_SUITABLE_KEY) == DF_SYSTEM) { insert(VOLATILE_TABLE_FIND_SUITABLE_KEY, "OFF", errOrWarn); } char * env = getenv("SQLMX_REGRESS"); if (env) CmpCommon::context()->setSqlmxRegress(atoi(env)); else CmpCommon::context()->setSqlmxRegress(1); } else { insert(SIMILARITY_CHECK, "ON", errOrWarn); insert(COMP_BOOL_157, "OFF", errOrWarn); insert(SHOWDDL_DISPLAY_FORMAT, "EXTERNAL", errOrWarn); CmpCommon::context()->setSqlmxRegress(0); } } break; case VOLATILE_CATALOG: { CmpCommon::context()->sqlSession()->setVolatileCatalogName(value); } break; case VOLATILE_SCHEMA_IN_USE: { CmpCommon::context()->sqlSession()->setVolatileSchemaInUse ((getToken(attrEnum) == DF_ON)); } break; case ISO_MAPPING: { SqlParser_NADefaults_->ISO_MAPPING_ = CharInfo::getCharSetEnum(value); // Set the NAString_isoMappingCS memory cache for use by routines // ToInternalIdentifier() and ToAnsiIdentifier[2|3]() in module // w:/common/NAString[2].cpp. These routines currently cannot // access SqlParser_ISO_MAPPING directly due to the complex // build hierarchy. NAString_setIsoMapCS((SQLCHARSET_CODE) SqlParser_NADefaults_->ISO_MAPPING_); } break; case DEFAULT_CHARSET: { SqlParser_NADefaults_->DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(value); SqlParser_NADefaults_->ORIG_DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(value); } break; case ESP_ON_AGGREGATION_NODES_ONLY: { NABoolean useAgg = (getToken(attrEnum) == DF_ON); gpClusterInfo->setUseAggregationNodesOnly(useAgg); break; } case QUERY_TEXT_CACHE: { // If public schema is in use, query text cache has to be off NAString pSchema = getValue(PUBLIC_SCHEMA_NAME); if (pSchema != "") value = "OFF"; } break; case PUBLIC_SCHEMA_NAME: { // when PUBLIC_SCHEMA is used, turn off Query Text Cache if ( (value != "") && !(getToken(QUERY_TEXT_CACHE) == DF_OFF) ) insert(QUERY_TEXT_CACHE, "OFF"); // when PUBLIC_SCHEMA is not used, reset to the default value if ( value == "" ) { NAString v(""); validateAndInsert("QUERY_TEXT_CACHE", v, TRUE); } } break; case LDAP_USERNAME: { // when the LDAP_USERNAME is set (first time by CLI) // if DEFAULT_SCHEMA_NAMETYPE is USER, set schema to LDAP_USERNAME if ( !value.isNull() && (getToken(DEFAULT_SCHEMA_NAMETYPE) == DF_USER) && !userDefault(SCHEMA) && // do not change user setting ( schSetToUserID() || // only when schema was initialized to guardian id schSetByNametype() ) ) // or changed by same CQD { setSchemaAsLdapUser(value); setSchByNametype(TRUE); } } break; case DEFAULT_SCHEMA_ACCESS_ONLY: { if ( value == "ON" ) { NAString schemaNameType = getValue(DEFAULT_SCHEMA_NAMETYPE); if ( schemaNameType == "USER" ) { setSchemaAsLdapUser(); } } } break; case DEFAULT_SCHEMA_NAMETYPE: { if ( userDefault(SCHEMA) ) // if SCHEMA has been changed by user, do nothing break; if ( value == "SYSTEM" ) // reset to default schema { if ( schSetByNametype() ) // only when schema was changed by this CQD { // do not change catSchSetToUserID_ flag Int32 preVal = catSchSetToUserID_; NAString v(""); validateAndInsert("SCHEMA", v, TRUE); catSchSetToUserID_ = preVal; } } if ( value == "USER" ) // set default schema to ldpa username { if ( 
schSetToUserID() || // only when schema was initialized to guardian id schSetByNametype() ) // or was changed by this CQD { setSchemaAsLdapUser(); setSchByNametype(TRUE); } } } break; case USTAT_IUS_PERSISTENT_CBF_PATH: { // if the CBF path is SYSTEM, set it to $HOME/cbfs if ( value == "SYSTEM" ) { const char* home = getenv("HOME"); if ( home ) { value = home; value += "/cbfs"; validateAndInsert("USTAT_IUS_PERSISTENT_CBF_PATH", value, FALSE); } } } break; case TRAF_LOAD_ERROR_LOGGING_LOCATION: { if (value.length() > 512) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); } } break; default: break; } } // code to valid overwrite (insert) if (reset && overwrite) { // CONTROL QUERY DEFAULT attr RESET; -- this code cloned above // Can't reset prov, because to which? // provenances_[attrEnum] = READ_FROM_SQL_TABLE or COMPUTED ?? NADELETEBASIC(resetToDefaults_[attrEnum], NADHEAP); resetToDefaults_[attrEnum] = NULL; } else if (!overwrite && errOrWarn && getProvenance(attrEnum) >= IMMUTABLE) { *CmpCommon::diags() << DgSqlCode(ERRWARN(2200)) << DgString0(lookupAttrName(attrEnum, errOrWarn)); } } // valid attrName return attrEnum; } // NADefaults::validateAndInsert() enum DefaultConstants NADefaults::holdOrRestore (const char *attrName, Lng32 holdOrRestoreCQD) { DefaultConstants attrEnum = __INVALID_DEFAULT_ATTRIBUTE; if (holdOrRestoreCQD == 0) { *CmpCommon::diags() << DgSqlCode(-2050) << DgString0(attrName); return attrEnum; } // Perform a lookup for the string, using a binary search. attrEnum = lookupAttrName(attrName, -1); if (attrEnum < 0) { *CmpCommon::diags() << DgSqlCode(-2050) << DgString0(attrName); return attrEnum; } char * value = NULL; if (holdOrRestoreCQD == 1) // hold cqd { if (heldDefaults_[attrEnum]) { NADELETEBASIC(heldDefaults_[attrEnum], NADHEAP); } if (currentDefaults_[attrEnum]) { value = new NADHEAP char[strlen(currentDefaults_[attrEnum]) + 1]; strcpy(value, currentDefaults_[attrEnum]); } else { value = new NADHEAP char[strlen(defaultDefaults[defDefIx_[attrEnum]].value) + 1]; strcpy(value, defaultDefaults[defDefIx_[attrEnum]].value); } heldDefaults_[attrEnum] = value; } else { // restore cqd from heldDefaults_ array, if it was held. if (! heldDefaults_[attrEnum]) return attrEnum; if (currentDefaults_[attrEnum]) { NADELETEBASIC(currentDefaults_[attrEnum], NADHEAP); value = new NADHEAP char[strlen(heldDefaults_[attrEnum]) + 1]; strcpy(value, heldDefaults_[attrEnum]); currentDefaults_[attrEnum] = value; } NADELETEBASIC(heldDefaults_[attrEnum], NADHEAP); heldDefaults_[attrEnum] = NULL; } return attrEnum; } const SqlParser_NADefaults *NADefaults::getSqlParser_NADefaults() { // "Precompile" the MPLOC into a handier format for name resolution. // The pure ComMPLoc is used in a few places, and the SchemaName form // is used when NAMETYPE is NSK. // if (SqlParser_NADefaults_->MPLOC_.getFormat() == ComMPLoc::UNKNOWN) { NAString sys, vol, subvol; getValue(MP_SYSTEM, sys); getValue(MP_VOLUME, vol); getValue(MP_SUBVOLUME, subvol); if (!sys.isNull()) sys += "."; sys += vol + "." + subvol; SqlParser_NADefaults_->MPLOC_.parse(sys, ComMPLoc::SUBVOL); // For NAMETYPE NSK, catalog name is e.g. "\AZTEC.$FOO" SqlParser_NADefaults_->MPLOC_as_SchemaName_.setCatalogName( SqlParser_NADefaults_->MPLOC_.getSysDotVol()); // For NAMETYPE NSK, schema name is e.g. 
" SqlParser_NADefaults_->MPLOC_as_SchemaName_.setSchemaName( SqlParser_NADefaults_->MPLOC_.getSubvolName()); // We've already validated the heck out of this // in validateAndInsert() and setMPLoc()! #if defined(NA_NSK) || defined(_DEBUG) CMPASSERT(SqlParser_NADefaults_->MPLOC_.isValid(ComMPLoc::SUBVOL)); #endif // defined(NA_NSK) || defined(_DEBUG) } return SqlParser_NADefaults_; } static void setCatSchErr(NAString &value, Lng32 sqlCode, Int32 errOrWarn, NABoolean catErr = FALSE) { if (!sqlCode || !errOrWarn) return; TrimNAStringSpace(value); // prettify further (neater errmsg) *CmpCommon::diags() << DgSqlCode(ERRWARN(sqlCode)) << DgCatalogName(value) << DgSchemaName(value) << DgString0(value) << DgString1(value); if (value.first('"') == NA_NPOS) { // delimited names too complicated ! NAString namepart = value; size_t dot = value.first('.'); if (dot != NA_NPOS) { namepart.remove(dot); if (!IsSqlReservedWord(namepart)) { namepart = value; namepart.remove(0, dot+1); } } if (IsSqlReservedWord(namepart)) { *CmpCommon::diags() << DgSqlCode(ERRWARN(3128)) << DgString0(namepart) << DgString1(namepart); return; } } // must determine if the defaults have been set up before parseDML is called if (IdentifyMyself::GetMyName() == I_AM_UNKNOWN){ return; // diagnostic already put into diags above. } // Produce additional (more informative) syntax error messages, // trying delimited-value first and then possibly regular-value-itself. Parser parser(CmpCommon::context()); Lng32 errs = CmpCommon::diags()->getNumber(DgSqlCode::ERROR_); NAString pfx(catErr ? "SET CATALOG " : "SET SCHEMA "); NAString stmt; char c = *value.data(); if (c && c != '\"') { stmt = pfx; stmt += "\""; stmt += value; stmt += "\""; stmt += ";"; #pragma nowarn(1506) // warning elimination parser.parseDML(stmt, stmt.length(), OBJECTNAMECHARSET ); #pragma warn(1506) // warning elimination } if (errs == CmpCommon::diags()->getNumber(DgSqlCode::ERROR_)) { stmt = pfx; stmt += value; stmt += ";"; #pragma nowarn(1506) // warning elimination parser.parseDML(stmt, stmt.length(), OBJECTNAMECHARSET ); #pragma warn(1506) // warning elimination } // Change errors to warnings if errOrWarn is +1 (i.e. warning). if (errOrWarn > 0) NegateAllErrors(CmpCommon::diags()); } NABoolean NADefaults::setCatalog(NAString &value, Int32 errOrWarn, NABoolean overwrite, NABoolean alreadyCanonical) { setCatUserID(currentState_ == COMPUTED); // The input value is in external (Ansi) format. // If we are in the COMPUTED currentState_, // make the value strictly canonical, // and try non-delimited first, then delimited. // Prettify removes lead/trailing blanks, // and upcases where unquoted (for nicer errmsgs); // ComSchemaName parses/validates. // if (alreadyCanonical) ; // leave it alone, for performance's sake else if (currentState_ == COMPUTED) { // ' SQL.FOO' TrimNAStringSpace(value); // 'SQL.FOO' NAString tmp(value); value = ToAnsiIdentifier(value); // nondelim ok? if (value.isNull()) value = NAString("\"") + tmp + "\""; // '"SQL.FOO"' } else PrettifySqlText(value); ComSchemaName nam(value); if (nam.getSchemaNamePart().isEmpty() || // 0 name parts, if *any* error !nam.getCatalogNamePart().isEmpty()) { // 2 parts (cat.sch) is an error setCatSchErr(value, EXE_INVALID_CAT_NAME, errOrWarn, TRUE); return FALSE; // invalid value } else { // Get the 1 name part (the "schema" part as far as ComSchema knows...) 
if (overwrite) insert(CATALOG, nam.getSchemaNamePartAsAnsiString()); return TRUE; } } NABoolean NADefaults::setMPLoc(const NAString &value, Int32 errOrWarn, Provenance overwriteIfNotYet) { NABoolean isValid = TRUE; // Validate the entire string all at once, // so that if any namepart is in error, // we insert NONE of the MP_xxx values. ComMPLoc loc(value, ComMPLoc::SUBVOL); if (!loc.isValid(ComMPLoc::SUBVOL)) { // Call the MPLOC validator solely to emit proper errmsg validateNSKMPLoc.validate(value, this, MP_SUBVOLUME, errOrWarn); isValid = FALSE; } else { NAString v; DefaultConstants e; if (loc.hasSystemName()) { v = loc.getSystemName(); e = validateAndInsert("MP_SYSTEM", v, 0, errOrWarn, overwriteIfNotYet); CMPASSERT(e >= 0); // this is just double-checking! } v = loc.getVolumeName(); e = validateAndInsert("MP_VOLUME", v, 0, errOrWarn, overwriteIfNotYet); CMPASSERT(e >= 0); // this is just double-checking! v = loc.getSubvolName(); e = validateAndInsert("MP_SUBVOLUME", v, 0, errOrWarn, overwriteIfNotYet); CMPASSERT(e >= 0); // this is just double-checking! } return isValid; } NABoolean NADefaults::setSchema(NAString &value, Int32 errOrWarn, NABoolean overwrite, NABoolean alreadyCanonical) { // if this is part of CQD *RESET and it was initialized with role name // do not change the following flags // to allow DEFAULT_SCHEMA_NAMETYPE to set its value if (!( schSetToUserID() && isResetAll() )) { setSchUserID(currentState_ == COMPUTED); setSchByNametype(FALSE); } if (alreadyCanonical) ; // leave it alone, for performance's sake else if (currentState_ == COMPUTED) { // ' SQL.FOO' TrimNAStringSpace(value); // 'SQL.FOO' NAString tmp(value); value = ToAnsiIdentifier(value); // nondelim ok? if (value.isNull()) value = NAString("\"") + tmp + "\""; // '"SQL.FOO"' } else PrettifySqlText(value); ComSchemaName nam(value); if (nam.getSchemaNamePart().isEmpty()) { // 0 name parts, if *any* error setCatSchErr(value, EXE_INVALID_SCH_NAME, errOrWarn); return FALSE; // invalid value } else { if (overwrite) insert(SCHEMA, nam.getSchemaNamePartAsAnsiString()); // If 2 parts, overwrite any prior catalog default if (!nam.getCatalogNamePart().isEmpty()) { if (overwrite) { insert(CATALOG, nam.getCatalogNamePartAsAnsiString()); if (currentState_ == SET_BY_CQD) { // indicate that this attribute was set by a user CQD. setUserDefault(CATALOG, TRUE); } } } return TRUE; } } NAString NADefaults::keyword(DefaultToken tok) { CMPASSERT(tok >= 0 && tok < DF_lastToken); return keywords_[tok]; } // Defaults Tokens // There is a set of keywords which can appear as values of Defaults entries // in the Defaults Table. We declare, for each such token, a string (the // keyword), and an enumeration value. The string values belong in an // array, DFkeywords, in sorted order. The idea is we can use binary // search in order to obtain the index of a string to the matching // entry in this sorted array. // // If we define the enumerations carefully (pay attention here!), then // that index we just found (see previous paragraph) is the enum value // of the token. 
// In simple words: this has to be in identical order with enum DefaultToken // in DefaultConstants.h const char *NADefaults::keywords_[DF_lastToken] = { "ACCUMULATED", "ADVANCED", "AGGRESSIVE", "ALL", "ANSI", "BEGINNER", "BOTH", "CLEAR", "DEBUG", "DISK", "DISK_POOL", "DUMP", "DUMP_MV", "EXTERNAL", "EXTERNAL_DETAILED", "FIRSTROW", "HARDWARE", "HEAP", "HIGH", "HYBRID", "IEEE", "INDEXES", "INTERNAL", "IQS", "JNI", "JNI_TRX", "KEYINDEXES", "LASTROW", "LATEST", "LOADNODUP", "LOCAL", "LOCAL_NODE", "LOG", "MAXIMUM", "MEASURE", "MEDIUM", "MEDIUM_LOW", "MINIMUM", "MMAP", "MULTI_NODE", "MVCC", "NONE", "NSK", "OFF", "ON", "OPENS_FOR_WRITE", "OPERATOR", "ORDERED", "PERTABLE", "PRINT", "PRIVATE", "PUBLIC", "QS", "READ_COMMITTED", "READ_UNCOMMITTED", "RELEASE", "REMOTE", "REPEATABLE_READ", "REPSEL", "RESOURCES", "RETURN", "SAMPLE", "SERIALIZABLE", "SHORTANSI", "SIMPLE", "SKIP", "SMD", "SOFTWARE", "SOURCE", "SQLMP", "SSCC", "SSD", "STOP", "SUFFIX", "SYSTEM", "TANDEM", "THRIFT", "USER", "VERTICAL", "WAIT", "WARN", "XML" }; // To call bsearch we must satisfy each of its arguments. Either // NULL comes back, or, comes back a pointer to the element which is // a true match for our key. bsearch.key is upperKey.data(). // bsearch.base is keywords_. nel is DF_lastToken. // The next argument is sizeof char*. Finally, the comparison // function can simply be the strcmp function. // // Note that this function makes heavy reliance on the idea that // the DefaultToken enumerations go up in sequence 0, 1, 2, 3... . // // We do the cast on strcmp because its signature from the header // file is: int (*)(const char *, const char *). In general, we're // doing a lot of type casting in here. static Int32 stringCompare(const void* s1, const void* s2) { return strcmp( * (char**) s1, * (char**) s2); } DefaultToken NADefaults::token(Int32 attrEnum, NAString &value, NABoolean valueAlreadyGotten, Int32 errOrWarn) const { ATTR_RANGE_ASSERT; if (!valueAlreadyGotten) { value = getValue(attrEnum); // already trim & upper (by validateAndInsert) TrimNAStringSpace(value); // can't trust that the stored value is canonical } else { TrimNAStringSpace(value); // can't trust that input value is canonical, value.toUpper(); // so here do what validateAndInsert does } DefaultToken tok = DF_noSuchToken; if (value.isNull()) tok = DF_SYSTEM; else { if ((attrEnum == TERMINAL_CHARSET) || (attrEnum == USE_HIVE_SOURCE) || (attrEnum == HBASE_DATA_BLOCK_ENCODING_OPTION) || (attrEnum == HBASE_COMPRESSION_OPTION)) return DF_USER; if ( attrEnum == NATIONAL_CHARSET || attrEnum == DEFAULT_CHARSET || attrEnum == HIVE_DEFAULT_CHARSET || attrEnum == ISO_MAPPING || attrEnum == INPUT_CHARSET || attrEnum == TRAF_DEFAULT_COL_CHARSET ) { CharInfo::CharSet cs = CharInfo::getCharSetEnum(value); Int32 err_found = 0; if ( !CharInfo::isCharSetSupported(cs) ) { err_found = 1; } else { switch( attrEnum ) { case NATIONAL_CHARSET: if (cs == CharInfo::KANJI_MP) break; //Allow (for regression test) if ((cs != CharInfo::UNICODE) && (cs != CharInfo::ISO88591)) err_found = 1; break; case DEFAULT_CHARSET: if (cs != CharInfo::ISO88591 && cs != CharInfo::UTF8 // && cs != CharInfo::SJIS ) err_found = 1; break; case HIVE_DEFAULT_CHARSET: case TRAF_DEFAULT_COL_CHARSET: if ((cs != CharInfo::UTF8) && (cs != CharInfo::ISO88591)) err_found = 1; break; case ISO_MAPPING: if (cs != CharInfo::ISO88591) err_found = 1; break; default: break; } } if ( (err_found != 0) && errOrWarn ) *CmpCommon::diags() << DgSqlCode(ERRWARN(3010)) << DgString0(value); else return DF_USER; // kludge, return any 
valid token } //else //else fall thru to see if value is SYSTEM // OPTIMIZATION_LEVEL if ((attrEnum == OPTIMIZATION_LEVEL) && value.length() == 1) switch (*value.data()) { case '0': return DF_MINIMUM; case '1': return DF_MINIMUM; case '2': return DF_MEDIUM_LOW; case '3': return DF_MEDIUM; case '4': return DF_MEDIUM; case '5': return DF_MAXIMUM; } // PCODE_OPT_LEVEL if ((attrEnum == PCODE_OPT_LEVEL) && value.length() == 1) switch (*value.data()) { case '0': return DF_MINIMUM; case '1': return DF_MEDIUM; case '2': return DF_HIGH; case '3': return DF_MAXIMUM; } if ( attrEnum == TEMPORARY_TABLE_HASH_PARTITIONS || attrEnum == MVQR_REWRITE_CANDIDATES || attrEnum == MVQR_PUBLISH_TABLE_LOCATION || attrEnum == MVQR_WORKLOAD_ANALYSIS_MV_NAME || attrEnum == HIST_SCRATCH_VOL) return DF_SYSTEM; const char *k = value.data(); char *match = (char*) bsearch( &k, keywords_, DF_lastToken, sizeof(char*), stringCompare); if (match) tok = (DefaultToken) (((const char**) match) - keywords_); else { // Check for synonyms const char *c = value; for (; *c == '0'; c++) ; // all ascii '0' ? if (*c == '\0') // terminating nul '\0' tok = DF_OFF; else if (value.length() <= 2) { if (value == "1" || value == "+1" || value == "-1") tok = DF_ON; } else { if ((value == "STOP_AT") || (value == "STOP AT")) tok = DF_STOP; else if (value == "READ COMMITTED") tok = DF_READ_COMMITTED; else if (value == "READ UNCOMMITTED") tok = DF_READ_UNCOMMITTED; else if (value == "REPEATABLE READ") tok = DF_REPEATABLE_READ; else if (value == "BEGINNER") tok = DF_BEGINNER; else if (value == "ADVANCED") tok = DF_ADVANCED; #define CONVERT_SYNONYM(from,to) \ else if (value == "" # from "") { \ CMPASSERT(DF_ ## from == DF_ ## to); \ tok = DF_ ## to; \ } CONVERT_SYNONYM(COMPAQ, TANDEM) CONVERT_SYNONYM(DISABLE, OFF) CONVERT_SYNONYM(ENABLE, SYSTEM) CONVERT_SYNONYM(FALSE, OFF) CONVERT_SYNONYM(FULL, MAXIMUM) CONVERT_SYNONYM(TRUE, ON) } } } NABoolean isValid = FALSE; if (tok != DF_noSuchToken) switch (attrEnum) { case DEFAULT_SCHEMA_ACCESS_ONLY: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case DEFAULT_SCHEMA_NAMETYPE: if (tok == DF_SYSTEM || tok == DF_USER) isValid = TRUE; break; case DETAILED_STATISTICS: if (tok == DF_ALL || tok == DF_MEASURE || tok == DF_ACCUMULATED || tok == DF_OPERATOR || tok == DF_PERTABLE || tok == DF_OFF) isValid = TRUE; break; case FLOATTYPE: if (tok == DF_TANDEM || tok == DF_IEEE) isValid = TRUE; break; case GROUP_BY_USING_ORDINAL: if (tok == DF_ALL || tok == DF_MINIMUM || tok == DF_OFF) isValid = TRUE; break; case EXE_PARALLEL_PURGEDATA: if (tok == DF_ALL || tok == DF_MINIMUM || tok == DF_OFF || tok == DF_ON || tok == DF_MEDIUM) isValid = TRUE; break; case HIDE_INDEXES: if (tok == DF_NONE || tok == DF_ALL || tok == DF_VERTICAL || tok == DF_INDEXES || tok == DF_KEYINDEXES) isValid = TRUE; break; case INDEX_ELIMINATION_LEVEL: if (tok == DF_MINIMUM || tok == DF_MEDIUM || tok == DF_MAXIMUM || tok == DF_AGGRESSIVE ) isValid = TRUE; break; case IF_LOCKED: if (tok == DF_RETURN || tok == DF_WAIT) isValid = TRUE; break; case INSERT_VSBB: if (tok == DF_OFF || tok == DF_LOADNODUP || tok == DF_SYSTEM || tok == DF_USER) isValid = TRUE; break; case OVERFLOW_MODE: if (tok == DF_DISK || tok == DF_SSD || tok == DF_MMAP) isValid = TRUE; break; case SORT_ALGO: if(tok == DF_HEAP || tok == DF_IQS || tok == DF_REPSEL || tok == DF_QS) isValid = TRUE; break; case QUERY_CACHE_MPALIAS: case QUERY_TEMPLATE_CACHE: case SHARE_TEMPLATE_CACHED_PLANS: case VSBB_TEST_MODE: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case 
QUERY_TEXT_CACHE: if (tok == DF_ON || tok == DF_OFF || tok == DF_SYSTEM || tok == DF_SKIP) isValid = TRUE; break; case DISABLE_BUFFERED_INSERTS: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case ISOLATION_LEVEL: { TransMode::IsolationLevel iltmp; isValid = getIsolationLevel(iltmp, tok); } break; case ISOLATION_LEVEL_FOR_UPDATES: { TransMode::IsolationLevel iltmp; isValid = getIsolationLevel(iltmp, tok); } break; case MVGROUP_AUTOMATIC_CREATION: case MV_TRACE_INCONSISTENCY: //++ MV case MV_AS_ROW_TRIGGER: //++ MV { if(DF_ON == tok || DF_OFF == tok) { isValid = TRUE; } } break; case IUD_NONAUDITED_INDEX_MAINT: if (tok == DF_OFF || tok == DF_SYSTEM || tok == DF_WARN || tok == DF_ON) isValid = TRUE; break; case IS_SQLCI: // for primary mxcmp that is invoked for user queries, the only valid // value for mxci_process cqd is TRUE. This cqd is set once by mxci // at startup time and cannot be changed by user. That way we know that // a request has come in from mxci(trusted) process. // For secondary mxcmp's invoked for internal queries where cqd's are // sent using sendAllControls method, all values are valid. This will // ensure that if this default is not set and is sent over to secondary // mxcmp using an internal CQD statement, it doesn't return an error. if (cmpCurrentContext->isSecondaryMxcmp()) { if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; } else { if (tok == DF_ON) isValid = TRUE; } break; case NVCI_PROCESS: // for primary mxcmp that is invoked for user queries, the only valid // value for nvci_process cqd is TRUE. This cqd is set once by nvci // at startup time and cannot be changed by user. That way we know that // a request has come in from nvci(trusted) process. // For secondary mxcmp's invoked for internal queries where cqd's are // sent using sendAllControls method, all values are valid. This will // ensure that if this default is not set and is sent over to secondary // mxcmp using an internal CQD statement, it doesn't return an error. 
if (cmpCurrentContext->isSecondaryMxcmp()) { if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; } else { if (tok == DF_ON) isValid = TRUE; } break; case NAMETYPE: if (tok == DF_ANSI || tok == DF_SHORTANSI || tok == DF_NSK) isValid = TRUE; break; case OPTIMIZATION_GOAL: if (tok == DF_FIRSTROW || tok == DF_LASTROW || tok == DF_RESOURCES) isValid = TRUE; break; case USER_EXPERIENCE_LEVEL: if (tok == DF_ADVANCED || tok == DF_BEGINNER) isValid = TRUE; break; case PCODE_OPT_LEVEL: if (tok == DF_OFF) { isValid = TRUE; break; } // else fall through to the next case, all those keywords are allowed // as well case ATTEMPT_ESP_PARALLELISM: if (tok == DF_SYSTEM || tok == DF_ON || tok == DF_OFF || tok == DF_MAXIMUM) isValid = TRUE; break; case OPTIMIZATION_LEVEL: if (tok == DF_MINIMUM || tok == DF_MEDIUM_LOW || tok == DF_MEDIUM || tok == DF_MAXIMUM) isValid = TRUE; break; case ROBUST_QUERY_OPTIMIZATION: if (tok == DF_MINIMUM || tok == DF_SYSTEM || tok == DF_MAXIMUM || tok == DF_HIGH) isValid = TRUE; break; case REFERENCE_CODE: case TARGET_CODE: if (tok == DF_RELEASE || tok == DF_DEBUG) isValid = TRUE; break; /* case ROLLBACK_ON_ERROR: if (tok == DF_OFF || tok == DF_ON || tok == DF_SYSTEM) isValid = TRUE; break; */ case AUTO_QUERY_RETRY: if (tok == DF_ON || tok == DF_OFF || tok == DF_SYSTEM) isValid = TRUE; break; case AUTO_QUERY_RETRY_WARNINGS: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case EXE_PARALLEL_DDL: if (tok == DF_OFF || tok == DF_ON || tok == DF_EXTERNAL || tok == DF_INTERNAL) isValid = TRUE; break; case UNAVAILABLE_PARTITION: if (tok == DF_SKIP || tok == DF_STOP) isValid = TRUE; break; case QUERY_CACHE_STATISTICS: // on, off are no-ops if (tok == DF_PRINT || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case QUERY_CACHE_STATEMENT_PINNING: if (tok == DF_CLEAR || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case HJ_TYPE: if (tok == DF_ORDERED || tok == DF_HYBRID || tok == DF_SYSTEM) isValid = TRUE; break; case REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT: if (tok == DF_OFF || tok == DF_ON || tok == DF_SYSTEM) isValid = TRUE; break; case POS: if (tok == DF_LOCAL_NODE || tok == DF_OFF || tok == DF_MULTI_NODE || tok == DF_DISK_POOL) isValid = TRUE; break; case USTAT_INTERNAL_SORT: if (tok == DF_ON || tok == DF_OFF || tok == DF_HYBRID) isValid = TRUE; break; case USTAT_AUTO_FOR_VOLATILE_TABLES: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case SUBQUERY_UNNESTING: if (tok == DF_OFF || tok == DF_ON || tok == DF_DEBUG) isValid = TRUE; break; case SORT_INTERMEDIATE_SCRATCH_CLEANUP: if(tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case SORT_MEMORY_QUOTA_SYSTEM: if(tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; /* If MDAM_SCAN_METHOD's value is "MAXIMUM" only, Right side of Nested Join will use the MDAM path Allowable values for MDAM_SCAN_METHOD are 'ON' | 'OFF' | 'MAXIMUM' */ case MDAM_SCAN_METHOD: if (tok == DF_ON || tok == DF_OFF || tok == DF_MAXIMUM) isValid = TRUE; break; case SHOWDDL_DISPLAY_FORMAT: if (tok == DF_INTERNAL || tok == DF_EXTERNAL || tok == DF_LOG) isValid = TRUE; break; case SHOWDDL_DISPLAY_PRIVILEGE_GRANTS: if (tok == DF_SYSTEM || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case EXPLAIN_DISPLAY_FORMAT: if (tok == DF_INTERNAL || tok == DF_EXTERNAL || tok == DF_EXTERNAL_DETAILED) isValid = TRUE; break; case UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY: if (tok == DF_ON || tok == DF_OFF || tok == DF_AGGRESSIVE) isValid = TRUE; break; case MVQR_ALL_JBBS_IN_QD: case MVQR_REWRITE_ENABLED_OPTION: case 
MVQR_REWRITE_SINGLE_TABLE_QUERIES: case MVQR_USE_EXTRA_HUB_TABLES: case MVQR_ENABLE_LOGGING: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case MVQR_LOG_QUERY_DESCRIPTORS: if (tok == DF_OFF || tok == DF_DUMP || tok == DF_DUMP_MV || tok == DF_LOG) isValid = TRUE; break; case MVQR_PRIVATE_QMS_INIT: if (tok == DF_SMD || tok == DF_XML || tok == DF_NONE) isValid = TRUE; break; case MVQR_PUBLISH_TO: if (tok == DF_PUBLIC || tok == DF_PRIVATE || tok == DF_BOTH || tok == DF_NONE) isValid = TRUE; break; case MVQR_WORKLOAD_ANALYSIS_MV_NAME: isValid = TRUE; break; case ELIMINATE_REDUNDANT_JOINS: if (tok == DF_OFF || tok == DF_ON || tok == DF_DEBUG || tok == DF_MINIMUM) isValid = TRUE; break; case VOLATILE_TABLE_FIND_SUITABLE_KEY: if (tok == DF_SYSTEM || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case CAT_DISTRIBUTE_METADATA: if (tok == DF_OFF || tok == DF_LOCAL_NODE || tok == DF_ON) isValid = TRUE; break; case MV_DUMP_DEBUG_INFO: if (tok == DF_OFF || tok == DF_ON) isValid = TRUE; break; case RANGESPEC_TRANSFORMATION: if (tok == DF_OFF || tok == DF_ON || tok == DF_MINIMUM) isValid = TRUE; break; case ASYMMETRIC_JOIN_TRANSFORMATION: if (tok == DF_MINIMUM || tok == DF_MAXIMUM) isValid = TRUE; break; case CAT_DEFAULT_COMPRESSION: if (tok == DF_NONE || tok == DF_HARDWARE || tok == DF_SOFTWARE) isValid = TRUE; break; case REPLICATE_DISK_POOL: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case COMPRESSION_TYPE: if (tok == DF_NONE || tok == DF_HARDWARE || tok == DF_SOFTWARE) isValid = TRUE; break; // The DF_SAMPLE setting indicates that the persistent sample will be // updated incrementally, but not the histograms; they will be created // anew from the incrementally updated sample. case USTAT_INCREMENTAL_UPDATE_STATISTICS: if (tok == DF_OFF || tok == DF_SAMPLE || tok == DF_ON) isValid = TRUE; break; case REPLICATE_COMPRESSION_TYPE: if (tok == DF_NONE || tok == DF_HARDWARE || tok == DF_SOFTWARE || tok == DF_SOURCE || tok == DF_SYSTEM) isValid = TRUE; break; case REUSE_OPENS: if (tok==DF_ON || tok == DF_OFF || tok == DF_OPENS_FOR_WRITE) isValid = TRUE; break; case USE_HIVE_SOURCE: isValid = TRUE; break; case TRAF_TABLE_SNAPSHOT_SCAN: if (tok == DF_NONE || tok == DF_SUFFIX || tok == DF_LATEST) isValid = TRUE; break; case LOB_OUTPUT_SIZE: if (tok >=0 && tok <= 512000) isValid = TRUE; break; case TRAF_TRANS_TYPE: if (tok == DF_MVCC || tok == DF_SSCC) isValid = TRUE; break; // Nothing needs to be added here for ON/OFF/SYSTEM keywords -- // instead, add to DEFAULT_ALLOWS_SEPARATE_SYSTEM code in the ctor. default: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; } // See "NOTE 2" way up top. if (!isValid) { if (tok == DF_SYSTEM) { isValid = isFlagOn(attrEnum, DEFAULT_ALLOWS_SEPARATE_SYSTEM); if (!isValid) { NAString tmp(getDefaultDefaultValue(attrEnum)); isValid = isSynonymOfSYSTEM(attrEnum, tmp); } } } if (!isValid) { tok = DF_noSuchToken; if (errOrWarn) *CmpCommon::diags() << DgSqlCode(ERRWARN(2055)) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); } return tok; } DefaultToken NADefaults::getToken( const Int32 attrEnum, const Int32 errOrWarn ) const { // Check the cache first. if ( currentTokens_[attrEnum] != NULL ) { return *currentTokens_[attrEnum]; } // Get the token and allocate memory to store the token value. 
NAString tmp( NADHEAP ); currentTokens_[attrEnum] = new NADHEAP DefaultToken; *currentTokens_[attrEnum] = token( attrEnum, tmp, FALSE, errOrWarn ); return *currentTokens_[attrEnum]; } NABoolean NADefaults::getIsolationLevel(TransMode::IsolationLevel &arg, DefaultToken tok) const { NABoolean specifiedOK = TRUE; if (tok == DF_noSuchToken) tok = getToken(ISOLATION_LEVEL); switch (tok) { case DF_READ_COMMITTED: arg = TransMode::READ_COMMITTED_; break; case DF_READ_UNCOMMITTED: arg = TransMode::READ_UNCOMMITTED_; break; case DF_REPEATABLE_READ: arg = TransMode::REPEATABLE_READ_; break; case DF_SERIALIZABLE: case DF_SYSTEM: arg = TransMode::SERIALIZABLE_; break; case DF_NONE: arg = TransMode::IL_NOT_SPECIFIED_; break; default: arg = TransMode::SERIALIZABLE_; specifiedOK = FALSE; NAString value(NADHEAP); if (tok != DF_noSuchToken) value = keyword(tok); *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1("ISOLATION_LEVEL"); } return specifiedOK; } // find the packed length for all the default values stored // in currentDefaults_ array. // currentDefaults_ is a fixed sized array of "char *" where each // entry is pointing to the default value for that default. // After pack, the default values are put in the buffer in // sequential order with a null terminator. Lng32 NADefaults::packedLengthDefaults() { Lng32 size = 0; const size_t numAttrs = numDefaultAttributes(); for (size_t i = 0; i < numAttrs; i++) { size += strlen(currentDefaults_[i]) + 1; } return size; } Lng32 NADefaults::packDefaultsToBuffer(char * buffer) { const size_t numAttrs = numDefaultAttributes(); Lng32 totalSize = 0; Lng32 size = 0; for (UInt32 i = 0; i < numAttrs; i++) { size = (Lng32)strlen(currentDefaults_[i]) + 1; strcpy(buffer, currentDefaults_[i]); buffer += size; totalSize += size; } return totalSize; } Lng32 NADefaults::unpackDefaultsFromBuffer(Lng32 numEntriesInBuffer, char * buffer) { return 0; } NABoolean NADefaults::isSameCQD(Lng32 numEntriesInBuffer, char * buffer, Lng32 bufLen) { const Lng32 numCurrentDefaultAttrs = (Lng32)numDefaultAttributes(); // check to see if the default values in 'buffer' are the same // as those in the currentDefaults_ array. // Return TRUE if they are all the same. if (numCurrentDefaultAttrs != numEntriesInBuffer) return FALSE; if (bufLen == 0) return FALSE; Int32 curPos = 0; for (Int32 i = 0; i < numEntriesInBuffer; i++) { if (strcmp(currentDefaults_[i], &buffer[curPos]) != 0) return FALSE; curPos += strlen(&buffer[curPos]) + 1; } // everything matches. return TRUE; } Lng32 NADefaults::createNewDefaults(Lng32 numEntriesInBuffer, char * buffer) { const Lng32 numCurrentDefaultAttrs = (Lng32)numDefaultAttributes(); // save the current defaults savedCurrentDefaults_ = currentDefaults_; savedCurrentFloats_ = currentFloats_; savedCurrentTokens_ = currentTokens_; // VO, Plan Versioning Support. // // This code may execute in a downrev compiler, which knows about fewer // defaults than the compiler originally used to compile the statement. // Only copy those defaults we know about, and skip the rest. Lng32 numEntriesToCopy = _min (numEntriesInBuffer, numCurrentDefaultAttrs); // allocate a new currentDefaults_ array and make it point to // the default values in the input 'buffer'. // If the current number of default attributes are greater than the // ones in the input buffer, then populate the remaining default // entries in the currentDefaults_ array with the values from the // the savedCurrentDefaults_. 
currentDefaults_ = new NADHEAP const char * [numCurrentDefaultAttrs]; Int32 curPos = 0; Int32 i = 0; for (i = 0; i < numEntriesToCopy; i++) { currentDefaults_[i] = &buffer[curPos]; curPos += strlen(&buffer[curPos]) + 1; } for (i = numEntriesToCopy; i < numCurrentDefaultAttrs; i++) { currentDefaults_[i] = savedCurrentDefaults_[i]; } // allocate two empty arrays for floats and tokens. currentFloats_ = new NADHEAP float * [numCurrentDefaultAttrs]; currentTokens_ = new NADHEAP DefaultToken * [numCurrentDefaultAttrs]; memset( currentFloats_, 0, sizeof(float *) * numCurrentDefaultAttrs ); memset( currentTokens_, 0, sizeof(DefaultToken *) * numCurrentDefaultAttrs ); return 0; } Lng32 NADefaults::restoreDefaults(Lng32 numEntriesInBuffer, char * buffer) { // Deallocate the currentDefaults_ array. // The array entries are not to be deleted as they point to // entries in 'buffer' or the 'savedCurrentDefaults_'. // See NADefaults::createNewDefaults() method. if (currentDefaults_) { NADELETEBASIC(currentDefaults_, NADHEAP); } if (currentFloats_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentFloats_[i], NADHEAP); NADELETEBASIC(currentFloats_, NADHEAP); } if (currentTokens_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentTokens_[i], NADHEAP); NADELETEBASIC(currentTokens_, NADHEAP); } // restore the saved defaults currentDefaults_ = savedCurrentDefaults_; currentFloats_ = savedCurrentFloats_; currentTokens_ = savedCurrentTokens_; return 0; } void NADefaults::updateCurrentDefaultsForOSIM(DefaultDefault * defaultDefault, NABoolean validateFloatVal) { Int32 attrEnum = defaultDefault->attrEnum; const char * defaultVal = defaultDefault->value; const char * valueStr = currentDefaults_[attrEnum]; if(valueStr) { NADELETEBASIC(valueStr,NADHEAP); } char * value = new NADHEAP char[strlen(defaultVal) + 1]; strcpy(value, defaultVal); currentDefaults_[attrEnum] = value; if ( validateFloatVal ) { float floatVal = 0; if (validateFloat(currentDefaults_[attrEnum], floatVal, attrEnum)) { if (currentFloats_[attrEnum]) { NADELETEBASIC(currentFloats_[attrEnum], NADHEAP); } currentFloats_[attrEnum] = new NADHEAP float; *currentFloats_[attrEnum] = floatVal; } } if ( currentTokens_[attrEnum] ) { NADELETEBASIC( currentTokens_[attrEnum], NADHEAP ); currentTokens_[attrEnum] = NULL; } } void NADefaults::setSchemaAsLdapUser(const NAString val) { NAString ldapUsername = val; if ( ldapUsername.isNull() ) ldapUsername = getValue(LDAP_USERNAME); if ( ldapUsername.isNull() ) return; ldapUsername.toUpper(); NAString schName = '"'; schName += ldapUsername; schName += '"'; // check schema name before insert // may get special characters from ldap ComSchemaName cSchName(schName); if ( !cSchName.getSchemaNamePart().isEmpty() && cSchName.getCatalogNamePart().isEmpty()) // should have no catalog { insert(SCHEMA, schName); } else { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(schName) << DgString1("SCHEMA"); } }
idx: 1
id: 7,086
msg: What units are used for this default?
proj: apache-trafodion
lang: cpp
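The review message above asks about units. In the NADefaults code captured for this row, the unit of a default is recorded only in the attribute name's suffix (for example MEMORY_LIMIT_QCACHE_UPPER_KB), and each caller converts at the point of use with an expression like (size_t) 1024 * atoi(value.data()). Below is a minimal, self-contained C++ sketch of that convention; the helper name and the sample value are hypothetical, not taken from the Trafodion sources.

#include <cstddef>
#include <cstdlib>
#include <string>

// Hypothetical helper mirroring the pattern visible in NADefaults, e.g.
//   CURRENTQCACHE->setHeapUpperLimit((size_t) 1024 * atoi(value.data()));
// The "_KB" suffix on the attribute name is the only record of the unit,
// so every consumer multiplies by 1024 to get bytes.
static std::size_t kbDefaultToBytes(const std::string &value) {
  return static_cast<std::size_t>(1024) *
         std::strtoul(value.c_str(), nullptr, 10);
}

int main() {
  // A default stored as the string "2048" (interpreted as KB) yields bytes.
  return kbDefaultToBytes("2048") == 2048 * 1024 ? 0 : 1;
}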
@@ -47,7 +47,13 @@ public struct Ability : IFlatbufferObject public class AbilityT { +#if ENABLE_JSON_SERIALIZATION + [Newtonsoft.Json.JsonProperty("id")] +#endif public uint Id { get; set; } +#if ENABLE_JSON_SERIALIZATION + [Newtonsoft.Json.JsonProperty("distance")] +#endif public uint Distance { get; set; } public AbilityT() {
y: 1
// <auto-generated> // automatically generated by the FlatBuffers compiler, do not modify // </auto-generated> namespace MyGame.Example { using global::System; using global::System.Collections.Generic; using global::FlatBuffers; public struct Ability : IFlatbufferObject { private Struct __p; public ByteBuffer ByteBuffer { get { return __p.bb; } } public void __init(int _i, ByteBuffer _bb) { __p = new Struct(_i, _bb); } public Ability __assign(int _i, ByteBuffer _bb) { __init(_i, _bb); return this; } public uint Id { get { return __p.bb.GetUint(__p.bb_pos + 0); } } public void MutateId(uint id) { __p.bb.PutUint(__p.bb_pos + 0, id); } public uint Distance { get { return __p.bb.GetUint(__p.bb_pos + 4); } } public void MutateDistance(uint distance) { __p.bb.PutUint(__p.bb_pos + 4, distance); } public static Offset<MyGame.Example.Ability> CreateAbility(FlatBufferBuilder builder, uint Id, uint Distance) { builder.Prep(4, 8); builder.PutUint(Distance); builder.PutUint(Id); return new Offset<MyGame.Example.Ability>(builder.Offset); } public AbilityT UnPack() { var _o = new AbilityT(); this.UnPackTo(_o); return _o; } public void UnPackTo(AbilityT _o) { _o.Id = this.Id; _o.Distance = this.Distance; } public static Offset<MyGame.Example.Ability> Pack(FlatBufferBuilder builder, AbilityT _o) { if (_o == null) return default(Offset<MyGame.Example.Ability>); return CreateAbility( builder, _o.Id, _o.Distance); } }; public class AbilityT { public uint Id { get; set; } public uint Distance { get; set; } public AbilityT() { this.Id = 0; this.Distance = 0; } } }
idx: 1
id: 17,711
msg: I am wondering if it makes sense to make this a flag to `flatc` rather than a preprocessor flag; since this is generated code, it would make more sense to simply omit this code completely if the flag is not specified
proj: google-flatbuffers
lang: java
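The comment above weighs a preprocessor guard against a generator flag. As a rough illustration of the difference (a sketch only: AbilityT's fields follow the C# struct in this row, but printJson and the ENABLE_JSON_SERIALIZATION compile flag stand in for the Newtonsoft attributes), the guard variant always ships the extra lines and lets each build toggle them, whereas under the reviewer's proposal flatc would omit them entirely unless asked.

#include <cstdio>

// Mirrors the patched output's shape: the generator always writes the
// guarded block, and consumers opt in by compiling with
// -DENABLE_JSON_SERIALIZATION.
struct AbilityT {
  unsigned int id = 0;
  unsigned int distance = 0;
#if defined(ENABLE_JSON_SERIALIZATION)
  // Present only when the macro is defined; the reviewer's alternative
  // would have the generator skip these lines altogether by default.
  void printJson() const {
    std::printf("{\"id\":%u,\"distance\":%u}\n", id, distance);
  }
#endif
};

int main() {
  AbilityT a;
  a.id = 7;
  a.distance = 42;
#if defined(ENABLE_JSON_SERIALIZATION)
  a.printJson();
#else
  std::printf("json support compiled out\n");
#endif
  return 0;
}

Either way the runtime behaviour is the same; the question the reviewer raises is only whether unused serialization code should appear in the generated file at all.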
@@ -584,8 +584,7 @@ class MainWindow(QWidget): quit_texts = [] # Ask if multiple-tabs are open if 'multiple-tabs' in config.val.confirm_quit and tab_count > 1: - quit_texts.append("{} {} open.".format( - tab_count, "tab is" if tab_count == 1 else "tabs are")) + quit_texts.append("{} tabs are open.".format(tab_count)) # Ask if multiple downloads running if 'downloads' in config.val.confirm_quit and download_count > 0: quit_texts.append("{} {} running.".format(
y: 1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2019 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """The main window of qutebrowser.""" import binascii import base64 import itertools import functools from PyQt5.QtCore import (pyqtSlot, QRect, QPoint, QTimer, Qt, QCoreApplication, QEventLoop) from PyQt5.QtWidgets import QWidget, QVBoxLayout, QApplication, QSizePolicy from qutebrowser.commands import runners from qutebrowser.api import cmdutils from qutebrowser.config import config, configfiles from qutebrowser.utils import (message, log, usertypes, qtutils, objreg, utils, jinja, debug) from qutebrowser.mainwindow import messageview, prompt from qutebrowser.completion import completionwidget, completer from qutebrowser.keyinput import modeman from qutebrowser.browser import commands, downloadview, hints, downloads from qutebrowser.misc import crashsignal, keyhintwidget win_id_gen = itertools.count(0) def get_window(via_ipc, force_window=False, force_tab=False, force_target=None, no_raise=False): """Helper function for app.py to get a window id. Args: via_ipc: Whether the request was made via IPC. force_window: Whether to force opening in a window. force_tab: Whether to force opening in a tab. 
force_target: Override the new_instance_open_target config no_raise: suppress target window raising Return: ID of a window that was used to open URL """ if force_window and force_tab: raise ValueError("force_window and force_tab are mutually exclusive!") if not via_ipc: # Initial main window return 0 open_target = config.val.new_instance_open_target # Apply any target overrides, ordered by precedence if force_target is not None: open_target = force_target if force_window: open_target = 'window' if force_tab and open_target == 'window': # Command sent via IPC open_target = 'tab-silent' window = None should_raise = False # Try to find the existing tab target if opening in a tab if open_target != 'window': window = get_target_window() should_raise = open_target not in ['tab-silent', 'tab-bg-silent'] # Otherwise, or if no window was found, create a new one if window is None: window = MainWindow(private=None) window.show() should_raise = True if should_raise and not no_raise: raise_window(window) return window.win_id def raise_window(window, alert=True): """Raise the given MainWindow object.""" window.setWindowState(window.windowState() & ~Qt.WindowMinimized) window.setWindowState(window.windowState() | Qt.WindowActive) window.raise_() # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-69568 QCoreApplication.processEvents( QEventLoop.ExcludeUserInputEvents | QEventLoop.ExcludeSocketNotifiers) window.activateWindow() if alert: QApplication.instance().alert(window) def get_target_window(): """Get the target window for new tabs, or None if none exist.""" try: win_mode = config.val.new_instance_open_target_window if win_mode == 'last-focused': return objreg.last_focused_window() elif win_mode == 'first-opened': return objreg.window_by_index(0) elif win_mode == 'last-opened': return objreg.window_by_index(-1) elif win_mode == 'last-visible': return objreg.last_visible_window() else: raise ValueError("Invalid win_mode {}".format(win_mode)) except objreg.NoWindow: return None class MainWindow(QWidget): """The main window of qutebrowser. Adds all needed components to a vbox, initializes sub-widgets and connects signals. Attributes: status: The StatusBar widget. tabbed_browser: The TabbedBrowser widget. state_before_fullscreen: window state before activation of fullscreen. _downloadview: The DownloadView widget. _vbox: The main QVBoxLayout. _commandrunner: The main CommandRunner instance. _overlays: Widgets shown as overlay for the current webpage. _private: Whether the window is in private browsing mode. """ def __init__(self, *, private, geometry=None, parent=None): """Create a new main window. Args: geometry: The geometry to load, as a bytes-object (or None). private: Whether the window is in private browsing mode. parent: The parent the window should get. 
""" super().__init__(parent) # Late import to avoid a circular dependency # - browsertab -> hints -> webelem -> mainwindow -> bar -> browsertab from qutebrowser.mainwindow import tabbedbrowser from qutebrowser.mainwindow.statusbar import bar self.setAttribute(Qt.WA_DeleteOnClose) self._commandrunner = None self._overlays = [] self.win_id = next(win_id_gen) self.registry = objreg.ObjectRegistry() objreg.window_registry[self.win_id] = self objreg.register('main-window', self, scope='window', window=self.win_id) tab_registry = objreg.ObjectRegistry() objreg.register('tab-registry', tab_registry, scope='window', window=self.win_id) message_bridge = message.MessageBridge(self) objreg.register('message-bridge', message_bridge, scope='window', window=self.win_id) self.setWindowTitle('qutebrowser') self._vbox = QVBoxLayout(self) self._vbox.setContentsMargins(0, 0, 0, 0) self._vbox.setSpacing(0) self._init_downloadmanager() self._downloadview = downloadview.DownloadView(self.win_id) if config.val.content.private_browsing: # This setting always trumps what's passed in. private = True else: private = bool(private) self._private = private self.tabbed_browser = tabbedbrowser.TabbedBrowser(win_id=self.win_id, private=private, parent=self) objreg.register('tabbed-browser', self.tabbed_browser, scope='window', window=self.win_id) self._init_command_dispatcher() # We need to set an explicit parent for StatusBar because it does some # show/hide magic immediately which would mean it'd show up as a # window. self.status = bar.StatusBar(win_id=self.win_id, private=private, parent=self) self._add_widgets() self._downloadview.show() self._init_completion() log.init.debug("Initializing modes...") modeman.init(self.win_id, self) self._commandrunner = runners.CommandRunner(self.win_id, partial_match=True) self._keyhint = keyhintwidget.KeyHintView(self.win_id, self) self._add_overlay(self._keyhint, self._keyhint.update_geometry) self._prompt_container = prompt.PromptContainer(self.win_id, self) self._add_overlay(self._prompt_container, self._prompt_container.update_geometry, centered=True, padding=10) objreg.register('prompt-container', self._prompt_container, scope='window', window=self.win_id) self._prompt_container.hide() self._messageview = messageview.MessageView(parent=self) self._add_overlay(self._messageview, self._messageview.update_geometry) self._init_geometry(geometry) self._connect_signals() # When we're here the statusbar might not even really exist yet, so # resizing will fail. Therefore, we use singleShot QTimers to make sure # we defer this until everything else is initialized. 
QTimer.singleShot(0, self._connect_overlay_signals) config.instance.changed.connect(self._on_config_changed) objreg.get("app").new_window.emit(self) self._set_decoration(config.val.window.hide_decoration) self.state_before_fullscreen = self.windowState() def _init_geometry(self, geometry): """Initialize the window geometry or load it from disk.""" if geometry is not None: self._load_geometry(geometry) elif self.win_id == 0: self._load_state_geometry() else: self._set_default_geometry() log.init.debug("Initial main window geometry: {}".format( self.geometry())) def _add_overlay(self, widget, signal, *, centered=False, padding=0): self._overlays.append((widget, signal, centered, padding)) def _update_overlay_geometries(self): """Update the size/position of all overlays.""" for w, _signal, centered, padding in self._overlays: self._update_overlay_geometry(w, centered, padding) def _update_overlay_geometry(self, widget, centered, padding): """Reposition/resize the given overlay.""" if not widget.isVisible(): return size_hint = widget.sizeHint() if widget.sizePolicy().horizontalPolicy() == QSizePolicy.Expanding: width = self.width() - 2 * padding left = padding else: width = min(size_hint.width(), self.width() - 2 * padding) left = (self.width() - width) / 2 if centered else 0 height_padding = 20 status_position = config.val.statusbar.position if status_position == 'bottom': if self.status.isVisible(): status_height = self.status.height() bottom = self.status.geometry().top() else: status_height = 0 bottom = self.height() top = self.height() - status_height - size_hint.height() top = qtutils.check_overflow(top, 'int', fatal=False) topleft = QPoint(left, max(height_padding, top)) bottomright = QPoint(left + width, bottom) elif status_position == 'top': if self.status.isVisible(): status_height = self.status.height() top = self.status.geometry().bottom() else: status_height = 0 top = 0 topleft = QPoint(left, top) bottom = status_height + size_hint.height() bottom = qtutils.check_overflow(bottom, 'int', fatal=False) bottomright = QPoint(left + width, min(self.height() - height_padding, bottom)) else: raise ValueError("Invalid position {}!".format(status_position)) rect = QRect(topleft, bottomright) log.misc.debug('new geometry for {!r}: {}'.format(widget, rect)) if rect.isValid(): widget.setGeometry(rect) def _init_downloadmanager(self): log.init.debug("Initializing downloads...") qtnetwork_download_manager = objreg.get('qtnetwork-download-manager') try: webengine_download_manager = objreg.get( 'webengine-download-manager') except KeyError: webengine_download_manager = None download_model = downloads.DownloadModel(qtnetwork_download_manager, webengine_download_manager) objreg.register('download-model', download_model, scope='window', window=self.win_id) def _init_completion(self): self._completion = completionwidget.CompletionView(self.win_id, self) cmd = objreg.get('status-command', scope='window', window=self.win_id) completer_obj = completer.Completer(cmd=cmd, win_id=self.win_id, parent=self._completion) self._completion.selection_changed.connect( completer_obj.on_selection_changed) objreg.register('completion', self._completion, scope='window', window=self.win_id) self._add_overlay(self._completion, self._completion.update_geometry) def _init_command_dispatcher(self): dispatcher = commands.CommandDispatcher(self.win_id, self.tabbed_browser) objreg.register('command-dispatcher', dispatcher, scope='window', window=self.win_id) self.tabbed_browser.widget.destroyed.connect( 
functools.partial(objreg.delete, 'command-dispatcher', scope='window', window=self.win_id)) def __repr__(self): return utils.get_repr(self) @pyqtSlot(str) def _on_config_changed(self, option): """Resize the completion if related config options changed.""" if option == 'statusbar.padding': self._update_overlay_geometries() elif option == 'downloads.position': self._add_widgets() elif option == 'statusbar.position': self._add_widgets() self._update_overlay_geometries() elif option == 'window.hide_decoration': self._set_decoration(config.val.window.hide_decoration) def _add_widgets(self): """Add or readd all widgets to the VBox.""" self._vbox.removeWidget(self.tabbed_browser.widget) self._vbox.removeWidget(self._downloadview) self._vbox.removeWidget(self.status) widgets = [self.tabbed_browser.widget] downloads_position = config.val.downloads.position if downloads_position == 'top': widgets.insert(0, self._downloadview) elif downloads_position == 'bottom': widgets.append(self._downloadview) else: raise ValueError("Invalid position {}!".format(downloads_position)) status_position = config.val.statusbar.position if status_position == 'top': widgets.insert(0, self.status) elif status_position == 'bottom': widgets.append(self.status) else: raise ValueError("Invalid position {}!".format(status_position)) for widget in widgets: self._vbox.addWidget(widget) def _load_state_geometry(self): """Load the geometry from the state file.""" try: data = configfiles.state['geometry']['mainwindow'] geom = base64.b64decode(data, validate=True) except KeyError: # First start self._set_default_geometry() except binascii.Error: log.init.exception("Error while reading geometry") self._set_default_geometry() else: self._load_geometry(geom) def _save_geometry(self): """Save the window geometry to the state config.""" data = bytes(self.saveGeometry()) geom = base64.b64encode(data).decode('ASCII') configfiles.state['geometry']['mainwindow'] = geom def _load_geometry(self, geom): """Load geometry from a bytes object. If loading fails, loads default geometry. 
""" log.init.debug("Loading mainwindow from {!r}".format(geom)) ok = self.restoreGeometry(geom) if not ok: log.init.warning("Error while loading geometry.") self._set_default_geometry() def _connect_overlay_signals(self): """Connect the resize signal and resize everything once.""" for widget, signal, centered, padding in self._overlays: signal.connect( functools.partial(self._update_overlay_geometry, widget, centered, padding)) self._update_overlay_geometry(widget, centered, padding) def _set_default_geometry(self): """Set some sensible default geometry.""" self.setGeometry(QRect(50, 50, 800, 600)) def _get_object(self, name): """Get an object for this window in the object registry.""" return objreg.get(name, scope='window', window=self.win_id) def _connect_signals(self): """Connect all mainwindow signals.""" status = self._get_object('statusbar') keyparsers = self._get_object('keyparsers') completion_obj = self._get_object('completion') cmd = self._get_object('status-command') message_bridge = self._get_object('message-bridge') mode_manager = self._get_object('mode-manager') # misc self.tabbed_browser.close_window.connect(self.close) mode_manager.entered.connect(hints.on_mode_entered) # status bar mode_manager.entered.connect(status.on_mode_entered) mode_manager.left.connect(status.on_mode_left) mode_manager.left.connect(cmd.on_mode_left) mode_manager.left.connect(message.global_bridge.mode_left) # commands keyparsers[usertypes.KeyMode.normal].keystring_updated.connect( status.keystring.setText) cmd.got_cmd[str].connect(self._commandrunner.run_safely) cmd.got_cmd[str, int].connect(self._commandrunner.run_safely) cmd.returnPressed.connect(self.tabbed_browser.on_cmd_return_pressed) # key hint popup for mode, parser in keyparsers.items(): parser.keystring_updated.connect(functools.partial( self._keyhint.update_keyhint, mode.name)) # messages message.global_bridge.show_message.connect( self._messageview.show_message) message.global_bridge.flush() message.global_bridge.clear_messages.connect( self._messageview.clear_messages) message_bridge.s_set_text.connect(status.set_text) message_bridge.s_maybe_reset_text.connect(status.txt.maybe_reset_text) # statusbar self.tabbed_browser.current_tab_changed.connect(status.on_tab_changed) self.tabbed_browser.cur_progress.connect(status.prog.setValue) self.tabbed_browser.cur_load_finished.connect(status.prog.hide) self.tabbed_browser.cur_load_started.connect( status.prog.on_load_started) self.tabbed_browser.cur_scroll_perc_changed.connect( status.percentage.set_perc) self.tabbed_browser.widget.tab_index_changed.connect( status.tabindex.on_tab_index_changed) self.tabbed_browser.cur_url_changed.connect(status.url.set_url) self.tabbed_browser.cur_url_changed.connect(functools.partial( status.backforward.on_tab_cur_url_changed, tabs=self.tabbed_browser)) self.tabbed_browser.cur_link_hovered.connect(status.url.set_hover_url) self.tabbed_browser.cur_load_status_changed.connect( status.url.on_load_status_changed) self.tabbed_browser.cur_caret_selection_toggled.connect( status.on_caret_selection_toggled) self.tabbed_browser.cur_fullscreen_requested.connect( self._on_fullscreen_requested) self.tabbed_browser.cur_fullscreen_requested.connect(status.maybe_hide) # command input / completion mode_manager.entered.connect(self.tabbed_browser.on_mode_entered) mode_manager.left.connect(self.tabbed_browser.on_mode_left) cmd.clear_completion_selection.connect( completion_obj.on_clear_completion_selection) cmd.hide_completion.connect(completion_obj.hide) def 
_set_decoration(self, hidden): """Set the visibility of the window decoration via Qt.""" window_flags = Qt.Window refresh_window = self.isVisible() if hidden: window_flags |= Qt.CustomizeWindowHint | Qt.NoDropShadowWindowHint self.setWindowFlags(window_flags) if refresh_window: self.show() @pyqtSlot(bool) def _on_fullscreen_requested(self, on): if not config.val.content.windowed_fullscreen: if on: self.state_before_fullscreen = self.windowState() self.setWindowState( Qt.WindowFullScreen | self.state_before_fullscreen) elif self.isFullScreen(): self.setWindowState(self.state_before_fullscreen) log.misc.debug('on: {}, state before fullscreen: {}'.format( on, debug.qflags_key(Qt, self.state_before_fullscreen))) @cmdutils.register(instance='main-window', scope='window') @pyqtSlot() def close(self): """Close the current window. // Extend close() so we can register it as a command. """ super().close() def resizeEvent(self, e): """Extend resizewindow's resizeEvent to adjust completion. Args: e: The QResizeEvent """ super().resizeEvent(e) self._update_overlay_geometries() self._downloadview.updateGeometry() self.tabbed_browser.widget.tabBar().refresh() def showEvent(self, e): """Extend showEvent to register us as the last-visible-main-window. Args: e: The QShowEvent """ super().showEvent(e) objreg.register('last-visible-main-window', self, update=True) def _do_close(self): """Helper function for closeEvent.""" try: last_visible = objreg.get('last-visible-main-window') if self is last_visible: objreg.delete('last-visible-main-window') except KeyError: pass objreg.get('session-manager').save_last_window_session() self._save_geometry() log.destroy.debug("Closing window {}".format(self.win_id)) self.tabbed_browser.shutdown() def closeEvent(self, e): """Override closeEvent to display a confirmation if needed.""" if crashsignal.is_crashing: e.accept() return tab_count = self.tabbed_browser.widget.count() download_model = objreg.get('download-model', scope='window', window=self.win_id) download_count = download_model.running_downloads() quit_texts = [] # Ask if multiple-tabs are open if 'multiple-tabs' in config.val.confirm_quit and tab_count > 1: quit_texts.append("{} {} open.".format( tab_count, "tab is" if tab_count == 1 else "tabs are")) # Ask if multiple downloads running if 'downloads' in config.val.confirm_quit and download_count > 0: quit_texts.append("{} {} running.".format( download_count, "download is" if download_count == 1 else "downloads are")) # Process all quit messages that user must confirm if quit_texts or 'always' in config.val.confirm_quit: msg = jinja.environment.from_string(""" <ul> {% for text in quit_texts %} <li>{{text}}</li> {% endfor %} </ul> """.strip()).render(quit_texts=quit_texts) confirmed = message.ask('Really quit?', msg, mode=usertypes.PromptMode.yesno, default=True) # Stop asking if the user cancels if not confirmed: log.destroy.debug("Cancelling closing of window {}".format( self.win_id)) e.ignore() return e.accept() self._do_close()
1
23,357
Doesn't this change the behavior? (Edit: oh, I see, never mind.)
qutebrowser-qutebrowser
py
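The behavior question above concerns the target-resolution order in `get_window`. A condensed, standalone rendering of that precedence, mirroring the names in the file (this is a sketch, not qutebrowser's actual code):

```python
# Condensed sketch of the override precedence in get_window() above, so the
# behavior the reviewer was checking is easy to see in isolation.
def resolve_open_target(configured, force_target=None,
                        force_window=False, force_tab=False):
    open_target = configured
    if force_target is not None:
        open_target = force_target
    if force_window:
        open_target = 'window'
    if force_tab and open_target == 'window':
        # Command sent via IPC
        open_target = 'tab-silent'
    return open_target

assert resolve_open_target('window', force_tab=True) == 'tab-silent'
assert resolve_open_target('tab', force_window=True) == 'window'
assert resolve_open_target('tab', force_target='window') == 'window'
```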
@@ -216,11 +216,15 @@ module Travis sh.cmd 'sudo ln -f -s /lib/i386-linux-gnu/libpam.so.0 /lib/libpam.so.0' sh.cmd 'sudo ln -f -s /usr/lib/i386-lin-gnu/libstdc++.so.6 /usr/lib/i386-linux-gnu/libstdc++.so' end - sh.if '$(uname -m) != ppc64le && $(lsb_release -cs) = trusty' do + sh.elif '$(lsb_release -cs) = trusty' do sh.cmd 'sudo dpkg --add-architecture i386' gemstone_install_linux_dependencies sh.cmd 'sudo ln -f -s /usr/lib/i386-lin-gnu/libstdc++.so.6 /usr/lib/i386-linux-gnu/libstdc++.so' end + sh.elif '$(lsb_release -cs) = xenial || $(lsb_release -cs) = bionic' do + sh.cmd 'sudo dpkg --add-architecture i386' + gemstone_install_linux_dependencies + end end def gemstone_install_linux_dependencies
1
# Copyright (c) 2015-2017 Software Architecture Group (Hasso Plattner Institute) # Copyright (c) 2015-2017 Fabio Niephaus, Google Inc. module Travis module Build class Script class Smalltalk < Script DEFAULTS = {} DEFAULT_REPOSITORY = 'hpi-swa/smalltalkCI' DEFAULT_BRANCH = 'master' HOSTS_FILE = '/etc/hosts' TEMP_HOSTS_FILE = '/tmp/hosts' SYSCTL_FILE = '/etc/sysctl.conf' TEMP_SYSCTL_FILE = '/tmp/sysctl.conf' DEFAULT_32BIT_DEPS = 'libc6:i386 libuuid1:i386 libfreetype6:i386 libssl1.0.0:i386' PHARO_32BIT_DEPS = "#{DEFAULT_32BIT_DEPS} libcairo2:i386" X64_REGEXP = /^[a-zA-Z]*64\-/ def configure super if is_squeak? or is_etoys? install_dependencies(DEFAULT_32BIT_DEPS) elsif is_pharo? or is_moose? install_dependencies(PHARO_32BIT_DEPS) elsif is_gemstone? sh.fold 'gemstone_prepare_dependencies' do sh.echo 'Preparing build for GemStone', ansi: :yellow gemstone_configure_hosts case config[:os] when 'linux' gemstone_prepare_linux_shared_memory gemstone_prepare_linux_dependencies when 'osx' gemstone_prepare_osx_shared_memory end gemstone_prepare_netldi gemstone_prepare_directories end end end def export super sh.export 'TRAVIS_SMALLTALK_CONFIG', smalltalk_config, echo: false sh.export 'TRAVIS_SMALLTALK_VERSION', smalltalk_version, echo: false sh.export 'TRAVIS_SMALLTALK_VM', smalltalk_vm, echo: false end def setup super sh.echo 'Smalltalk for Travis CI is not officially supported, ' \ 'but is community-maintained.', ansi: :green sh.echo 'Please file any issues using the following link', ansi: :green sh.echo ' https://github.com/hpi-swa/smalltalkCI/issues', ansi: :green sh.cmd 'pushd ${TRAVIS_HOME} > /dev/null', echo: false sh.fold 'download_smalltalkci' do sh.echo 'Downloading and extracting smalltalkCI', ansi: :yellow sh.cmd "wget -q -O smalltalkCI.zip #{download_url}" sh.cmd 'unzip -q -o smalltalkCI.zip' sh.cmd 'pushd smalltalkCI-* > /dev/null', echo: false sh.cmd 'source env_vars' sh.cmd 'export PATH="$(pwd)/bin:$PATH"' sh.cmd 'popd > /dev/null; popd > /dev/null', echo: false end end def script super sh.cmd "smalltalkci" end private def smalltalk_ci_repo config.fetch(:smalltalk_edge, {}).fetch(:source, DEFAULT_REPOSITORY) end def smalltalk_ci_branch config.fetch(:smalltalk_edge, {}).fetch(:branch, DEFAULT_BRANCH) end def smalltalk_config config[:smalltalk_config].to_s end def smalltalk_version Array(config[:smalltalk]).first.to_s end def smalltalk_vm config[:smalltalk_vm].to_s end def download_url "https://github.com/#{smalltalk_ci_repo}/archive/#{smalltalk_ci_branch}.zip" end def is_squeak? is_platform?('squeak') end def is_etoys? is_platform?('etoys') end def is_pharo? is_platform?('pharo') end def is_moose? is_platform?('moose') end def is_gemstone? is_platform?('gemstone') end def is_platform?(name) smalltalk_version.downcase.start_with?(name) end def is_linux? config[:os] == 'linux' end def is_64bit? smalltalk_version =~ X64_REGEXP || smalltalk_vm =~ X64_REGEXP end def install_dependencies(deps_32bit) return if !is_linux? || is_64bit? 
sh.fold 'install_packages' do sh.echo 'Installing dependencies', ansi: :yellow sh.if '$(uname -m) != ppc64le && $(lsb_release -cs) != precise' do sh.cmd 'sudo dpkg --add-architecture i386' end sh.cmd 'travis_apt_get_update', retry: true sh.cmd "sudo apt-get install -y --no-install-recommends #{deps_32bit}", retry: true end end def gemstone_configure_hosts sh.echo 'Configuring /etc/hosts file', ansi: :yellow sh.cmd "sed -e \"s/^\\(127\\.0\\.0\\.1.*\\)$/\\1 $(hostname)/\" #{HOSTS_FILE} | sed -e \"s/^\\(::1.*\\)$/\\1 $(hostname)/\" > #{TEMP_HOSTS_FILE}" sh.cmd "cat #{TEMP_HOSTS_FILE} | sudo tee #{HOSTS_FILE} > /dev/null" end def gemstone_prepare_linux_shared_memory sh.echo 'Setting up shared memory', ansi: :yellow sh.cmd 'SMALLTALK_CI_TOTALMEM=$(($(awk \'/MemTotal:/{print($2);}\' /proc/meminfo) * 1024))' sh.cmd 'SMALLTALK_CI_SHMMAX=$(cat /proc/sys/kernel/shmmax)' sh.cmd 'SMALLTALK_CI_SHMALL=$(cat /proc/sys/kernel/shmall)' sh.cmd 'SMALLTALK_CI_SHMMAX_NEW=$(($SMALLTALK_CI_TOTALMEM * 3/4))' sh.if '$SMALLTALK_CI_SHMMAX_NEW -gt 2147483648' do sh.cmd 'SMALLTALK_CI_SHMMAX_NEW=2147483648' end sh.if '$SMALLTALK_CI_SHMMAX_NEW -gt $SMALLTALK_CI_SHMMAX' do sh.cmd 'sudo bash -c "echo $SMALLTALK_CI_SHMMAX_NEW > /proc/sys/kernel/shmmax"' sh.cmd "sudo /bin/su -c \"echo 'kernel.shmmax=$SMALLTALK_CI_SHMMAX_NEW' >> #{SYSCTL_FILE}\"" end sh.cmd 'SMALLTALK_CI_SHMALL_NEW=$(($SMALLTALK_CI_SHMMAX_NEW / 4096))' sh.if '$SMALLTALK_CI_SHMALL_NEW -gt $SMALLTALK_CI_SHMALL' do sh.cmd 'sudo bash -c "echo $SMALLTALK_CI_SHMALL_NEW > /proc/sys/kernel/shmall"' end sh.if "! -f #{SYSCTL_FILE} || $(grep -sc \"kern.*m\" #{SYSCTL_FILE}) -eq 0" do sh.cmd "echo \"kernelmmax=$(cat /proc/sys/kernel/shmmax)\" >> #{TEMP_SYSCTL_FILE}" sh.cmd "echo \"kernelmall=$(cat /proc/sys/kernel/shmall)\" >> #{TEMP_SYSCTL_FILE}" sh.cmd "sudo bash -c \"cat #{TEMP_SYSCTL_FILE} >> #{SYSCTL_FILE}\"" sh.cmd "/bin/rm -f #{TEMP_SYSCTL_FILE}" end end def gemstone_prepare_osx_shared_memory sh.echo 'Setting up shared memory', ansi: :yellow sh.cmd 'SMALLTALK_CI_TOTALMEM=$(($(sysctl hw.memsize | cut -f2 -d\' \') * 1024))' sh.cmd 'SMALLTALK_CI_SHMMAX=$(sysctl kern.sysv.shmmax | cut -f2 -d\' \')' sh.cmd 'SMALLTALK_CI_SHMALL=$(sysctl kern.sysv.shmall | cut -f2 -d\' \')' sh.cmd 'SMALLTALK_CI_SHMMAX_NEW=$(($SMALLTALK_CI_TOTALMEM * 3/4))' sh.if '$SMALLTALK_CI_SHMMAX_NEW -gt 2147483648' do sh.cmd 'SMALLTALK_CI_SHMMAX_NEW=2147483648' end sh.if '$SMALLTALK_CI_SHMMAX_NEW -gt $SMALLTALK_CI_SHMMAX' do sh.cmd 'sudo sysctl -w kern.sysv.shmmax=$SMALLTALK_CI_SHMMAX_NEW' end sh.cmd 'SMALLTALK_CI_SHMALL_NEW=$(($SMALLTALK_CI_SHMMAX_NEW / 4096))' sh.if '$SMALLTALK_CI_SHMALL_NEW -gt $SMALLTALK_CI_SHMALL' do sh.cmd 'sudo sysctl -w kern.sysv.shmall=$SMALLTALK_CI_SHMALL_NEW' end sh.if "! 
-f #{SYSCTL_FILE} || $(grep -sc \"kern.*m\" #{SYSCTL_FILE}) -eq 0" do sh.cmd "sysctl kern.sysv.shmmax kern.sysv.shmall kern.sysv.shmmin kern.sysv.shmmni | tr \":\" \"=\" | tr -d \" \" >> #{TEMP_SYSCTL_FILE}" sh.cmd "sudo bash -c \"cat #{TEMP_SYSCTL_FILE} >> #{SYSCTL_FILE}\"" sh.cmd "/bin/rm -f #{TEMP_SYSCTL_FILE}" end end def gemstone_prepare_linux_dependencies sh.if '$(lsb_release -cs) = precise' do gemstone_install_linux_dependencies sh.cmd 'sudo ln -f -s /lib/i386-linux-gnu/libpam.so.0 /lib/libpam.so.0' sh.cmd 'sudo ln -f -s /usr/lib/i386-lin-gnu/libstdc++.so.6 /usr/lib/i386-linux-gnu/libstdc++.so' end sh.if '$(uname -m) != ppc64le && $(lsb_release -cs) = trusty' do sh.cmd 'sudo dpkg --add-architecture i386' gemstone_install_linux_dependencies sh.cmd 'sudo ln -f -s /usr/lib/i386-lin-gnu/libstdc++.so.6 /usr/lib/i386-linux-gnu/libstdc++.so' end end def gemstone_install_linux_dependencies sh.fold 'gemstone_dependencies' do sh.echo 'Installing GemStone dependencies', ansi: :yellow sh.cmd 'travis_apt_get_update', retry: true sh.cmd 'sudo apt-get install -y --no-install-recommends ' + 'libpam0g:i386 libssl1.0.0:i386 gcc-multilib ' + 'libstdc++6:i386 libfreetype6:i386 pstack ' + 'libgl1-mesa-glx:i386 libxcb-dri2-0:i386', retry: true sh.cmd "sudo /bin/su -c \"echo 'kernel.yama.ptrace_scope = 0' >>/etc/sysctl.d/10-ptrace.conf\"" end end def gemstone_prepare_netldi sh.if '$(grep -sc "^gs64ldi" /etc/services) -eq 0' do sh.echo 'Setting up GemStone netldi service port', ansi: :yellow sh.cmd "sudo bash -c 'echo \"gs64ldi 50377/tcp # Gemstone netldi\" >> /etc/services'" end end def gemstone_prepare_directories sh.if '! -e /opt/gemstone' do sh.echo 'Creating /opt/gemstone directory', ansi: :yellow sh.cmd 'sudo mkdir -p /opt/gemstone /opt/gemstone/log /opt/gemstone/locks' sh.cmd 'sudo chown $USER:${GROUPS[0]} /opt/gemstone /opt/gemstone/log /opt/gemstone/locks' sh.cmd 'sudo chmod 770 /opt/gemstone /opt/gemstone/log /opt/gemstone/locks' end end end end end end
1
17,289
I think you need a `do` at the end of this line.
travis-ci-travis-build
rb
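For the `do` comment: `sh.elif`, like `sh.if`, takes a condition string plus a Ruby block, so the DSL line has to end in `do`. A self-contained toy version of the DSL, only to show the shape (the real `sh` object comes from travis-build and is not reproduced here):

```ruby
# Minimal stand-in for the travis-build `sh` DSL; the point is the
# `do ... end` block shape the review comment asks for.
class FakeSh
  def elif(cond)
    puts "elif #{cond}"
    yield
  end

  def cmd(command)
    puts "  #{command}"
  end
end

sh = FakeSh.new
sh.elif '$(lsb_release -cs) = xenial || $(lsb_release -cs) = bionic' do
  sh.cmd 'sudo dpkg --add-architecture i386'
end
```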
@@ -5,7 +5,7 @@ <%= link_to(t('blacklight.search.sort.label', :field =>current_sort_field.label), "#") %> <span class="caret"></span> <ul> <%- blacklight_config.sort_fields.each do |sort_key, field| %> - <li><%= link_to(field.label, url_for(params_for_search(:sort => sort_key))) %></li> + <li><%= link_to(field.label, params_for_search(params.merge(:sort => sort_key))) %></li> <%- end -%> </ul> </li>
1
<% if show_sort_and_per_page? and !blacklight_config.sort_fields.blank? %> <div id="sort-dropdown" class="dropdown pull-right hidden-phone"> <ul class="css-dropdown"> <li class="btn"> <%= link_to(t('blacklight.search.sort.label', :field =>current_sort_field.label), "#") %> <span class="caret"></span> <ul> <%- blacklight_config.sort_fields.each do |sort_key, field| %> <li><%= link_to(field.label, url_for(params_for_search(:sort => sort_key))) %></li> <%- end -%> </ul> </li> </ul> </div> <% end %>
1
4,781
Not passing in a `:params` key here.
projectblacklight-blacklight
rb
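The comment above refers to `params_for_search`'s option hash. A hypothetical version of the template line that does pass the source params under a `:params` key, assuming (unverified here) that the helper reads that option and falls back to the controller's `params` otherwise:

```erb
<%# Hypothetical fix only: assumes params_for_search accepts a :params
    option for its source parameters; the helper's real signature in this
    Blacklight version is not shown in this record. %>
<li><%= link_to(field.label,
                params_for_search(:sort => sort_key, :params => params)) %></li>
```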
@@ -308,6 +308,7 @@ func (r *ReconcileClusterClaim) reconcileForDeletedCluster(claim *hivev1.Cluster func (r *ReconcileClusterClaim) reconcileForNewAssignment(claim *hivev1.ClusterClaim, cd *hivev1.ClusterDeployment, logger log.FieldLogger) (reconcile.Result, error) { logger.Info("cluster assigned to claim") cd.Spec.ClusterPoolRef.ClaimName = claim.Name + cd.Spec.PowerState = hivev1.RunningClusterPowerState if err := r.Update(context.Background(), cd); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not set claim for ClusterDeployment") return reconcile.Result{}, err
1
package clusterclaim import ( "context" "reflect" "time" "github.com/pkg/errors" log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" hivev1 "github.com/openshift/hive/pkg/apis/hive/v1" hivemetrics "github.com/openshift/hive/pkg/controller/metrics" controllerutils "github.com/openshift/hive/pkg/controller/utils" "github.com/openshift/hive/pkg/resource" ) const ( ControllerName = "clusterclaim" finalizer = "hive.openshift.io/claim" hiveClaimOwnerRoleName = "hive-claim-owner" hiveClaimOwnerRoleBindingName = "hive-claim-owner" ) // Add creates a new ClusterClaim Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(mgr manager.Manager) error { return AddToManager(mgr, NewReconciler(mgr)) } // NewReconciler returns a new ReconcileClusterClaim func NewReconciler(mgr manager.Manager) *ReconcileClusterClaim { logger := log.WithField("controller", ControllerName) return &ReconcileClusterClaim{ Client: controllerutils.NewClientWithMetricsOrDie(mgr, ControllerName), logger: logger, } } // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler func AddToManager(mgr manager.Manager, r *ReconcileClusterClaim) error { // Create a new controller c, err := controller.New("clusterclaim-controller", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: controllerutils.GetConcurrentReconciles()}) if err != nil { return err } // Watch for changes to ClusterClaim if err := c.Watch(&source.Kind{Type: &hivev1.ClusterClaim{}}, &handler.EnqueueRequestForObject{}); err != nil { return err } // Watch for changes to ClusterDeployment if err := c.Watch( &source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestsFromMapFunc{ ToRequests: handler.ToRequestsFunc(requestsForClusterDeployment), }, ); err != nil { return err } // Watch for changes to the hive-claim-owner Role if err := c.Watch( &source.Kind{Type: &rbacv1.Role{}}, &handler.EnqueueRequestsFromMapFunc{ ToRequests: requestsForRBACResources(r.Client, hiveClaimOwnerRoleName, r.logger), }, ); err != nil { return err } // Watch for changes to the hive-claim-owner RoleBinding if err := c.Watch( &source.Kind{Type: &rbacv1.Role{}}, &handler.EnqueueRequestsFromMapFunc{ ToRequests: requestsForRBACResources(r.Client, hiveClaimOwnerRoleBindingName, r.logger), }, ); err != nil { return err } return nil } func claimForClusterDeployment(cd *hivev1.ClusterDeployment) *types.NamespacedName { if cd.Spec.ClusterPoolRef == nil { return nil } if cd.Spec.ClusterPoolRef.ClaimName == "" { return nil } return &types.NamespacedName{ Namespace: cd.Spec.ClusterPoolRef.Namespace, Name: cd.Spec.ClusterPoolRef.ClaimName, } } func requestsForClusterDeployment(o handler.MapObject) []reconcile.Request { cd, ok := o.Object.(*hivev1.ClusterDeployment) if !ok { return nil } claim := claimForClusterDeployment(cd) if claim == nil { return nil } return []reconcile.Request{{NamespacedName: *claim}} } func requestsForRBACResources(c client.Client, resourceName string, logger log.FieldLogger) handler.ToRequestsFunc { return func(o 
handler.MapObject) []reconcile.Request { if o.Meta.GetName() != resourceName { return nil } clusterName := o.Meta.GetNamespace() cd := &hivev1.ClusterDeployment{} if err := c.Get(context.Background(), client.ObjectKey{Namespace: clusterName, Name: clusterName}, cd); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "failed to get ClusterDeployment for RBAC resource") return nil } claim := claimForClusterDeployment(cd) if claim == nil { return nil } return []reconcile.Request{{NamespacedName: *claim}} } } var _ reconcile.Reconciler = &ReconcileClusterClaim{} // ReconcileClusterClaim reconciles a CLusterClaim object type ReconcileClusterClaim struct { client.Client logger log.FieldLogger } // Reconcile reconciles a ClusterClaim. func (r *ReconcileClusterClaim) Reconcile(request reconcile.Request) (reconcile.Result, error) { start := time.Now() logger := r.logger.WithField("clusterClaim", request.NamespacedName) logger.Infof("reconciling cluster claim") defer func() { dur := time.Since(start) hivemetrics.MetricControllerReconcileTime.WithLabelValues(ControllerName).Observe(dur.Seconds()) logger.WithField("elapsed", dur).Info("reconcile complete") }() // Fetch the ClusterClaim instance claim := &hivev1.ClusterClaim{} err := r.Get(context.TODO(), request.NamespacedName, claim) if err != nil { if apierrors.IsNotFound(err) { logger.Info("claim not found") return reconcile.Result{}, nil } // Error reading the object - requeue the request. log.WithError(err).Error("error getting ClusterClaim") return reconcile.Result{}, err } if claim.DeletionTimestamp != nil { return r.reconcileDeletedClaim(claim, logger) } // Add finalizer if not already present if !controllerutils.HasFinalizer(claim, finalizer) { logger.Debug("adding finalizer to ClusterClaim") controllerutils.AddFinalizer(claim, finalizer) if err := r.Update(context.Background(), claim); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "error adding finalizer to ClusterClaim") return reconcile.Result{}, err } } clusterName := claim.Spec.Namespace if clusterName == "" { logger.Debug("claim has not yet been assigned a cluster") return reconcile.Result{}, nil } logger = logger.WithField("cluster", clusterName) cd := &hivev1.ClusterDeployment{} switch err := r.Get(context.Background(), client.ObjectKey{Namespace: clusterName, Name: clusterName}, cd); { case apierrors.IsNotFound(err): return r.reconcileForDeletedCluster(claim, logger) case err != nil: logger.Log(controllerutils.LogLevel(err), "error getting ClusterDeployment") return reconcile.Result{}, err } switch cd.Spec.ClusterPoolRef.ClaimName { case "": return r.reconcileForNewAssignment(claim, cd, logger) case claim.Name: return r.reconcileForExistingAssignment(claim, cd, logger) default: return r.reconcileForAssignmentConflict(claim, logger) } } func (r *ReconcileClusterClaim) reconcileDeletedClaim(claim *hivev1.ClusterClaim, logger log.FieldLogger) (reconcile.Result, error) { if !controllerutils.HasFinalizer(claim, finalizer) { return reconcile.Result{}, nil } if err := r.cleanupResources(claim, logger); err != nil { return reconcile.Result{}, err } logger.Info("removing finalizer from ClusterClaim") controllerutils.DeleteFinalizer(claim, finalizer) if err := r.Update(context.Background(), claim); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not remove finalizer from ClusterClaim") return reconcile.Result{}, err } return reconcile.Result{}, nil } func (r *ReconcileClusterClaim) cleanupResources(claim 
*hivev1.ClusterClaim, logger log.FieldLogger) error { clusterName := claim.Spec.Namespace if clusterName == "" { logger.Info("no resources to clean up since claim was never assigned a cluster") return nil } logger = logger.WithField("cluster", clusterName) cd := &hivev1.ClusterDeployment{} switch err := r.Get(context.Background(), client.ObjectKey{Namespace: clusterName, Name: clusterName}, cd); { case apierrors.IsNotFound(err): logger.Info("cluster does not exist") return nil case err != nil: logger.WithError(err).Log(controllerutils.LogLevel(err), "error getting ClusterDeployment") return err } if poolRef := cd.Spec.ClusterPoolRef; poolRef == nil || poolRef.Namespace != claim.Namespace || poolRef.ClaimName != claim.Name { logger.Info("assigned cluster was not claimed") return nil } // Delete RoleBinding if err := resource.DeleteAnyExistingObject( r, client.ObjectKey{Namespace: clusterName, Name: hiveClaimOwnerRoleBindingName}, &rbacv1.RoleBinding{}, logger, ); err != nil { return err } // Delete Role if err := resource.DeleteAnyExistingObject( r, client.ObjectKey{Namespace: clusterName, Name: hiveClaimOwnerRoleName}, &rbacv1.Role{}, logger, ); err != nil { return err } // Delete ClusterDeployment if cd.DeletionTimestamp == nil { logger.Info("deleting clusterDeployment") if err := r.Delete(context.Background(), cd); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "error deleting ClusterDeployment") return err } } return nil } func (r *ReconcileClusterClaim) reconcileForDeletedCluster(claim *hivev1.ClusterClaim, logger log.FieldLogger) (reconcile.Result, error) { logger.Debug("assigned cluster has been deleted") conds, changed := controllerutils.SetClusterClaimConditionWithChangeCheck( claim.Status.Conditions, hivev1.ClusterClaimClusterDeletedCondition, corev1.ConditionTrue, "ClusterDeleted", "Assigned cluster has been deleted", controllerutils.UpdateConditionIfReasonOrMessageChange, ) if changed { claim.Status.Conditions = conds if err := r.Status().Update(context.Background(), claim); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update status") return reconcile.Result{}, err } } return reconcile.Result{}, nil } func (r *ReconcileClusterClaim) reconcileForNewAssignment(claim *hivev1.ClusterClaim, cd *hivev1.ClusterDeployment, logger log.FieldLogger) (reconcile.Result, error) { logger.Info("cluster assigned to claim") cd.Spec.ClusterPoolRef.ClaimName = claim.Name if err := r.Update(context.Background(), cd); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not set claim for ClusterDeployment") return reconcile.Result{}, err } return r.reconcileForExistingAssignment(claim, cd, logger) } func (r *ReconcileClusterClaim) reconcileForExistingAssignment(claim *hivev1.ClusterClaim, cd *hivev1.ClusterDeployment, logger log.FieldLogger) (reconcile.Result, error) { logger.Debug("claim has existing cluster assignment") if err := r.createRBAC(claim, cd, logger); err != nil { return reconcile.Result{}, err } conds, changed := controllerutils.SetClusterClaimConditionWithChangeCheck( claim.Status.Conditions, hivev1.ClusterClaimPendingCondition, corev1.ConditionFalse, "ClusterClaimed", "Cluster claimed", controllerutils.UpdateConditionIfReasonOrMessageChange, ) if changed { claim.Status.Conditions = conds if err := r.Status().Update(context.Background(), claim); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update status of ClusterClaim") return reconcile.Result{}, err } } return 
reconcile.Result{}, nil } func (r *ReconcileClusterClaim) reconcileForAssignmentConflict(claim *hivev1.ClusterClaim, logger log.FieldLogger) (reconcile.Result, error) { logger.Info("claim assigned a cluster that has already been claimed by another ClusterClaim") claim.Spec.Namespace = "" claim.Status.Conditions = controllerutils.SetClusterClaimCondition( claim.Status.Conditions, hivev1.ClusterClaimPendingCondition, corev1.ConditionTrue, "AssignmentConflict", "Assigned cluster was claimed by a different ClusterClaim", controllerutils.UpdateConditionIfReasonOrMessageChange, ) if err := r.Update(context.Background(), claim); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update status of ClusterClaim") return reconcile.Result{}, err } return reconcile.Result{}, nil } func (r *ReconcileClusterClaim) createRBAC(claim *hivev1.ClusterClaim, cd *hivev1.ClusterDeployment, logger log.FieldLogger) error { if len(claim.Spec.Subjects) == 0 { logger.Debug("not creating RBAC since claim does not specify any subjects") return nil } if cd.Spec.ClusterMetadata == nil { return errors.New("ClusterDeployment does not have ClusterMetadata") } if err := r.applyHiveClaimOwnerRole(claim, cd, logger); err != nil { return err } if err := r.applyHiveClaimOwnerRoleBinding(claim, cd, logger); err != nil { return err } return nil } func (r *ReconcileClusterClaim) applyHiveClaimOwnerRole(claim *hivev1.ClusterClaim, cd *hivev1.ClusterDeployment, logger log.FieldLogger) error { desiredRole := &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Namespace: cd.Namespace, Name: hiveClaimOwnerRoleName, }, Rules: []rbacv1.PolicyRule{ // Allow full access to all Hive resources { APIGroups: []string{hivev1.HiveAPIGroup}, Resources: []string{rbacv1.ResourceAll}, Verbs: []string{rbacv1.VerbAll}, }, // Allow read access to the kubeconfig and admin password secrets { APIGroups: []string{corev1.GroupName}, Resources: []string{"secrets"}, ResourceNames: []string{ cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name, cd.Spec.ClusterMetadata.AdminPasswordSecretRef.Name, }, Verbs: []string{"get"}, }, }, } observedRole := &rbacv1.Role{} updateRole := func() bool { if reflect.DeepEqual(desiredRole.Rules, observedRole.Rules) { return false } observedRole.Rules = desiredRole.Rules return true } if err := r.applyResource(desiredRole, observedRole, updateRole, logger); err != nil { return err } return nil } func (r *ReconcileClusterClaim) applyHiveClaimOwnerRoleBinding(claim *hivev1.ClusterClaim, cd *hivev1.ClusterDeployment, logger log.FieldLogger) error { desiredRoleBinding := &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Namespace: cd.Namespace, Name: hiveClaimOwnerRoleBindingName, }, Subjects: claim.Spec.Subjects, RoleRef: rbacv1.RoleRef{ APIGroup: rbacv1.GroupName, Kind: "Role", Name: hiveClaimOwnerRoleName, }, } observedRoleBinding := &rbacv1.RoleBinding{} updateRole := func() bool { if reflect.DeepEqual(desiredRoleBinding.Subjects, observedRoleBinding.Subjects) && reflect.DeepEqual(desiredRoleBinding.RoleRef, observedRoleBinding.RoleRef) { return false } observedRoleBinding.Subjects = desiredRoleBinding.Subjects observedRoleBinding.RoleRef = desiredRoleBinding.RoleRef return true } if err := r.applyResource(desiredRoleBinding, observedRoleBinding, updateRole, logger); err != nil { return err } return nil } func (r *ReconcileClusterClaim) applyResource(desired, observed hivev1.MetaRuntimeObject, update func() bool, logger log.FieldLogger) error { key := client.ObjectKey{ Namespace: desired.GetNamespace(), 
Name: desired.GetName(), } logger = logger.WithField("resource", key) switch err := r.Get(context.Background(), key, observed); { case apierrors.IsNotFound(err): logger.Info("creating resource") if err := r.Create(context.Background(), desired); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not create resource") return errors.Wrap(err, "could not create resource") } return nil case err != nil: logger.WithError(err).Log(controllerutils.LogLevel(err), "could not get resource") return errors.Wrap(err, "could not get resource") } if !update() { logger.Debug("resource is up-to-date") return nil } logger.Info("updating resource") if err := r.Update(context.Background(), observed); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update resource") return errors.Wrap(err, "could not update resource") } return nil }
1
13,496
Feels like this should be an option and not the default. Leaving them running would give you immediate response to claims, which might be what some people would want. Hibernation is AWS-only right now, and other clouds will probably just disregard the setting, though arguably validation should be rejecting attempts to create/update the power state on GCP/Azure/etc. I'd pitch putting this on the pool spec and making it opt-in.
openshift-hive
go
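A hypothetical shape for the opt-in proposed above. `HibernateUnclaimed` is an invented field name and the types are local stand-ins for the Hive API, kept minimal so the sketch runs on its own:

```go
package main

import "fmt"

// Local stand-ins for the Hive types; HibernateUnclaimed is an invented
// field illustrating the proposed opt-in, not a real Hive API field.
type ClusterPoolSpec struct{ HibernateUnclaimed bool }
type ClusterDeploymentSpec struct{ PowerState string }

const (
	hibernatingPowerState = "Hibernating" // stand-ins for hivev1 constants
	runningPowerState     = "Running"
)

// onClaim mirrors reconcileForNewAssignment, but only touches PowerState
// when the pool opted in to hibernating unclaimed clusters in the first
// place, instead of setting it unconditionally as the patch does.
func onClaim(pool ClusterPoolSpec, cd *ClusterDeploymentSpec) {
	if pool.HibernateUnclaimed {
		cd.PowerState = runningPowerState
	}
}

func main() {
	cd := &ClusterDeploymentSpec{PowerState: hibernatingPowerState}
	onClaim(ClusterPoolSpec{HibernateUnclaimed: true}, cd)
	fmt.Println(cd.PowerState) // Running: opted-in pool resumes on claim
}
```

Gating on a pool-spec flag keeps today's behavior for pools that never hibernate, while letting latency-sensitive pools choose immediate claim response.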
@@ -5,13 +5,14 @@ // The conditions looks weird, but it seems like _OR_GREATER is not supported yet in all environments // We can trim all the additional conditions when this is fixed -#if NETCOREAPP3_0 || NETCOREAPP3_1 || NET5_0 +#if NETCOREAPP3_0 || NETCOREAPP3_1 || NET5_0 || NET6_0 using System; using System.Collections.Generic; using System.Linq; using System.Net; using System.Threading.Tasks; +using Datadog.Trace.IntegrationTestHelpers; using Xunit; using Xunit.Abstractions;
1
// <copyright file="AspNetCore5.cs" company="Datadog"> // Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. // </copyright> // The conditions looks weird, but it seems like _OR_GREATER is not supported yet in all environments // We can trim all the additional conditions when this is fixed #if NETCOREAPP3_0 || NETCOREAPP3_1 || NET5_0 using System; using System.Collections.Generic; using System.Linq; using System.Net; using System.Threading.Tasks; using Xunit; using Xunit.Abstractions; namespace Datadog.Trace.Security.IntegrationTests { public class AspNetCore5 : AspNetBase, IDisposable { public AspNetCore5(ITestOutputHelper outputHelper) : base("AspNetCore5", outputHelper, "/shutdown") { } // NOTE: by integrating the latest version of the WAF, blocking was disabled, as it does not support blocking yet [Theory] [InlineData(true, true, HttpStatusCode.OK)] [InlineData(true, false, HttpStatusCode.OK)] [InlineData(false, true, HttpStatusCode.OK)] [InlineData(false, false, HttpStatusCode.OK)] [InlineData(true, false, HttpStatusCode.OK, "/Health/?test&[$slice]")] [Trait("RunOnWindows", "True")] [Trait("Category", "ArmUnsupported")] public async Task TestSecurity(bool enableSecurity, bool enableBlocking, HttpStatusCode expectedStatusCode, string url = DefaultAttackUrl) { var agent = await RunOnSelfHosted(enableSecurity, enableBlocking); await TestBlockedRequestAsync(agent, enableSecurity, expectedStatusCode, 5, url: url, assertOnSpans: new Action<TestHelpers.MockTracerAgent.Span>[] { s => Assert.Equal("aspnet_core.request", s.Name), s => Assert.Equal("Samples.AspNetCore5", s.Service), s => Assert.Equal("web", s.Type), s => { var securityTags = new Dictionary<string, string> { { "network.client.ip", "127.0.0.1" }, { "http.response.headers.content-type", "text/plain; charset=utf-8" }, }; foreach (var kvp in securityTags) { Assert.True(s.Tags.TryGetValue(kvp.Key, out var tagValue), $"The tag {kvp.Key} was not found"); Assert.Equal(kvp.Value, tagValue); } }, }); } } } #endif
1
25,568
I think I saw somewhere that it has been fixed and that you can use `NETCOREAPP3_0_OR_GREATER`, but I may be wrong.
DataDog-dd-trace-dotnet
.cs
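If the `*_OR_GREATER` preprocessor symbols are in fact available in all of this repo's build environments (the comment's open question; newer .NET SDKs emit them automatically), the growing `#if` chain at the top of the file collapses to a single symbol. A minimal standalone illustration:

```csharp
// Sketch: the NETCOREAPP3_0 || NETCOREAPP3_1 || NET5_0 || NET6_0 chain
// replaced by the single *_OR_GREATER symbol, assuming the SDKs used in CI
// define it (that is exactly the open question in the comment above).
#if NETCOREAPP3_0_OR_GREATER
using System;

internal static class TfmDemo
{
    private static void Main() => Console.WriteLine("netcoreapp3.0 or newer");
}
#endif
```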
@@ -236,6 +236,15 @@ func (r *RollDPoS) CurrentState() fsm.State {
 	return r.cfsm.CurrentState()
 }
 
+// Activate activates or pauses the roll-DPoS consensus. When it is deactivated, the node will finish the current
+// consensus round if it is doing the work and then return to the initial state
+func (r *RollDPoS) Activate(active bool) { r.ctx.Activate(active) }
+
+// Active is true if the roll-DPoS consensus is active, or false if it is stand-by
+func (r *RollDPoS) Active() bool {
+	return r.ctx.Active() || r.cfsm.CurrentState() != consensusfsm.InitState
+}
+
 // Builder is the builder for RollDPoS
 type Builder struct {
 	cfg config.Config
1
// Copyright (c) 2019 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package rolldpos import ( "context" "github.com/facebookgo/clock" "github.com/iotexproject/go-fsm" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/iotexproject/iotex-core/action/protocol/rolldpos" "github.com/iotexproject/iotex-core/actpool" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/consensus/consensusfsm" "github.com/iotexproject/iotex-core/consensus/scheme" "github.com/iotexproject/iotex-core/endorsement" "github.com/iotexproject/iotex-core/explorer/idl/explorer" "github.com/iotexproject/iotex-core/pkg/keypair" "github.com/iotexproject/iotex-core/pkg/log" "github.com/iotexproject/iotex-core/protogen/iotextypes" ) var ( timeSlotMtc = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "iotex_consensus_time_slot", Help: "Consensus time slot", }, []string{}, ) blockIntervalMtc = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "iotex_consensus_block_interval", Help: "Consensus block interval", }, []string{}, ) ) func init() { prometheus.MustRegister(timeSlotMtc) prometheus.MustRegister(blockIntervalMtc) } var ( // ErrNewRollDPoS indicates the error of constructing RollDPoS ErrNewRollDPoS = errors.New("error when constructing RollDPoS") // ErrZeroDelegate indicates seeing 0 delegates in the network ErrZeroDelegate = errors.New("zero delegates in the network") // ErrNotEnoughCandidates indicates there are not enough candidates from the candidate pool ErrNotEnoughCandidates = errors.New("Candidate pool does not have enough candidates") ) type blockWrapper struct { *block.Block round uint32 } func (bw *blockWrapper) Hash() []byte { hash := bw.HashBlock() return hash[:] } func (bw *blockWrapper) Endorser() string { return bw.ProducerAddress() } func (bw *blockWrapper) Round() uint32 { return bw.round } // RollDPoS is Roll-DPoS consensus main entrance type RollDPoS struct { cfsm *consensusfsm.ConsensusFSM ctx *rollDPoSCtx ready chan interface{} } // Start starts RollDPoS consensus func (r *RollDPoS) Start(ctx context.Context) error { if err := r.cfsm.Start(ctx); err != nil { return errors.Wrap(err, "error when starting the consensus FSM") } if _, err := r.cfsm.BackToPrepare(r.ctx.cfg.Delay); err != nil { return err } close(r.ready) return nil } // Stop stops RollDPoS consensus func (r *RollDPoS) Stop(ctx context.Context) error { return errors.Wrap(r.cfsm.Stop(ctx), "error when stopping the consensus FSM") } // HandleConsensusMsg handles incoming consensus message func (r *RollDPoS) HandleConsensusMsg(msg *iotextypes.ConsensusMessage) error { <-r.ready consensusHeight := r.ctx.Height() switch { case consensusHeight == 0: log.Logger("consensus").Debug("consensus component is not ready yet") return nil case msg.Height < consensusHeight: log.Logger("consensus").Debug( "old consensus message", zap.Uint64("consensusHeight", consensusHeight), zap.Uint64("msgHeight", msg.Height), ) return nil case msg.Height > consensusHeight+1: log.Logger("consensus").Debug( "future consensus message", 
zap.Uint64("consensusHeight", consensusHeight), zap.Uint64("msgHeight", msg.Height), ) return nil } endorsedMessage := &EndorsedConsensusMessage{} if err := endorsedMessage.LoadProto(msg); err != nil { return errors.Wrapf(err, "failed to decode endorsed consensus message") } if !endorsement.VerifyEndorsedDocument(endorsedMessage) { return errors.New("failed to verify signature in endorsement") } en := endorsedMessage.Endorsement() switch consensusMessage := endorsedMessage.Document().(type) { case *blockProposal: if err := r.ctx.CheckBlockProposer(endorsedMessage.Height(), consensusMessage, en); err != nil { return errors.Wrap(err, "failed to verify block proposal") } r.cfsm.ProduceReceiveBlockEvent(endorsedMessage) return nil case *ConsensusVote: if err := r.ctx.CheckVoteEndorser(endorsedMessage.Height(), consensusMessage, en); err != nil { return errors.Wrapf(err, "failed to verify vote") } switch consensusMessage.Topic() { case PROPOSAL: r.cfsm.ProduceReceiveProposalEndorsementEvent(endorsedMessage) case LOCK: r.cfsm.ProduceReceiveLockEndorsementEvent(endorsedMessage) case COMMIT: r.cfsm.ProduceReceivePreCommitEndorsementEvent(endorsedMessage) } return nil // TODO: response block by hash, requestBlock.BlockHash default: return errors.Errorf("Invalid consensus message type %+v", msg) } } // Calibrate called on receive a new block not via consensus func (r *RollDPoS) Calibrate(height uint64) { r.cfsm.Calibrate(height) } // ValidateBlockFooter validates the signatures in the block footer func (r *RollDPoS) ValidateBlockFooter(blk *block.Block) error { round, err := r.ctx.RoundCalc().NewRound(blk.Height(), blk.Timestamp()) if err != nil { return err } if round.Proposer() != blk.ProducerAddress() { return errors.Errorf( "block proposer %s is invalid, %s expected", blk.ProducerAddress(), round.proposer, ) } if err := round.AddBlock(blk); err != nil { return err } blkHash := blk.HashBlock() for _, en := range blk.Endorsements() { if err := round.AddVoteEndorsement( NewConsensusVote(blkHash[:], COMMIT), en, ); err != nil { return err } } if !round.EndorsedByMajority(blkHash[:], []ConsensusVoteTopic{COMMIT}) { return ErrInsufficientEndorsements } return nil } // Metrics returns RollDPoS consensus metrics func (r *RollDPoS) Metrics() (scheme.ConsensusMetrics, error) { var metrics scheme.ConsensusMetrics height := r.ctx.chain.TipHeight() round, err := r.ctx.RoundCalc().NewRound(height+1, r.ctx.clock.Now()) if err != nil { return metrics, errors.Wrap(err, "error when calculating round") } // Get all candidates candidates, err := r.ctx.chain.CandidatesByHeight(height) if err != nil { return metrics, errors.Wrap(err, "error when getting all candidates") } candidateAddresses := make([]string, len(candidates)) for i, c := range candidates { candidateAddresses[i] = c.Address } return scheme.ConsensusMetrics{ LatestEpoch: round.EpochNum(), LatestHeight: height, LatestDelegates: round.Delegates(), LatestBlockProducer: r.ctx.round.proposer, Candidates: candidateAddresses, }, nil } // NumPendingEvts returns the number of pending events func (r *RollDPoS) NumPendingEvts() int { return r.cfsm.NumPendingEvents() } // CurrentState returns the current state func (r *RollDPoS) CurrentState() fsm.State { return r.cfsm.CurrentState() } // Builder is the builder for RollDPoS type Builder struct { cfg config.Config // TODO: we should use keystore in the future encodedAddr string priKey keypair.PrivateKey chain blockchain.Blockchain actPool actpool.ActPool broadcastHandler scheme.Broadcast clock clock.Clock 
rootChainAPI explorer.Explorer rp *rolldpos.Protocol candidatesByHeightFunc CandidatesByHeightFunc } // NewRollDPoSBuilder instantiates a Builder instance func NewRollDPoSBuilder() *Builder { return &Builder{} } // SetConfig sets config func (b *Builder) SetConfig(cfg config.Config) *Builder { b.cfg = cfg return b } // SetAddr sets the address and key pair for signature func (b *Builder) SetAddr(encodedAddr string) *Builder { b.encodedAddr = encodedAddr return b } // SetPriKey sets the private key func (b *Builder) SetPriKey(priKey keypair.PrivateKey) *Builder { b.priKey = priKey return b } // SetBlockchain sets the blockchain APIs func (b *Builder) SetBlockchain(chain blockchain.Blockchain) *Builder { b.chain = chain return b } // SetActPool sets the action pool APIs func (b *Builder) SetActPool(actPool actpool.ActPool) *Builder { b.actPool = actPool return b } // SetBroadcast sets the broadcast callback func (b *Builder) SetBroadcast(broadcastHandler scheme.Broadcast) *Builder { b.broadcastHandler = broadcastHandler return b } // SetClock sets the clock func (b *Builder) SetClock(clock clock.Clock) *Builder { b.clock = clock return b } // SetRootChainAPI sets root chain API func (b *Builder) SetRootChainAPI(api explorer.Explorer) *Builder { b.rootChainAPI = api return b } // SetCandidatesByHeightFunc sets candidatesByHeightFunc func (b *Builder) SetCandidatesByHeightFunc( candidatesByHeightFunc CandidatesByHeightFunc, ) *Builder { b.candidatesByHeightFunc = candidatesByHeightFunc return b } // RegisterProtocol sets the rolldpos protocol func (b *Builder) RegisterProtocol(rp *rolldpos.Protocol) *Builder { b.rp = rp return b } // Build builds a RollDPoS consensus module func (b *Builder) Build() (*RollDPoS, error) { if b.chain == nil { return nil, errors.Wrap(ErrNewRollDPoS, "blockchain APIs is nil") } if b.actPool == nil { return nil, errors.Wrap(ErrNewRollDPoS, "action pool APIs is nil") } if b.broadcastHandler == nil { return nil, errors.Wrap(ErrNewRollDPoS, "broadcast callback is nil") } if b.clock == nil { b.clock = clock.New() } ctx := newRollDPoSCtx( b.cfg.Consensus.RollDPoS, b.cfg.Genesis.Blockchain.BlockInterval, b.cfg.Consensus.RollDPoS.ToleratedOvertime, b.cfg.Genesis.TimeBasedRotation, b.rootChainAPI, b.chain, b.actPool, b.rp, b.broadcastHandler, b.candidatesByHeightFunc, b.encodedAddr, b.priKey, b.clock, ) cfsm, err := consensusfsm.NewConsensusFSM(b.cfg.Consensus.RollDPoS.FSM, ctx, b.clock) if err != nil { return nil, errors.Wrap(err, "error when constructing the consensus FSM") } return &RollDPoS{ cfsm: cfsm, ctx: ctx, ready: make(chan interface{}), }, nil }
1
17,167
What's the second part for?
iotexproject-iotex-core
go
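On the question above: per the patch's own doc comment, `Activate(false)` lets an in-flight round finish before the FSM returns to its initial state, so the second clause keeps `Active()` reporting true while that drain is still in progress. A standalone sketch of that semantics (names are local stand-ins, not the `consensusfsm` package):

```go
package main

import "fmt"

// Sketch of the drain semantics: after Activate(false), the scheme still
// reports Active until the FSM has wound back to its initial state.
const initState = "sPrepare" // stand-in for consensusfsm.InitState

type scheme struct {
	active bool
	state  string
}

func (s *scheme) Active() bool {
	// Second clause: a paused node that is mid-round is still "active"
	// until the current round finishes and the FSM returns to initState.
	return s.active || s.state != initState
}

func main() {
	s := &scheme{active: false, state: "sAcceptBlockProposal"}
	fmt.Println(s.Active()) // true: still draining the current round
	s.state = initState
	fmt.Println(s.Active()) // false: fully paused
}
```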
@@ -79,7 +79,7 @@ public class DirectSolrSpellCheckerTest extends SolrTestCaseJ4 { return null; }); } - + @Test public void testOnlyMorePopularWithExtendedResults() throws Exception { assertQ(req("q", "teststop:fox", "qt", "/spellCheckCompRH", SpellCheckComponent.COMPONENT_NAME, "true", SpellingParams.SPELLCHECK_DICT, "direct", SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true", SpellingParams.SPELLCHECK_ONLY_MORE_POPULAR, "true"),
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.spelling; import java.util.Collection; import java.util.Map; import org.apache.lucene.util.LuceneTestCase.SuppressTempFileChecks; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.params.SpellingParams; import org.apache.solr.common.util.NamedList; import org.apache.solr.core.SolrCore; import org.apache.solr.handler.component.SpellCheckComponent; import org.junit.BeforeClass; import org.junit.Test; /** * Simple tests for {@link DirectSolrSpellChecker} */ @SuppressTempFileChecks(bugUrl = "https://issues.apache.org/jira/browse/SOLR-1877 Spellcheck IndexReader leak bug?") public class DirectSolrSpellCheckerTest extends SolrTestCaseJ4 { private static SpellingQueryConverter queryConverter; @BeforeClass public static void beforeClass() throws Exception { initCore("solrconfig-spellcheckcomponent.xml","schema.xml"); //Index something with a title assertNull(h.validateUpdate(adoc("id", "0", "teststop", "This is a title"))); assertNull(h.validateUpdate(adoc("id", "1", "teststop", "The quick reb fox jumped over the lazy brown dogs."))); assertNull(h.validateUpdate(adoc("id", "2", "teststop", "This is a Solr"))); assertNull(h.validateUpdate(adoc("id", "3", "teststop", "solr foo"))); assertNull(h.validateUpdate(adoc("id", "4", "teststop", "another foo"))); assertNull(h.validateUpdate(commit())); queryConverter = new SimpleQueryConverter(); queryConverter.init(new NamedList()); } @Test public void test() throws Exception { DirectSolrSpellChecker checker = new DirectSolrSpellChecker(); NamedList spellchecker = new NamedList(); spellchecker.add("classname", DirectSolrSpellChecker.class.getName()); spellchecker.add(SolrSpellChecker.FIELD, "teststop"); spellchecker.add(DirectSolrSpellChecker.MINQUERYLENGTH, 2); // we will try "fob" SolrCore core = h.getCore(); checker.init(spellchecker, core); h.getCore().withSearcher(searcher -> { Collection<Token> tokens = queryConverter.convert("fob"); SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.getIndexReader()); SpellingResult result = checker.getSuggestions(spellOpts); assertTrue("result is null and it shouldn't be", result != null); Map<String, Integer> suggestions = result.get(tokens.iterator().next()); Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next(); assertTrue(entry.getKey() + " is not equal to " + "foo", entry.getKey().equals("foo") == true); assertFalse(entry.getValue() + " equals: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO); spellOpts.tokens = queryConverter.convert("super"); result = checker.getSuggestions(spellOpts); assertTrue("result is null and it shouldn't be", result != null); suggestions = result.get(tokens.iterator().next()); 
assertTrue("suggestions is not null and it should be", suggestions == null); return null; }); } @Test public void testOnlyMorePopularWithExtendedResults() throws Exception { assertQ(req("q", "teststop:fox", "qt", "/spellCheckCompRH", SpellCheckComponent.COMPONENT_NAME, "true", SpellingParams.SPELLCHECK_DICT, "direct", SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true", SpellingParams.SPELLCHECK_ONLY_MORE_POPULAR, "true"), "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='fox']/int[@name='origFreq']=1", "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='fox']/arr[@name='suggestion']/lst/str[@name='word']='foo'", "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='fox']/arr[@name='suggestion']/lst/int[@name='freq']=2", "//lst[@name='spellcheck']/bool[@name='correctlySpelled']='true'" ); } }
1
31,682
It's not clear to me what the "super" test above is for. As far as I can see, the test runs a spellcheck for "super" but then keeps using the tokens converted from "fob" as the key into the suggestions map, so the lookup can never find an entry. (A sketch of the presumably intended assertion follows this record.)
apache-lucene-solr
java
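A minimal sketch (Java, mirroring the test's own assertion style) of what the "super" branch presumably intended; re-keying the lookup with the freshly converted token, and the expected null result, are assumptions about the original author's intent, not the project's confirmed fix:

    spellOpts.tokens = queryConverter.convert("super");
    result = checker.getSuggestions(spellOpts);
    assertTrue("result is null and it shouldn't be", result != null);
    // key the lookup with the freshly converted "super" token,
    // not the leftover token from the earlier "fob" conversion
    suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("suggestions is not null and it should be", suggestions == null);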
@@ -24,7 +24,7 @@ import ( "google.golang.org/grpc/status" logfilter "github.com/iotexproject/iotex-core/api/logfilter" - "github.com/iotexproject/iotex-core/ioctl/util" + ioctlUtil "github.com/iotexproject/iotex-core/ioctl/util" "github.com/iotexproject/iotex-core/pkg/log" )
1
package api import ( "context" "encoding/hex" "encoding/json" "fmt" "math/big" "strconv" "strings" "time" "github.com/ethereum/go-ethereum/common" "github.com/go-redis/redis/v8" "github.com/iotexproject/go-pkgs/cache/ttl" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-proto/golang/iotexapi" "github.com/iotexproject/iotex-proto/golang/iotextypes" "github.com/pkg/errors" "github.com/tidwall/gjson" "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" logfilter "github.com/iotexproject/iotex-core/api/logfilter" "github.com/iotexproject/iotex-core/ioctl/util" "github.com/iotexproject/iotex-core/pkg/log" ) type ( blockObject struct { Author string `json:"author"` Number string `json:"number"` Hash string `json:"hash"` ParentHash string `json:"parentHash"` Sha3Uncles string `json:"sha3Uncles"` LogsBloom string `json:"logsBloom"` TransactionsRoot string `json:"transactionsRoot"` StateRoot string `json:"stateRoot"` ReceiptsRoot string `json:"receiptsRoot"` Miner string `json:"miner"` Difficulty string `json:"difficulty"` TotalDifficulty string `json:"totalDifficulty"` ExtraData string `json:"extraData"` Size string `json:"size"` GasLimit string `json:"gasLimit"` GasUsed string `json:"gasUsed"` Timestamp string `json:"timestamp"` Transactions []interface{} `json:"transactions"` Signature string `json:"signature"` Step string `json:"step"` Uncles []string `json:"uncles"` } transactionObject struct { Hash string `json:"hash"` Nonce string `json:"nonce"` BlockHash string `json:"blockHash"` BlockNumber string `json:"blockNumber"` TransactionIndex string `json:"transactionIndex"` From string `json:"from"` To *string `json:"to"` Value string `json:"value"` GasPrice string `json:"gasPrice"` Gas string `json:"gas"` Input string `json:"input"` R string `json:"r"` S string `json:"s"` V string `json:"v"` StandardV string `json:"standardV"` Condition *string `json:"condition"` Creates *string `json:"creates"` ChainID string `json:"chainId"` PublicKey string `json:"publicKey"` } ) func hexStringToNumber(hexStr string) (uint64, error) { return strconv.ParseUint(removeHexPrefix(hexStr), 16, 64) } func ethAddrToIoAddr(ethAddr string) (string, error) { if ok := common.IsHexAddress(ethAddr); !ok { return "", errors.Wrapf(errUnkownType, "ethAddr: %s", ethAddr) } ioAddress, err := address.FromBytes(common.HexToAddress(ethAddr).Bytes()) if err != nil { return "", err } return ioAddress.String(), nil } func ioAddrToEthAddr(ioAddr string) (string, error) { if len(ioAddr) == 0 { return "0x0000000000000000000000000000000000000000", nil } addr, err := util.IoAddrToEvmAddr(ioAddr) if err != nil { return "", err } return addr.String(), nil } func uint64ToHex(val uint64) string { return "0x" + strconv.FormatUint(val, 16) } func intStrToHex(str string) (string, error) { amount, ok := big.NewInt(0).SetString(str, 10) if !ok { return "", errors.Wrapf(errUnkownType, "int: %s", str) } return "0x" + fmt.Sprintf("%x", amount), nil } func getStringFromArray(in interface{}, i int) (string, error) { params, ok := in.([]interface{}) if !ok || i < 0 || i >= len(params) { return "", errInvalidFormat } ret, ok := params[i].(string) if !ok { return "", errUnkownType } return ret, nil } func getStringAndBoolFromArray(in interface{}) (str string, b bool, err error) { params, ok := in.([]interface{}) if !ok || len(params) != 2 { err = errInvalidFormat return } str, ok = params[0].(string) if !ok { err = errUnkownType return } b, ok = 
params[1].(bool) if !ok { err = errUnkownType return } return } func removeHexPrefix(hexStr string) string { ret := strings.Replace(hexStr, "0x", "", -1) ret = strings.Replace(ret, "0X", "", -1) return ret } func (svr *Web3Server) getBlockWithTransactions(blkMeta *iotextypes.BlockMeta, isDetailed bool) (blockObject, error) { transactionsRoot := "0x" var transactions []interface{} if blkMeta.Height > 0 { actionInfos, err := svr.coreService.ActionsByBlock(blkMeta.Hash, 0, svr.coreService.cfg.API.RangeQueryLimit) if err != nil { return blockObject{}, err } for _, info := range actionInfos { if isDetailed { tx, err := svr.getTransactionFromActionInfo(info) if err != nil { log.L().Error("failed to get info from action", zap.Error(err), zap.String("info", fmt.Sprintf("%+v", info))) continue } transactions = append(transactions, *tx) } else { transactions = append(transactions, "0x"+info.ActHash) } } transactionsRoot = "0x" + blkMeta.TxRoot } // TODO: the value is the same as Babel's. It will be corrected in next pr if len(transactions) == 0 { transactionsRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" } bloom := "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" if len(blkMeta.LogsBloom) > 0 { bloom = blkMeta.LogsBloom } producerAddr, err := ioAddrToEthAddr(blkMeta.ProducerAddress) if err != nil { return blockObject{}, err } // TODO: the value is the same as Babel's. 
It will be corrected in next pr return blockObject{ Author: producerAddr, Number: uint64ToHex(blkMeta.Height), Hash: "0x" + blkMeta.Hash, ParentHash: "0x" + blkMeta.PreviousBlockHash, Sha3Uncles: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", LogsBloom: "0x" + bloom, TransactionsRoot: transactionsRoot, StateRoot: "0x" + blkMeta.DeltaStateDigest, ReceiptsRoot: "0x" + blkMeta.TxRoot, Miner: producerAddr, Difficulty: "0xfffffffffffffffffffffffffffffffe", TotalDifficulty: "0xff14700000000000000000000000486001d72", ExtraData: "0x", Size: uint64ToHex(uint64(blkMeta.NumActions)), GasLimit: uint64ToHex(blkMeta.GasLimit), GasUsed: uint64ToHex(blkMeta.GasUsed), Timestamp: uint64ToHex(uint64(blkMeta.Timestamp.Seconds)), Transactions: transactions, Step: "373422302", Signature: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", Uncles: []string{}, }, nil } func (svr *Web3Server) getTransactionFromActionInfo(actInfo *iotexapi.ActionInfo) (*transactionObject, error) { if actInfo.GetAction() == nil || actInfo.GetAction().GetCore() == nil { return nil, errNullPointer } var ( to *string value = "0x0" data = "0x" err error ) switch act := actInfo.Action.Core.Action.(type) { case *iotextypes.ActionCore_Transfer: value, err = intStrToHex(act.Transfer.GetAmount()) if err != nil { return nil, err } toTmp, err := ioAddrToEthAddr(act.Transfer.GetRecipient()) if err != nil { return nil, err } to = &toTmp case *iotextypes.ActionCore_Execution: value, err = intStrToHex(act.Execution.GetAmount()) if err != nil { return nil, err } if len(act.Execution.GetContract()) > 0 { toTmp, err := ioAddrToEthAddr(act.Execution.GetContract()) if err != nil { return nil, err } to = &toTmp } data = byteToHex(act.Execution.GetData()) // TODO: support other type actions default: return nil, errors.Errorf("the type of action %s is not supported", actInfo.ActHash) } vVal := uint64(actInfo.Action.Signature[64]) if vVal < 27 { vVal += 27 } from, err := ioAddrToEthAddr(actInfo.Sender) if err != nil { return nil, err } gasPrice, err := intStrToHex(actInfo.Action.Core.GasPrice) if err != nil { return nil, err } return &transactionObject{ Hash: "0x" + actInfo.ActHash, Nonce: uint64ToHex(actInfo.Action.Core.Nonce), BlockHash: "0x" + actInfo.BlkHash, BlockNumber: uint64ToHex(actInfo.BlkHeight), TransactionIndex: uint64ToHex(uint64(actInfo.Index)), From: from, To: to, Value: value, GasPrice: gasPrice, Gas: uint64ToHex(actInfo.Action.Core.GasLimit), Input: data, R: byteToHex(actInfo.Action.Signature[:32]), S: byteToHex(actInfo.Action.Signature[32:64]), V: uint64ToHex(vVal), // TODO: the value is the same as Babel's. 
It will be corrected in next pr StandardV: uint64ToHex(vVal), ChainID: uint64ToHex(uint64(svr.coreService.EVMNetworkID())), PublicKey: byteToHex(actInfo.Action.SenderPubKey), }, nil } func (svr *Web3Server) getTransactionCreateFromActionInfo(actInfo *iotexapi.ActionInfo) (transactionObject, error) { tx, err := svr.getTransactionFromActionInfo(actInfo) if err != nil { return transactionObject{}, err } if tx.To == nil { actHash, err := hash.HexStringToHash256(removeHexPrefix(tx.Hash)) if err != nil { return transactionObject{}, errors.Wrapf(errUnkownType, "txHash: %s", tx.Hash) } receipt, _, err := svr.coreService.ReceiptByAction(actHash) if err != nil { return transactionObject{}, err } addr, err := ioAddrToEthAddr(receipt.ContractAddress) if err != nil { return transactionObject{}, err } tx.Creates = &addr } return *tx, nil } func (svr *Web3Server) parseBlockNumber(str string) (uint64, error) { switch str { case "earliest": return 1, nil case "", "pending", "latest": return svr.coreService.bc.TipHeight(), nil default: return hexStringToNumber(str) } } func (svr *Web3Server) parseBlockRange(fromStr string, toStr string) (from uint64, to uint64, err error) { from, err = svr.parseBlockNumber(fromStr) if err != nil { return } to, err = svr.parseBlockNumber(toStr) if err != nil { return } tipHeight := svr.coreService.bc.TipHeight() if from > tipHeight { err = status.Error(codes.InvalidArgument, "start block > tip height") return } if to > tipHeight { to = tipHeight } return } func (svr *Web3Server) isContractAddr(addr string) (bool, error) { if addr == "" { return true, nil } accountMeta, _, err := svr.coreService.Account(addr) if err != nil { return false, err } return accountMeta.IsContract, nil } func (svr *Web3Server) getLogsWithFilter(from uint64, to uint64, addrs []string, topics [][]string) ([]logsObject, error) { // construct filter topics and addresses var filter iotexapi.LogsFilter for _, ethAddr := range addrs { ioAddr, err := ethAddrToIoAddr(ethAddr) if err != nil { return nil, err } filter.Address = append(filter.Address, ioAddr) } for _, tp := range topics { var topic [][]byte for _, str := range tp { b, err := hexToBytes(str) if err != nil { return nil, err } topic = append(topic, b) } filter.Topics = append(filter.Topics, &iotexapi.Topics{ Topic: topic, }) } logs, err := svr.coreService.getLogsInRange(logfilter.NewLogFilter(&filter, nil, nil), from, to, 1000) if err != nil { return nil, err } // parse log results var ret []logsObject for _, l := range logs { var topics []string for _, val := range l.Topics { topics = append(topics, byteToHex(val)) } contractAddr, err := ioAddrToEthAddr(l.ContractAddress) if err != nil { return nil, err } ret = append(ret, logsObject{ BlockHash: byteToHex(l.BlkHash), TransactionHash: byteToHex(l.ActHash), LogIndex: uint64ToHex(uint64(l.Index)), BlockNumber: uint64ToHex(l.BlkHeight), // TransactionIndex bug will be fixed in the next TransactionIndex: "0x1", Address: contractAddr, Data: byteToHex(l.Data), Topics: topics, }) } return ret, nil } func byteToHex(b []byte) string { return "0x" + hex.EncodeToString(b) } func hexToBytes(str string) ([]byte, error) { str = removeHexPrefix(str) if len(str)%2 == 1 { str = "0" + str } return hex.DecodeString(str) } func parseLogRequest(in gjson.Result) (*filterObject, error) { var logReq filterObject if len(in.Array()) > 0 { req := in.Array()[0] logReq.FromBlock = req.Get("fromBlock").String() logReq.ToBlock = req.Get("toBlock").String() for _, addr := range req.Get("address").Array() { logReq.Address = 
append(logReq.Address, addr.String()) } for _, topics := range req.Get("topics").Array() { if topics.IsArray() { var topicArr []string for _, topic := range topics.Array() { topicArr = append(topicArr, removeHexPrefix(topic.String())) } logReq.Topics = append(logReq.Topics, topicArr) } else { logReq.Topics = append(logReq.Topics, []string{removeHexPrefix(topics.String())}) } } } return &logReq, nil } func parseCallObject(in interface{}) (from string, to string, gasLimit uint64, value *big.Int, data []byte, err error) { params, ok := in.([]interface{}) if !ok { err = errInvalidFormat return } params0, ok := params[0].(map[string]interface{}) if !ok { err = errInvalidFormat return } req, err := json.Marshal(params0) if err != nil { return } callObj := struct { From string `json:"from,omitempty"` To string `json:"to,omitempty"` Gas string `json:"gas,omitempty"` GasPrice string `json:"gasPrice,omitempty"` Value string `json:"value,omitempty"` Data string `json:"data,omitempty"` }{} err = json.Unmarshal(req, &callObj) if err != nil { return } if callObj.To != "" { to, err = ethAddrToIoAddr(callObj.To) if err != nil { return } } if callObj.From == "" { callObj.From = "0x0000000000000000000000000000000000000000" } from, err = ethAddrToIoAddr(callObj.From) if err != nil { return } if callObj.Value != "" { value, ok = big.NewInt(0).SetString(removeHexPrefix(callObj.Value), 16) if !ok { err = errors.Wrapf(errUnkownType, "value: %s", callObj.Value) return } } else { value = big.NewInt(0) } if callObj.Gas != "" { gasLimit, err = hexStringToNumber(callObj.Gas) if err != nil { return } } data = common.FromHex(callObj.Data) return } func (svr *Web3Server) getLogQueryRange(fromStr, toStr string, logHeight uint64) (from uint64, to uint64, hasNewLogs bool, err error) { if from, to, err = svr.parseBlockRange(fromStr, toStr); err != nil { return } switch { case logHeight < from: hasNewLogs = true return case logHeight > to: hasNewLogs = false return default: from = logHeight hasNewLogs = true return } } func loadFilterFromCache(c apiCache, filterID string) (filterObject, error) { dataStr, isFound := c.Get(filterID) if !isFound { return filterObject{}, errInvalidFiterID } var filterObj filterObject if err := json.Unmarshal([]byte(dataStr), &filterObj); err != nil { return filterObject{}, err } return filterObj, nil } func newAPICache(expireTime time.Duration, remoteURL string) apiCache { redisClient := redis.NewClient(&redis.Options{ Addr: remoteURL, Password: "", // no password set DB: 0, // use default DB }) if redisClient.Ping(context.Background()).Err() != nil { log.L().Info("local cache is used as API cache") filterCache, _ := ttl.NewCache(ttl.AutoExpireOption(expireTime)) return &localCache{ ttlCache: filterCache, } } log.L().Info("remote cache is used as API cache") return &remoteCache{ redisCache: redisClient, expireTime: expireTime, } } type apiCache interface { Set(key string, data []byte) error Del(key string) bool Get(key string) ([]byte, bool) } type localCache struct { ttlCache *ttl.Cache } func (c *localCache) Set(key string, data []byte) error { if c.ttlCache == nil { return errNullPointer } c.ttlCache.Set(key, data) return nil } func (c *localCache) Del(key string) bool { if c.ttlCache == nil { return false } return c.ttlCache.Delete(key) } func (c *localCache) Get(key string) ([]byte, bool) { if c.ttlCache == nil { return nil, false } val, exist := c.ttlCache.Get(key) if !exist { return nil, false } ret, ok := val.([]byte) return ret, ok } type remoteCache struct { redisCache *redis.Client 
expireTime time.Duration } func (c *remoteCache) Set(key string, data []byte) error { if c.redisCache == nil { return errNullPointer } return c.redisCache.Set(context.Background(), key, data, c.expireTime).Err() } func (c *remoteCache) Del(key string) bool { if c.redisCache == nil { return false } err := c.redisCache.Unlink(context.Background(), key).Err() return err == nil } func (c *remoteCache) Get(key string) ([]byte, bool) { if c.redisCache == nil { return nil, false } ret, err := c.redisCache.Get(context.Background(), key).Bytes() if err == redis.Nil { return nil, false } else if err != nil { return nil, false } c.redisCache.Expire(context.Background(), key, c.expireTime) return ret, true }
1
24,226
revert this file change
iotexproject-iotex-core
go
@@ -74,9 +74,9 @@ class FileState: # # 1. def meth8(self): # 2. """test late disabling""" - # 3. pylint: disable=not-callable + # 3. pylint: disable=not-callable, useless-suppression # 4. print(self.blip) - # 5. pylint: disable=no-member + # 5. pylint: disable=no-member, useless-suppression # 6. print(self.bla) # # E1102 should be disabled from line 1 to 6 while E1101 from line 5 to 6
1
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE import collections import sys from typing import ( TYPE_CHECKING, DefaultDict, Dict, Iterator, Optional, Set, Tuple, Union, ) from astroid import nodes from pylint.constants import MSG_STATE_SCOPE_MODULE, WarningScope if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal if TYPE_CHECKING: from pylint.message import MessageDefinition, MessageDefinitionStore MessageStateDict = Dict[str, Dict[int, bool]] class FileState: """Hold internal state specific to the currently analyzed file""" def __init__(self, modname: Optional[str] = None) -> None: self.base_name = modname self._module_msgs_state: MessageStateDict = {} self._raw_module_msgs_state: MessageStateDict = {} self._ignored_msgs: DefaultDict[ Tuple[str, int], Set[int] ] = collections.defaultdict(set) self._suppression_mapping: Dict[Tuple[str, int], int] = {} self._effective_max_line_number: Optional[int] = None def collect_block_lines( self, msgs_store: "MessageDefinitionStore", module_node: nodes.Module ) -> None: """Walk the AST to collect block level options line numbers.""" for msg, lines in self._module_msgs_state.items(): self._raw_module_msgs_state[msg] = lines.copy() orig_state = self._module_msgs_state.copy() self._module_msgs_state = {} self._suppression_mapping = {} self._effective_max_line_number = module_node.tolineno self._collect_block_lines(msgs_store, module_node, orig_state) def _collect_block_lines( self, msgs_store: "MessageDefinitionStore", node: nodes.NodeNG, msg_state: MessageStateDict, ) -> None: """Recursively walk (depth first) AST to collect block level options line numbers. """ for child in node.get_children(): self._collect_block_lines(msgs_store, child, msg_state) first = node.fromlineno last = node.tolineno # first child line number used to distinguish between disable # which are the first child of scoped node with those defined later. # For instance in the code below: # # 1. def meth8(self): # 2. """test late disabling""" # 3. pylint: disable=not-callable # 4. print(self.blip) # 5. pylint: disable=no-member # 6. print(self.bla) # # E1102 should be disabled from line 1 to 6 while E1101 from line 5 to 6 # # this is necessary to disable locally messages applying to class / # function using their fromlineno if ( isinstance(node, (nodes.Module, nodes.ClassDef, nodes.FunctionDef)) and node.body ): firstchildlineno = node.body[0].fromlineno else: firstchildlineno = last for msgid, lines in msg_state.items(): for lineno, state in list(lines.items()): original_lineno = lineno if first > lineno or last < lineno: continue # Set state for all lines for this block, if the # warning is applied to nodes. 
message_definitions = msgs_store.get_message_definitions(msgid) for message_definition in message_definitions: if message_definition.scope == WarningScope.NODE: if lineno > firstchildlineno: state = True first_, last_ = node.block_range(lineno) else: first_ = lineno last_ = last for line in range(first_, last_ + 1): # do not override existing entries if line in self._module_msgs_state.get(msgid, ()): continue if line in lines: # state change in the same block state = lines[line] original_lineno = line if not state: self._suppression_mapping[(msgid, line)] = original_lineno try: self._module_msgs_state[msgid][line] = state except KeyError: self._module_msgs_state[msgid] = {line: state} del lines[lineno] def set_msg_status(self, msg: "MessageDefinition", line: int, status: bool) -> None: """Set status (enabled/disable) for a given message at a given line""" assert line > 0 try: self._module_msgs_state[msg.msgid][line] = status except KeyError: self._module_msgs_state[msg.msgid] = {line: status} def handle_ignored_message( self, state_scope: Optional[Literal[0, 1, 2]], msgid: str, line: int ) -> None: """Report an ignored message. state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG, depending on whether the message was disabled locally in the module, or globally. """ if state_scope == MSG_STATE_SCOPE_MODULE: try: orig_line = self._suppression_mapping[(msgid, line)] self._ignored_msgs[(msgid, orig_line)].add(line) except KeyError: pass def iter_spurious_suppression_messages( self, msgs_store: "MessageDefinitionStore", ) -> Iterator[ Tuple[ Literal["useless-suppression", "suppressed-message"], int, Union[Tuple[str], Tuple[str, int]], ] ]: for warning, lines in self._raw_module_msgs_state.items(): for line, enable in lines.items(): if not enable and (warning, line) not in self._ignored_msgs: # ignore cyclic-import check which can show false positives # here due to incomplete context if warning != "R0401": yield "useless-suppression", line, ( msgs_store.get_msg_display_string(warning), ) # don't use iteritems here, _ignored_msgs may be modified by add_message for (warning, from_), ignored_lines in list(self._ignored_msgs.items()): for line in ignored_lines: yield "suppressed-message", line, ( msgs_store.get_msg_display_string(warning), from_, ) def get_effective_max_line_number(self) -> Optional[int]: return self._effective_max_line_number
1
17,762
I think this is the most elegant way to allow us to still fail on `I` messages. (An illustration follows this record.)
PyCQA-pylint
py
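A hedged illustration (Python) of what the reviewer means, assuming pylint is run with `--fail-on=I` so that informational messages such as `useless-suppression` (I0021) break the build; the class body is invented for the example:

    class Example:
        def meth8(self):
            """test late disabling"""
            # Without the extra `useless-suppression`, pylint may report I0021
            # for these disables, and a run with `--fail-on=I` would then fail.
            print(self.blip)  # pylint: disable=not-callable, useless-suppression
            print(self.bla)  # pylint: disable=no-member, useless-suppression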
@@ -415,10 +415,15 @@ class AdminController extends Controller ->from($entityClass, 'entity') ; + $wildcards = $this->getDoctrine()->getConnection()->getDatabasePlatform()->getWildcards(); + $searchQuery = addcslashes($searchQuery, implode('', $wildcards)); + foreach ($searchableFields as $name => $metadata) { - $wildcards = $this->getDoctrine()->getConnection()->getDatabasePlatform()->getWildcards(); - $searchQuery = addcslashes($searchQuery, implode('', $wildcards)); - $query->orWhere('entity.'.$name.' LIKE :query')->setParameter('query', '%'.$searchQuery.'%'); + if (in_array($metadata['fieldType'], array('text', 'string'))) { + $query->orWhere('entity.'.$name.' LIKE :fuzzy_value')->setParameter('fuzzy_value', '%'.$searchQuery.'%'); + } else { + $query->orWhere('entity.'.$name.' = :exact_value')->setParameter('exact_value', $searchQuery); + } } $paginator = new Pagerfanta(new DoctrineORMAdapter($query, false));
1
<?php /* * This file is part of the EasyAdminBundle. * * (c) Javier Eguiluz <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. * * Some parts of this file are copied and/or inspired by the * DoctrineCRUDGenerator included in the SensioGeneratorBundle. * License: MIT License * Copyright: (c) Fabien Potencier <[email protected]> * Source: https://github.com/sensiolabs/SensioGeneratorBundle */ namespace JavierEguiluz\Bundle\EasyAdminBundle\Controller; use Doctrine\ORM\EntityManager; use Doctrine\ORM\Mapping\ClassMetadataInfo; use Symfony\Component\Form\Form; use Symfony\Component\HttpFoundation\RedirectResponse; use Symfony\Component\HttpFoundation\Request; use Symfony\Component\HttpFoundation\Response; use Symfony\Bundle\FrameworkBundle\Controller\Controller; use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route; use Pagerfanta\Pagerfanta; use Pagerfanta\Adapter\DoctrineORMAdapter; /** * Class AdminController. */ class AdminController extends Controller { protected $config; protected $entity = array(); /** @var Request */ protected $request; /** @var EntityManager */ protected $em; protected $view; /** * @Route("/", name="admin") * * @param Request $request * * @return RedirectResponse|Response */ public function indexAction(Request $request) { $result = $this->initialize($request); // initialize() returns a Response object when an error occurs. // This allows to display a detailed error message. if ($result instanceof Response) { return $result; } $action = $request->query->get('action', 'list'); // for now, the homepage redirects to the 'list' action and view of the first entity if (null === $request->query->get('entity')) { return $this->redirect($this->generateUrl('admin', array( 'action' => $action, 'entity' => $this->getNameOfTheFirstConfiguredEntity(), 'view' => $this->view, ))); } return $this->{$action.'Action'}(); } /** * Utility method which initializes the configuration of the entity on which * the user is performing the action. * * If everything goes right, it returns null. If there is any error, it * returns a 404 error page using a Response object. * * @param Request $request * * @return Response|null */ protected function initialize(Request $request) { $this->config = $this->container->getParameter('easyadmin.config'); if (0 === count($this->config['entities'])) { return $this->render404error('@EasyAdmin/error/no_entities.html.twig'); } // this condition happens when accessing the backend homepage, which // then redirects to the 'list' action of the first configured entity if (null === $entityName = $request->query->get('entity')) { return; } if (!array_key_exists($entityName, $this->config['entities'])) { return $this->render404error('@EasyAdmin/error/undefined_entity.html.twig', array('entity_name' => $entityName)); } $this->entity = $this->get('easyadmin.configurator')->getEntityConfiguration($entityName); if (!$request->query->has('sortField')) { $request->query->set('sortField', $this->entity['primary_key_field_name']); } if (!$request->query->has('sortDirection') || !in_array(strtoupper($request->query->get('sortDirection')), array('ASC', 'DESC'))) { $request->query->set('sortDirection', 'DESC'); } $this->em = $this->getDoctrine()->getManagerForClass($this->entity['class']); $this->request = $request; $this->view = $this->request->query->get('view', 'list'); } /** * The method that is executed when the user performs a 'list' action on an entity. 
* * @return Response */ protected function listAction() { if (!$this->isActionAllowed('list')) { return $this->renderForbiddenActionError('list'); } $fields = $this->entity['list']['fields']; $paginator = $this->findAll($this->entity['class'], $this->request->query->get('page', 1), $this->config['list']['max_results'], $this->request->query->get('sortField'), $this->request->query->get('sortDirection')); return $this->render($this->entity['templates']['list'], array( 'paginator' => $paginator, 'fields' => $fields, 'view' => 'list', )); } /** * The method that is executed when the user performs a 'edit' action on an entity. * * @return RedirectResponse|Response */ protected function editAction() { if (!$this->isActionAllowed('edit')) { return $this->renderForbiddenActionError('edit'); } if ($this->request->isXmlHttpRequest()) { return $this->ajaxEdit(); } $id = $this->request->query->get('id'); if (!$item = $this->em->getRepository($this->entity['class'])->find($id)) { throw $this->createNotFoundException(sprintf('Unable to find entity (%s #%d).', $this->entity['name'], $id)); } $fields = $this->entity['edit']['fields']; $editForm = $this->createEditForm($item, $fields); $deleteForm = $this->createDeleteForm($this->entity['name'], $id); $editForm->handleRequest($this->request); if ($editForm->isValid()) { $this->prepareEditEntityForPersist($item); $this->em->flush(); return $this->redirect($this->generateUrl('admin', array('action' => 'list', 'view' => 'list', 'entity' => $this->entity['name']))); } return $this->render($this->entity['templates']['edit'], array( 'form' => $editForm->createView(), 'entity_fields' => $fields, 'item' => $item, 'delete_form' => $deleteForm->createView(), 'view' => 'edit', )); } /** * The method that is executed when the user performs a 'show' action on an entity. * * @return Response */ protected function showAction() { if (!$this->isActionAllowed('show')) { return $this->renderForbiddenActionError('show'); } $id = $this->request->query->get('id'); if (!$item = $this->em->getRepository($this->entity['class'])->find($id)) { throw $this->createNotFoundException(sprintf('Unable to find entity (%s #%d).', $this->entity['name'], $id)); } $fields = $this->entity['show']['fields']; $deleteForm = $this->createDeleteForm($this->entity['name'], $id); return $this->render($this->entity['templates']['show'], array( 'item' => $item, 'fields' => $fields, 'view' => 'show', 'delete_form' => $deleteForm->createView(), )); } /** * The method that is executed when the user performs a 'new' action on an entity. * * @return RedirectResponse|Response */ protected function newAction() { if (!$this->isActionAllowed('new')) { return $this->renderForbiddenActionError('new'); } $item = $this->instantiateNewEntity(); $fields = $fields = $this->entity['new']['fields']; $newForm = $this->createNewForm($item, $fields); $newForm->handleRequest($this->request); if ($newForm->isValid()) { $this->prepareNewEntityForPersist($item); $this->em->persist($item); $this->em->flush(); return $this->redirect($this->generateUrl('admin', array('action' => 'list', 'view' => 'new', 'entity' => $this->entity['name']))); } return $this->render($this->entity['templates']['new'], array( 'form' => $newForm->createView(), 'entity_fields' => $fields, 'item' => $item, 'view' => 'new', )); } /** * The method that is executed when the user performs a 'delete' action to * remove any entity. 
* * @return RedirectResponse */ protected function deleteAction() { if ('DELETE' !== $this->request->getMethod()) { return $this->redirect($this->generateUrl('admin', array('action' => 'list', 'view' => 'list', 'entity' => $this->entity['name']))); } $id = $this->request->query->get('id'); $form = $this->createDeleteForm($this->entity['name'], $id); $form->handleRequest($this->request); if ($form->isValid()) { if (!$entity = $this->em->getRepository($this->entity['class'])->find($id)) { throw $this->createNotFoundException('The entity to be delete does not exist.'); } $this->em->remove($entity); $this->em->flush(); } return $this->redirect($this->generateUrl('admin', array('action' => 'list', 'view' => 'list', 'entity' => $this->entity['name']))); } /** * The method that is executed when the user performs a query on an entity. * * @return Response */ protected function searchAction() { $searchableFields = $this->entity['search']['fields']; $paginator = $this->findBy($this->entity['class'], $this->request->query->get('query'), $searchableFields, $this->request->query->get('page', 1), $this->config['list']['max_results']); $fields = $this->entity['list']['fields']; return $this->render($this->entity['templates']['list'], array( 'paginator' => $paginator, 'fields' => $fields, 'view' => 'search', )); } /** * Modifies the entity properties via an Ajax call. Currently it's used for * changing the value of boolean properties when the user clicks on the * flip switched displayed for boolean values in the 'list' action. */ protected function ajaxEdit() { if (!$entity = $this->em->getRepository($this->entity['class'])->find($this->request->query->get('id'))) { throw new \Exception('The entity does not exist.'); } $propertyName = $this->request->query->get('property'); $propertyMetadata = $this->entity['list']['fields'][$propertyName]; if (!isset($this->entity['list']['fields'][$propertyName]) || 'toggle' != $propertyMetadata['dataType']) { throw new \Exception(sprintf('The "%s" property is not a switchable toggle.', $propertyName)); } if (!$propertyMetadata['canBeSet']) { throw new \Exception(sprintf('It\'s not possible to toggle the value of the "%s" boolean property of the "%s" entity.', $propertyName, $this->entity['name'])); } $newValue = ('true' === strtolower($this->request->query->get('newValue'))) ? true : false; if (null !== $setter = $propertyMetadata['setter']) { $entity->{$setter}($newValue); } else { $entity->{$propertyName} = $newValue; } $this->em->flush(); return new Response((string) $newValue); } /** * Creates a new object of the current managed entity. * This method is mostly here for override convenience, because it allows * the user to use his own method to customize the entity instanciation. * * @return object */ protected function instantiateNewEntity() { $entityFullyQualifiedClassName = $this->entity['class']; return new $entityFullyQualifiedClassName(); } /** * Allows applications to modify the entity associated with the item being * edited before persisting it. * * @param object $entity * * @return object */ protected function prepareEditEntityForPersist($entity) { return $entity; } /** * Allows applications to modify the entity associated with the item being * created before persisting it. * * @param object $entity * * @return object */ protected function prepareNewEntityForPersist($entity) { return $entity; } /** * Performs a database query to get all the records related to the given * entity. It supports pagination and field sorting. 
* * @param string $entityClass * @param int $page * @param int $maxPerPage * @param string|null $sortField * @param string|null $sortDirection * * @return Pagerfanta The paginated query results */ protected function findAll($entityClass, $page = 1, $maxPerPage = 15, $sortField = null, $sortDirection = null) { $query = $this->em->createQueryBuilder() ->select('entity') ->from($entityClass, 'entity') ; if (null !== $sortField) { if (empty($sortDirection) || !in_array(strtoupper($sortDirection), array('ASC', 'DESC'))) { $sortDirection = 'DESC'; } $query->orderBy('entity.'.$sortField, $sortDirection); } $paginator = new Pagerfanta(new DoctrineORMAdapter($query, false)); $paginator->setMaxPerPage($maxPerPage); $paginator->setCurrentPage($page); return $paginator; } /** * Performs a database query based on the search query provided by the user. * It supports pagination and field sorting. * * @param string $entityClass * @param string $searchQuery * @param array $searchableFields * @param int $page * @param int $maxPerPage * * @return Pagerfanta The paginated query results */ protected function findBy($entityClass, $searchQuery, array $searchableFields, $page = 1, $maxPerPage = 15) { $query = $this->em->createQueryBuilder() ->select('entity') ->from($entityClass, 'entity') ; foreach ($searchableFields as $name => $metadata) { $wildcards = $this->getDoctrine()->getConnection()->getDatabasePlatform()->getWildcards(); $searchQuery = addcslashes($searchQuery, implode('', $wildcards)); $query->orWhere('entity.'.$name.' LIKE :query')->setParameter('query', '%'.$searchQuery.'%'); } $paginator = new Pagerfanta(new DoctrineORMAdapter($query, false)); $paginator->setMaxPerPage($maxPerPage); $paginator->setCurrentPage($page); return $paginator; } /** * Creates the form used to edit an entity. * * @param object $entity * @param array $entityProperties * * @return Form */ protected function createEditForm($entity, array $entityProperties) { return $this->createEntityForm($entity, $entityProperties, 'edit'); } /** * Creates the form used to create an entity. * * @param object $entity * @param array $entityProperties * * @return Form */ protected function createNewForm($entity, array $entityProperties) { return $this->createEntityForm($entity, $entityProperties, 'new'); } /** * Creates the form used to create or edit an entity. 
* * @param object $entity * @param array $entityProperties * @param string $view The name of the view where this form is used ('new' or 'edit') * * @return Form */ protected function createEntityForm($entity, array $entityProperties, $view) { $formCssClass = array_reduce($this->config['design']['form_theme'], function ($previousClass, $formTheme) { return sprintf('theme_%s %s', strtolower(str_replace('.html.twig', '', basename($formTheme))), $previousClass); }); $form = $this->createFormBuilder($entity, array( 'data_class' => $this->entity['class'], 'attr' => array('class' => $formCssClass, 'id' => $view.'-form'), )); foreach ($entityProperties as $name => $metadata) { $formFieldOptions = array(); if ('association' === $metadata['fieldType'] && in_array($metadata['associationType'], array(ClassMetadataInfo::ONE_TO_MANY, ClassMetadataInfo::MANY_TO_MANY))) { continue; } if ('collection' === $metadata['fieldType']) { $formFieldOptions = array('allow_add' => true, 'allow_delete' => true); if (version_compare(\Symfony\Component\HttpKernel\Kernel::VERSION, '2.5.0', '>=')) { $formFieldOptions['delete_empty'] = true; } } $formFieldOptions['attr']['field_type'] = $metadata['fieldType']; $formFieldOptions['attr']['field_css_class'] = $metadata['class']; $formFieldOptions['attr']['field_help'] = $metadata['help']; $form->add($name, $metadata['fieldType'], $formFieldOptions); } return $form->getForm(); } /** * It returns the name of the first entity configured in the backend. It's * mainly used to redirect the homepage of the backend to the listing of the * first configured entity. * * @return mixed */ protected function getNameOfTheFirstConfiguredEntity() { $entityNames = array_keys($this->config['entities']); return $entityNames[0]; } /** * Creates the form used to delete an entity. It must be a form because * the deletion of the entity are always performed with the 'DELETE' HTTP method, * which requires a form to work in the current browsers. * * @param string $entityName * @param int $entityId * * @return Form */ protected function createDeleteForm($entityName, $entityId) { return $this->createFormBuilder() ->setAction($this->generateUrl('admin', array('action' => 'delete', 'entity' => $entityName, 'id' => $entityId))) ->setMethod('DELETE') ->add('submit', 'submit', array('label' => 'Delete')) ->getForm() ; } /** * Utility shortcut to render a template as a 404 error page. * * @param string $view * @param array $parameters * * @return Response */ protected function render404error($view, array $parameters = array()) { return $this->render($view, $parameters, new Response('', 404)); } /** * Utility method that checks if the given action is allowed for the current * view of the current entity. * * @param string $action * * @return bool */ protected function isActionAllowed($action) { return array_key_exists($action, $this->entity[$this->view]['actions']); } /** * Utility shortcut to render an error when the requested action is not allowed * for the given view of the given entity. * * @param string $action * * @return Response */ protected function renderForbiddenActionError($action) { $allowedActions = array_keys($this->entity[$this->view]['actions']); $parameters = array('action' => $action, 'allowed_actions' => $allowedActions, 'view' => $this->view); return $this->render('@EasyAdmin/error/forbidden_action.html.twig', $parameters, new Response('', 403)); } /** * It renders the main CSS applied to the backend design. 
This controller * allows to generate dynamic CSS files that use variables without the need * to set up a CSS preprocessing toolchain. * * @Route("/_css/admin.css", name="_easyadmin_render_css") */ public function renderCssAction() { $config = $this->container->getParameter('easyadmin.config'); $cssContent = $this->renderView('@EasyAdmin/css/admin.css.twig', array( 'brand_color' => $config['design']['brand_color'], 'color_scheme' => $config['design']['color_scheme'], )); $response = new Response($cssContent, 200, array('Content-Type' => 'text/css')); $response->setPublic(); $response->setSharedMaxAge(600); return $response; } }
1
8,427
Wouldn't it be better to use an SQL `IN (:exact_value)`? (A sketch of that alternative follows this record.)
EasyCorp-EasyAdminBundle
php
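A sketch (PHP) of the reviewer's suggestion, not the bundle's actual code; it assumes Doctrine's QueryBuilder, which accepts an array-valued parameter for `IN ()`, so several exact values could share one placeholder:

    // $query and $name stand in for the QueryBuilder and field name in findBy()
    $exactValues = array($searchQuery); // could later hold several values
    $query->orWhere('entity.'.$name.' IN (:exact_values)')
          ->setParameter('exact_values', $exactValues);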
@@ -9,9 +9,14 @@ module Bolt @shell = shell @endpoint = "http://#{host}:#{port}/wsman" - @connection = ::WinRM::Connection.new(endpoint: @endpoint, - user: @user, - password: @password) + options = { endpoint: @endpoint, + user: @user, + password: @password } + if @timeout then + options[:receive_timeout] = @timeout + options[:operation_timeout] = @timeout + end + @connection = ::WinRM::Connection.new(options) @connection.logger = @transport_logger end
1
require 'winrm' require 'winrm-fs' require 'bolt/result' module Bolt class WinRM < Node def initialize(host, port, user, password, shell: :powershell, **kwargs) super(host, port, user, password, **kwargs) @shell = shell @endpoint = "http://#{host}:#{port}/wsman" @connection = ::WinRM::Connection.new(endpoint: @endpoint, user: @user, password: @password) @connection.logger = @transport_logger end def connect @session = @connection.shell(@shell) @logger.debug { "Opened session" } end def disconnect @session.close if @session @logger.debug { "Closed session" } end def shell_init return Bolt::Node::Success.new if @shell_initialized result = execute(<<-PS) $ENV:PATH += ";${ENV:ProgramFiles}\\Puppet Labs\\Puppet\\sys\\ruby\\bin\\" $ENV:RUBYLIB = "${ENV:ProgramFiles}\\Puppet Labs\\Puppet\\puppet\\lib;" + "${ENV:ProgramFiles}\\Puppet Labs\\Puppet\\facter\\lib;" + "${ENV:ProgramFiles}\\Puppet Labs\\Puppet\\hiera\\lib;" + $ENV:RUBYLIB function Invoke-Interpreter { [CmdletBinding()] Param ( [Parameter()] [String] $Path, [Parameter()] [String] $Arguments, [Parameter()] [Int32] $Timeout, [Parameter()] [String] $StdinInput = $Null ) $startInfo = New-Object System.Diagnostics.ProcessStartInfo($Path, $Arguments) $startInfo.UseShellExecute = $false if ($StdinInput) { $startInfo.RedirectStandardInput = $true } $startInfo.RedirectStandardOutput = $true $startInfo.RedirectStandardError = $true try { $process = [System.Diagnostics.Process]::Start($startInfo) } catch { Write-Error $_ return 1 } if ($StdinInput) { $process.StandardInput.WriteLine($StdinInput) $process.StandardInput.Close() } # streams must have .ReadToEnd() called prior to process .WaitForExit() # to prevent deadlocks per MSDN # https://msdn.microsoft.com/en-us/library/system.diagnostics.process.standarderror(v=vs.110).aspx#Anchor_2 $process.StandardOutput.ReadToEnd() | Out-Host $stderr = $process.StandardError.ReadToEnd() if ($stderr) { Write-Error $stderr } $process.WaitForExit($Timeout) | Out-Null return $process.ExitCode } PS @shell_initialized = true result end def execute(command, _ = {}) result_output = Bolt::Node::ResultOutput.new @logger.debug { "Executing command: #{command}" } output = @session.run(command) do |stdout, stderr| result_output.stdout << stdout @logger.debug { "stdout: #{stdout}" } result_output.stderr << stderr @logger.debug { "stderr: #{stderr}" } end if output.exitcode.zero? @logger.debug { "Command returned successfully" } Bolt::Node::Success.new(result_output.stdout.string, result_output) else @logger.info { "Command failed with exit code #{output.exitcode}" } Bolt::Node::Failure.new(output.exitcode, result_output) end end # 10 minutes in milliseconds DEFAULT_EXECUTION_TIMEOUT = 10 * 60 * 1000 def execute_process(path = '', arguments = '', stdin = nil, timeout_ms = DEFAULT_EXECUTION_TIMEOUT) execute(<<-PS) $invokeArgs = @{ Path = "#{path}" Arguments = "#{arguments.gsub('"', '""')}" Timeout = #{timeout_ms} #{stdin.nil? ? 
'' : "StdinInput = @'\n" + stdin + "\n'@"} } # winrm gem relies on $LASTEXITCODE $LASTEXITCODE = Invoke-Interpreter @invokeArgs PS end VALID_EXTENSIONS = ['.ps1', '.rb'].freeze PS_ARGS = '-NoProfile -NonInteractive -NoLogo -ExecutionPolicy Bypass'.freeze def process_from_extension(path) case Pathname(path).extname.downcase when '.rb' [ 'ruby.exe', "-S \"#{path}\"" ] when '.ps1' [ 'powershell.exe', "#{PS_ARGS} -File \"#{path}\"" ] end end def _upload(source, destination) @logger.debug { "Uploading #{source} to #{destination}" } fs = ::WinRM::FS::FileManager.new(@connection) fs.upload(source, destination) Bolt::Node::Success.new rescue StandardError => ex Bolt::Node::ExceptionFailure.new(ex) end def make_tempdir result = execute(<<-PS) $parent = [System.IO.Path]::GetTempPath() $name = [System.IO.Path]::GetRandomFileName() $path = Join-Path $parent $name New-Item -ItemType Directory -Path $path | Out-Null $path PS result.then { |stdout| Bolt::Node::Success.new(stdout.chomp) } end def with_remote_file(file) dest = '' dir = '' result = nil make_tempdir.then do |value| dir = value ext = File.extname(file) ext = VALID_EXTENSIONS.include?(ext) ? ext : '.ps1' dest = "#{dir}\\#{File.basename(file, '.*')}#{ext}" Bolt::Node::Success.new end.then do _upload(file, dest) end.then do shell_init end.then do result = yield dest end.then do execute(<<-PS) Remove-Item -Force "#{dest}" Remove-Item -Force "#{dir}" PS result end end def _run_command(command) execute(command) end def _run_script(script) @logger.info { "Running script '#{script}'" } with_remote_file(script) do |remote_path| args = "#{PS_ARGS} -File \"#{remote_path}\"" execute_process('powershell.exe', args) end end def _run_task(task, input_method, arguments) @logger.info { "Running task '#{task}'" } @logger.debug { "arguments: #{arguments}\ninput_method: #{input_method}" } if STDIN_METHODS.include?(input_method) stdin = JSON.dump(arguments) end if ENVIRONMENT_METHODS.include?(input_method) arguments.reduce(Bolt::Node::Success.new) do |result, (arg, val)| result.then do cmd = "[Environment]::SetEnvironmentVariable('PT_#{arg}', '#{val}')" execute(cmd) end end else Bolt::Node::Success.new end.then do with_remote_file(task) do |remote_path| execute_process(*process_from_extension(remote_path), stdin) end end end end end
1
6,620
So far this is not working as expected... I'm continuing to investigate the WinRM protocol and the gem's behavior. (One hypothesis is sketched after this record.)
puppetlabs-bolt
rb
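One hedged hypothesis (Ruby) for the unexpected behavior: the winrm gem keeps its default HTTP receive timeout ahead of the WS-Man operation timeout, so setting both options to the same value may cut reads off early. The `+ 10` margin below is an assumption mirroring what I believe the gem's default gap to be, not a verified fix:

    options = { endpoint: @endpoint, user: @user, password: @password }
    if @timeout
      options[:operation_timeout] = @timeout
      # give the transport headroom over the protocol-level timeout (assumed requirement)
      options[:receive_timeout] = @timeout + 10
    end
    @connection = ::WinRM::Connection.new(options)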
@@ -68,7 +68,7 @@ class ApacheBenchmarkExecutor(ScenarioExecutor, WidgetProvider, HavingInstallabl return self.widget def __first_http_request(self): - for request in self.scenario.get_requests(): + for request in self.get_requests(): if isinstance(request, HTTPRequest): return request return None
1
""" Module holds all stuff regarding usage of Apache Benchmark Copyright 2016 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import logging import re from math import ceil from distutils.version import LooseVersion from bzt import TaurusConfigError from bzt.engine import ScenarioExecutor, HavingInstallableTools, SelfDiagnosable from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader from bzt.modules.console import WidgetProvider, ExecutorWidget from bzt.requests_model import HTTPRequest from bzt.six import iteritems from bzt.utils import CALL_PROBLEMS, shutdown_process, RequiredTool, dehumanize_time, FileReader class ApacheBenchmarkExecutor(ScenarioExecutor, WidgetProvider, HavingInstallableTools, SelfDiagnosable): """ Apache Benchmark executor module """ def __init__(self): super(ApacheBenchmarkExecutor, self).__init__() self.log = logging.getLogger('') self.process = None self._tsv_file = None self.tool = None self.scenario = None def prepare(self): self.scenario = self.get_scenario() self.install_required_tools() self._tsv_file = self.engine.create_artifact("ab", ".tsv") self.stdout = open(self.engine.create_artifact("ab", ".out"), 'w') self.stderr = open(self.engine.create_artifact("ab", ".err"), 'w') self.reader = TSVDataReader(self._tsv_file, self.log) if isinstance(self.engine.aggregator, ConsolidatingAggregator): self.engine.aggregator.add_underling(self.reader) def get_widget(self): """ Add progress widget to console screen sidebar :return: """ if not self.widget: label = "%s" % self self.widget = ExecutorWidget(self, "ab: " + label.split('/')[1]) return self.widget def __first_http_request(self): for request in self.scenario.get_requests(): if isinstance(request, HTTPRequest): return request return None def startup(self): args = [self.tool.tool_path] load = self.get_load() load_iterations = load.iterations or 1 load_concurrency = load.concurrency or 1 if load.hold: hold = int(ceil(dehumanize_time(load.hold))) args += ['-t', str(hold)] else: args += ['-n', str(load_iterations * load_concurrency)] # ab waits for total number of iterations args += ['-c', str(load_concurrency)] args += ['-d'] # do not print 'Processed *00 requests' every 100 requests or so args += ['-r'] # do not crash on socket level errors if self.tool.version and LooseVersion(self.tool.version) >= LooseVersion("2.4.7"): args += ['-l'] # accept variable-len responses args += ['-g', str(self._tsv_file)] # dump stats to TSV file # add global scenario headers for key, val in iteritems(self.scenario.get_headers()): args += ['-H', "%s: %s" % (key, val)] requests = self.scenario.get_requests() if not requests: raise TaurusConfigError("You must specify at least one request for ab") if len(requests) > 1: self.log.warning("ab doesn't support multiple requests. 
Only first one will be used.") request = self.__first_http_request() if request is None: raise TaurusConfigError("ab supports only HTTP requests, while scenario doesn't have any") # add request-specific headers for key, val in iteritems(request.headers): args += ['-H', "%s: %s" % (key, val)] if request.method != 'GET': raise TaurusConfigError("ab supports only GET requests, but '%s' is found" % request.method) if request.priority_option('keepalive', default=True): args += ['-k'] args += [request.url] self.reader.setup(load_concurrency, request.label) self.process = self.execute(args) def check(self): ret_code = self.process.poll() if ret_code is None: return False if ret_code != 0: self.log.warning("ab tool exited with non-zero code: %s", ret_code) return True def shutdown(self): shutdown_process(self.process, self.log) def install_required_tools(self): self.tool = self._get_tool(ApacheBenchmark, config=self.settings) if not self.tool.check_if_installed(): self.tool.install() def get_error_diagnostics(self): diagnostics = [] if self.stdout is not None: with open(self.stdout.name) as fds: contents = fds.read().strip() if contents.strip(): diagnostics.append("ab STDOUT:\n" + contents) if self.stderr is not None: with open(self.stderr.name) as fds: contents = fds.read().strip() if contents.strip(): diagnostics.append("ab STDERR:\n" + contents) return diagnostics class TSVDataReader(ResultsReader): def __init__(self, filename, parent_logger): super(TSVDataReader, self).__init__() self.log = parent_logger.getChild(self.__class__.__name__) self.file = FileReader(filename=filename, parent_logger=self.log) self.skipped_header = False self.concurrency = None self.url_label = None def setup(self, concurrency, url_label): self.concurrency = concurrency self.url_label = url_label return True def _read(self, last_pass=False): lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass) for line in lines: if not self.skipped_header: self.skipped_header = True continue log_vals = [val.strip() for val in line.split('\t')] _error = None _rstatus = None _url = self.url_label _concur = self.concurrency _tstamp = int(log_vals[1]) # timestamp - moment of request sending _con_time = float(log_vals[2]) / 1000 # connection time _etime = float(log_vals[4]) / 1000 # elapsed time _latency = float(log_vals[5]) / 1000 # latency (aka waittime) _bytes = None yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _bytes class ApacheBenchmark(RequiredTool): def __init__(self, config=None, **kwargs): settings = config or {} tool_path = settings.get('path', 'ab') super(ApacheBenchmark, self).__init__(tool_path=tool_path, installable=False, **kwargs) def _get_version(self, output): version = re.findall("Version\s(\S+)\s", output) if not version: self.log.warning("%s tool version parsing error: %s", self.tool_name, output) else: return version[0] def check_if_installed(self): self.log.debug('Trying %s: %s', self.tool_name, self.tool_path) try: out, err = self.call([self.tool_path, '-V']) self.version = self._get_version(out) self.log.debug("%s check stdout: %s", self.tool_name, out) if err: self.log.warning("%s check stderr: %s", self.tool_name, err) return True except CALL_PROBLEMS as exc: self.log.warning("%s check failed: %s", self.tool_name, exc) return False
1
15,333
So now there are two ways to get requests: `ScenarioExecutor.get_requests()` and `Scenario.get_requests()`. To avoid confusion we should probably make the `Scenario.get_requests()` method private, or hide it in some other way. (A sketch follows this record.)
Blazemeter-taurus
py
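A minimal sketch (Python) of the hiding suggested above; the underscore rename and the class bodies are illustrative assumptions, not taurus's real code:

    class Scenario(object):
        def _get_requests(self):
            """Private helper: parse request entries out of the scenario config."""
            return []  # parsing logic would live here

    class ScenarioExecutor(object):
        def get_scenario(self):
            return Scenario()

        def get_requests(self):
            """Single public entry point for obtaining requests."""
            return self.get_scenario()._get_requests()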
@@ -2436,10 +2436,10 @@ public: } else if (message_a.response) { - auto node_id (message_a.response->first); - connection->remote_node_id = node_id; + nano::account node_id (message_a.response->first); if (!connection->node->network.syn_cookies.validate (nano::transport::map_tcp_to_endpoint (connection->remote_endpoint), node_id, message_a.response->second) && node_id != connection->node->node_id.pub) { + connection->remote_node_id = node_id; connection->type = nano::bootstrap_server_type::realtime; ++connection->node->bootstrap.realtime_count; connection->finish_request_async ();
1
#include <nano/crypto_lib/random_pool.hpp> #include <nano/node/bootstrap.hpp> #include <nano/node/common.hpp> #include <nano/node/node.hpp> #include <nano/node/transport/tcp.hpp> #include <nano/node/transport/udp.hpp> #include <boost/log/trivial.hpp> #include <algorithm> constexpr double bootstrap_connection_scale_target_blocks = 50000.0; constexpr double bootstrap_connection_warmup_time_sec = 5.0; constexpr double bootstrap_minimum_blocks_per_sec = 10.0; constexpr double bootstrap_minimum_elapsed_seconds_blockrate = 0.02; constexpr double bootstrap_minimum_frontier_blocks_per_sec = 1000.0; constexpr unsigned bootstrap_frontier_retry_limit = 16; constexpr double bootstrap_minimum_termination_time_sec = 30.0; constexpr unsigned bootstrap_max_new_connections = 10; constexpr unsigned bulk_push_cost_limit = 200; size_t constexpr nano::frontier_req_client::size_frontier; nano::bootstrap_client::bootstrap_client (std::shared_ptr<nano::node> node_a, std::shared_ptr<nano::bootstrap_attempt> attempt_a, std::shared_ptr<nano::transport::channel_tcp> channel_a) : node (node_a), attempt (attempt_a), channel (channel_a), receive_buffer (std::make_shared<std::vector<uint8_t>> ()), start_time (std::chrono::steady_clock::now ()), block_count (0), pending_stop (false), hard_stop (false) { ++attempt->connections; receive_buffer->resize (256); } nano::bootstrap_client::~bootstrap_client () { --attempt->connections; } double nano::bootstrap_client::block_rate () const { auto elapsed = std::max (elapsed_seconds (), bootstrap_minimum_elapsed_seconds_blockrate); return static_cast<double> (block_count.load () / elapsed); } double nano::bootstrap_client::elapsed_seconds () const { return std::chrono::duration_cast<std::chrono::duration<double>> (std::chrono::steady_clock::now () - start_time).count (); } void nano::bootstrap_client::stop (bool force) { pending_stop = true; if (force) { hard_stop = true; } } void nano::frontier_req_client::run () { nano::frontier_req request; request.start.clear (); request.age = std::numeric_limits<decltype (request.age)>::max (); request.count = std::numeric_limits<decltype (request.count)>::max (); auto this_l (shared_from_this ()); connection->channel->send ( request, [this_l](boost::system::error_code const & ec, size_t size_a) { if (!ec) { this_l->receive_frontier (); } else { if (this_l->connection->node->config.logging.network_logging ()) { this_l->connection->node->logger.try_log (boost::str (boost::format ("Error while sending bootstrap request %1%") % ec.message ())); } } }, false); // is bootstrap traffic is_dropable false } std::shared_ptr<nano::bootstrap_client> nano::bootstrap_client::shared () { return shared_from_this (); } nano::frontier_req_client::frontier_req_client (std::shared_ptr<nano::bootstrap_client> connection_a) : connection (connection_a), current (0), count (0), bulk_push_cost (0) { auto transaction (connection->node->store.tx_begin_read ()); next (transaction); } nano::frontier_req_client::~frontier_req_client () { } void nano::frontier_req_client::receive_frontier () { auto this_l (shared_from_this ()); connection->channel->socket->async_read (connection->receive_buffer, nano::frontier_req_client::size_frontier, [this_l](boost::system::error_code const & ec, size_t size_a) { // An issue with asio is that sometimes, instead of reporting a bad file descriptor during disconnect, // we simply get a size of 0. 
void nano::frontier_req_client::receive_frontier ()
{
	auto this_l (shared_from_this ());
	connection->channel->socket->async_read (connection->receive_buffer, nano::frontier_req_client::size_frontier, [this_l](boost::system::error_code const & ec, size_t size_a) {
		// An issue with asio is that sometimes, instead of reporting a bad file descriptor during disconnect,
		// we simply get a size of 0.
		if (size_a == nano::frontier_req_client::size_frontier)
		{
			this_l->received_frontier (ec, size_a);
		}
		else
		{
			if (this_l->connection->node->config.logging.network_message_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Invalid size: expected %1%, got %2%") % nano::frontier_req_client::size_frontier % size_a));
			}
		}
	});
}

void nano::frontier_req_client::unsynced (nano::block_hash const & head, nano::block_hash const & end)
{
	if (bulk_push_cost < bulk_push_cost_limit)
	{
		connection->attempt->add_bulk_push_target (head, end);
		if (end.is_zero ())
		{
			bulk_push_cost += 2;
		}
		else
		{
			bulk_push_cost += 1;
		}
	}
}

void nano::frontier_req_client::received_frontier (boost::system::error_code const & ec, size_t size_a)
{
	if (!ec)
	{
		assert (size_a == nano::frontier_req_client::size_frontier);
		nano::account account;
		nano::bufferstream account_stream (connection->receive_buffer->data (), sizeof (account));
		auto error1 (nano::try_read (account_stream, account));
		assert (!error1);
		nano::block_hash latest;
		nano::bufferstream latest_stream (connection->receive_buffer->data () + sizeof (account), sizeof (latest));
		auto error2 (nano::try_read (latest_stream, latest));
		assert (!error2);
		if (count == 0)
		{
			start_time = std::chrono::steady_clock::now ();
		}
		++count;
		std::chrono::duration<double> time_span = std::chrono::duration_cast<std::chrono::duration<double>> (std::chrono::steady_clock::now () - start_time);
		double elapsed_sec = std::max (time_span.count (), bootstrap_minimum_elapsed_seconds_blockrate);
		double blocks_per_sec = static_cast<double> (count) / elapsed_sec;
		if (elapsed_sec > bootstrap_connection_warmup_time_sec && blocks_per_sec < bootstrap_minimum_frontier_blocks_per_sec)
		{
			connection->node->logger.try_log (boost::str (boost::format ("Aborting frontier req because it was too slow")));
			promise.set_value (true);
			return;
		}
		if (connection->attempt->should_log ())
		{
			connection->node->logger.always_log (boost::str (boost::format ("Received %1% frontiers from %2%") % std::to_string (count) % connection->channel->to_string ()));
		}
		auto transaction (connection->node->store.tx_begin_read ());
		if (!account.is_zero ())
		{
			while (!current.is_zero () && current < account)
			{
				// We know about an account they don't.
				unsynced (frontier, 0);
				next (transaction);
			}
			if (!current.is_zero ())
			{
				if (account == current)
				{
					if (latest == frontier)
					{
						// In sync
					}
					else
					{
						if (connection->node->store.block_exists (transaction, latest))
						{
							// We know about a block they don't.
							unsynced (frontier, latest);
						}
						else
						{
							connection->attempt->add_pull (nano::pull_info (account, latest, frontier));
							// Either we're behind or there's a fork we differ on
							// Either way, bulk pushing will probably not be effective
							bulk_push_cost += 5;
						}
					}
					next (transaction);
				}
				else
				{
					assert (account < current);
					connection->attempt->add_pull (nano::pull_info (account, latest, nano::block_hash (0)));
				}
			}
			else
			{
				connection->attempt->add_pull (nano::pull_info (account, latest, nano::block_hash (0)));
			}
			receive_frontier ();
		}
		else
		{
			while (!current.is_zero ())
			{
				// We know about an account they don't.
				unsynced (frontier, 0);
				next (transaction);
			}
			if (connection->node->config.logging.bulk_pull_logging ())
			{
				connection->node->logger.try_log ("Bulk push cost: ", bulk_push_cost);
			}
			{
				try
				{
					promise.set_value (false);
				}
				catch (std::future_error &)
				{
				}
				connection->attempt->pool_connection (connection);
			}
		}
	}
	else
	{
		if (connection->node->config.logging.network_logging ())
		{
			connection->node->logger.try_log (boost::str (boost::format ("Error while receiving frontier %1%") % ec.message ()));
		}
	}
}
void nano::frontier_req_client::next (nano::transaction const & transaction_a)
{
	// Filling accounts deque to prevent often read transactions
	if (accounts.empty ())
	{
		size_t max_size (128);
		for (auto i (connection->node->store.latest_begin (transaction_a, current.number () + 1)), n (connection->node->store.latest_end ()); i != n && accounts.size () != max_size; ++i)
		{
			nano::account_info const & info (i->second);
			nano::account const & account (i->first);
			accounts.emplace_back (account, info.head);
		}
		/* If loop breaks before max_size, then latest_end () is reached
		Add empty record to finish frontier_req_server */
		if (accounts.size () != max_size)
		{
			accounts.emplace_back (nano::account (0), nano::block_hash (0));
		}
	}
	// Retrieving accounts from deque
	auto const & account_pair (accounts.front ());
	current = account_pair.first;
	frontier = account_pair.second;
	accounts.pop_front ();
}

nano::bulk_pull_client::bulk_pull_client (std::shared_ptr<nano::bootstrap_client> connection_a, nano::pull_info const & pull_a) :
connection (connection_a),
known_account (0),
pull (pull_a),
total_blocks (0),
unexpected_count (0)
{
	std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
	connection->attempt->condition.notify_all ();
}

nano::bulk_pull_client::~bulk_pull_client ()
{
	// If received end block is not expected end block
	if (expected != pull.end)
	{
		pull.head = expected;
		if (connection->attempt->mode != nano::bootstrap_mode::legacy)
		{
			pull.account = expected;
		}
		pull.processed += total_blocks - unexpected_count;
		connection->attempt->requeue_pull (pull);
		if (connection->node->config.logging.bulk_pull_logging ())
		{
			connection->node->logger.try_log (boost::str (boost::format ("Bulk pull end block is not expected %1% for account %2%") % pull.end.to_string () % pull.account.to_account ()));
		}
	}
	else
	{
		connection->node->bootstrap_initiator.cache.remove (pull);
	}
	{
		std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
		--connection->attempt->pulling;
	}
	connection->attempt->condition.notify_all ();
}
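// Issues a bulk_pull for a single account chain: the peer streams back a
// one-byte block type followed by the block body, repeating until a
// not_a_block type byte terminates the pull.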
void nano::bulk_pull_client::request ()
{
	expected = pull.head;
	nano::bulk_pull req;
	req.start = (pull.head == pull.head_original) ? pull.account : pull.head; // Account for new pulls, head for cached pulls
	req.end = pull.end;
	req.count = pull.count;
	req.set_count_present (pull.count != 0);
	if (connection->node->config.logging.bulk_pull_logging ())
	{
		std::unique_lock<std::mutex> lock (connection->attempt->mutex);
		connection->node->logger.try_log (boost::str (boost::format ("Requesting account %1% from %2%. %3% accounts in queue") % pull.account.to_account () % connection->channel->to_string () % connection->attempt->pulls.size ()));
	}
	else if (connection->node->config.logging.network_logging () && connection->attempt->should_log ())
	{
		std::unique_lock<std::mutex> lock (connection->attempt->mutex);
		connection->node->logger.always_log (boost::str (boost::format ("%1% accounts in pull queue") % connection->attempt->pulls.size ()));
	}
	auto this_l (shared_from_this ());
	connection->channel->send (
	req, [this_l](boost::system::error_code const & ec, size_t size_a) {
		if (!ec)
		{
			this_l->receive_block ();
		}
		else
		{
			if (this_l->connection->node->config.logging.bulk_pull_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Error sending bulk pull request to %1%: to %2%") % ec.message () % this_l->connection->channel->to_string ()));
			}
			this_l->connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_request_failure, nano::stat::dir::in);
		}
	},
	false); // is bootstrap traffic is_dropable false
}

void nano::bulk_pull_client::receive_block ()
{
	auto this_l (shared_from_this ());
	connection->channel->socket->async_read (connection->receive_buffer, 1, [this_l](boost::system::error_code const & ec, size_t size_a) {
		if (!ec)
		{
			this_l->received_type ();
		}
		else
		{
			if (this_l->connection->node->config.logging.bulk_pull_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Error receiving block type: %1%") % ec.message ()));
			}
			this_l->connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_receive_block_failure, nano::stat::dir::in);
		}
	});
}

void nano::bulk_pull_client::received_type ()
{
	auto this_l (shared_from_this ());
	nano::block_type type (static_cast<nano::block_type> (connection->receive_buffer->data ()[0]));
	switch (type)
	{
		case nano::block_type::send:
		{
			connection->channel->socket->async_read (connection->receive_buffer, nano::send_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
				this_l->received_block (ec, size_a, type);
			});
			break;
		}
		case nano::block_type::receive:
		{
			connection->channel->socket->async_read (connection->receive_buffer, nano::receive_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
				this_l->received_block (ec, size_a, type);
			});
			break;
		}
		case nano::block_type::open:
		{
			connection->channel->socket->async_read (connection->receive_buffer, nano::open_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
				this_l->received_block (ec, size_a, type);
			});
			break;
		}
		case nano::block_type::change:
		{
			connection->channel->socket->async_read (connection->receive_buffer, nano::change_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
				this_l->received_block (ec, size_a, type);
			});
			break;
		}
		case nano::block_type::state:
		{
			connection->channel->socket->async_read (connection->receive_buffer, nano::state_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
				this_l->received_block (ec, size_a, type);
			});
			break;
		}
		case nano::block_type::not_a_block:
		{
			// Avoid re-using slow peers, or peers that sent the wrong blocks.
			if (!connection->pending_stop && expected == pull.end)
			{
				connection->attempt->pool_connection (connection);
			}
			break;
		}
		default:
		{
			if (connection->node->config.logging.network_packet_logging ())
			{
				connection->node->logger.try_log (boost::str (boost::format ("Unknown type received as block type: %1%") % static_cast<int> (type)));
			}
			break;
		}
	}
}

void nano::bulk_pull_client::received_block (boost::system::error_code const & ec, size_t size_a, nano::block_type type_a)
{
	if (!ec)
	{
		nano::bufferstream stream (connection->receive_buffer->data (), size_a);
		std::shared_ptr<nano::block> block (nano::deserialize_block (stream, type_a));
		if (block != nullptr && !nano::work_validate (*block))
		{
			auto hash (block->hash ());
			if (connection->node->config.logging.bulk_pull_logging ())
			{
				std::string block_l;
				block->serialize_json (block_l);
				connection->node->logger.try_log (boost::str (boost::format ("Pulled block %1% %2%") % hash.to_string () % block_l));
			}
			// Is block expected?
			bool block_expected (false);
			if (hash == expected)
			{
				expected = block->previous ();
				block_expected = true;
			}
			else
			{
				unexpected_count++;
			}
			if (total_blocks == 0 && block_expected)
			{
				known_account = block->account ();
			}
			if (connection->block_count++ == 0)
			{
				connection->start_time = std::chrono::steady_clock::now ();
			}
			connection->attempt->total_blocks++;
			total_blocks++;
			bool stop_pull (connection->attempt->process_block (block, known_account, total_blocks, block_expected));
			if (!stop_pull && !connection->hard_stop.load ())
			{
				/* Process block in lazy pull if not stopped
				Stop usual pull request with unexpected block & more than 16k blocks processed
				to prevent spam */
				if (connection->attempt->mode != nano::bootstrap_mode::legacy || unexpected_count < 16384)
				{
					receive_block ();
				}
			}
			else if (stop_pull && block_expected)
			{
				expected = pull.end;
				connection->attempt->pool_connection (connection);
			}
			if (stop_pull)
			{
				connection->attempt->lazy_stopped++;
			}
		}
		else
		{
			if (connection->node->config.logging.bulk_pull_logging ())
			{
				connection->node->logger.try_log ("Error deserializing block received from pull request");
			}
			connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_deserialize_receive_block, nano::stat::dir::in);
		}
	}
	else
	{
		if (connection->node->config.logging.bulk_pull_logging ())
		{
			connection->node->logger.try_log (boost::str (boost::format ("Error bulk receiving block: %1%") % ec.message ()));
		}
		connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_receive_block_failure, nano::stat::dir::in);
	}
}

nano::bulk_push_client::bulk_push_client (std::shared_ptr<nano::bootstrap_client> const & connection_a) :
connection (connection_a)
{
}

nano::bulk_push_client::~bulk_push_client ()
{
}

void nano::bulk_push_client::start ()
{
	nano::bulk_push message;
	auto this_l (shared_from_this ());
	connection->channel->send (
	message, [this_l](boost::system::error_code const & ec, size_t size_a) {
		auto transaction (this_l->connection->node->store.tx_begin_read ());
		if (!ec)
		{
			this_l->push (transaction);
		}
		else
		{
			if (this_l->connection->node->config.logging.bulk_pull_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Unable to send bulk_push request: %1%") % ec.message ()));
			}
		}
	},
	false); // is bootstrap traffic is_dropable false
}
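// Walks each (head, end) target from newest to oldest by following
// previous () pointers, serializing every block to the peer, then sends a
// not_a_block terminator once all targets are exhausted.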
void nano::bulk_push_client::push (nano::transaction const & transaction_a)
{
	std::shared_ptr<nano::block> block;
	bool finished (false);
	while (block == nullptr && !finished)
	{
		if (current_target.first.is_zero () || current_target.first == current_target.second)
		{
			std::lock_guard<std::mutex> guard (connection->attempt->mutex);
			if (!connection->attempt->bulk_push_targets.empty ())
			{
				current_target = connection->attempt->bulk_push_targets.back ();
				connection->attempt->bulk_push_targets.pop_back ();
			}
			else
			{
				finished = true;
			}
		}
		if (!finished)
		{
			block = connection->node->store.block_get (transaction_a, current_target.first);
			if (block == nullptr)
			{
				current_target.first = nano::block_hash (0);
			}
			else
			{
				if (connection->node->config.logging.bulk_pull_logging ())
				{
					connection->node->logger.try_log ("Bulk pushing range ", current_target.first.to_string (), " down to ", current_target.second.to_string ());
				}
			}
		}
	}
	if (finished)
	{
		send_finished ();
	}
	else
	{
		current_target.first = block->previous ();
		push_block (*block);
	}
}

void nano::bulk_push_client::send_finished ()
{
	auto buffer (std::make_shared<std::vector<uint8_t>> ());
	buffer->push_back (static_cast<uint8_t> (nano::block_type::not_a_block));
	auto this_l (shared_from_this ());
	connection->channel->send_buffer (buffer, nano::stat::detail::all, [this_l](boost::system::error_code const & ec, size_t size_a) {
		try
		{
			this_l->promise.set_value (false);
		}
		catch (std::future_error &)
		{
		}
	});
}

void nano::bulk_push_client::push_block (nano::block const & block_a)
{
	auto buffer (std::make_shared<std::vector<uint8_t>> ());
	{
		nano::vectorstream stream (*buffer);
		nano::serialize_block (stream, block_a);
	}
	auto this_l (shared_from_this ());
	connection->channel->send_buffer (buffer, nano::stat::detail::all, [this_l](boost::system::error_code const & ec, size_t size_a) {
		if (!ec)
		{
			auto transaction (this_l->connection->node->store.tx_begin_read ());
			this_l->push (transaction);
		}
		else
		{
			if (this_l->connection->node->config.logging.bulk_pull_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Error sending block during bulk push: %1%") % ec.message ()));
			}
		}
	});
}

nano::bulk_pull_account_client::bulk_pull_account_client (std::shared_ptr<nano::bootstrap_client> connection_a, nano::account const & account_a) :
connection (connection_a),
account (account_a),
total_blocks (0)
{
	connection->attempt->condition.notify_all ();
}

nano::bulk_pull_account_client::~bulk_pull_account_client ()
{
	{
		std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
		--connection->attempt->pulling;
	}
	connection->attempt->condition.notify_all ();
}
void nano::bulk_pull_account_client::request ()
{
	nano::bulk_pull_account req;
	req.account = account;
	req.minimum_amount = connection->node->config.receive_minimum;
	req.flags = nano::bulk_pull_account_flags::pending_hash_and_amount;
	if (connection->node->config.logging.bulk_pull_logging ())
	{
		std::unique_lock<std::mutex> lock (connection->attempt->mutex);
		connection->node->logger.try_log (boost::str (boost::format ("Requesting pending for account %1% from %2%. %3% accounts in queue") % req.account.to_account () % connection->channel->to_string () % connection->attempt->wallet_accounts.size ()));
	}
	else if (connection->node->config.logging.network_logging () && connection->attempt->should_log ())
	{
		std::unique_lock<std::mutex> lock (connection->attempt->mutex);
		connection->node->logger.always_log (boost::str (boost::format ("%1% accounts in pull queue") % connection->attempt->wallet_accounts.size ()));
	}
	auto this_l (shared_from_this ());
	connection->channel->send (
	req, [this_l](boost::system::error_code const & ec, size_t size_a) {
		if (!ec)
		{
			this_l->receive_pending ();
		}
		else
		{
			this_l->connection->attempt->requeue_pending (this_l->account);
			if (this_l->connection->node->config.logging.bulk_pull_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Error starting bulk pull request to %1%: to %2%") % ec.message () % this_l->connection->channel->to_string ()));
			}
			this_l->connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_error_starting_request, nano::stat::dir::in);
		}
	},
	false); // is bootstrap traffic is_dropable false
}

void nano::bulk_pull_account_client::receive_pending ()
{
	auto this_l (shared_from_this ());
	size_t size_l (sizeof (nano::uint256_union) + sizeof (nano::uint128_union));
	connection->channel->socket->async_read (connection->receive_buffer, size_l, [this_l, size_l](boost::system::error_code const & ec, size_t size_a) {
		// An issue with asio is that sometimes, instead of reporting a bad file descriptor during disconnect,
		// we simply get a size of 0.
		if (size_a == size_l)
		{
			if (!ec)
			{
				nano::block_hash pending;
				nano::bufferstream frontier_stream (this_l->connection->receive_buffer->data (), sizeof (nano::uint256_union));
				auto error1 (nano::try_read (frontier_stream, pending));
				assert (!error1);
				nano::amount balance;
				nano::bufferstream balance_stream (this_l->connection->receive_buffer->data () + sizeof (nano::uint256_union), sizeof (nano::uint128_union));
				auto error2 (nano::try_read (balance_stream, balance));
				assert (!error2);
				if (this_l->total_blocks == 0 || !pending.is_zero ())
				{
					if (this_l->total_blocks == 0 || balance.number () >= this_l->connection->node->config.receive_minimum.number ())
					{
						this_l->total_blocks++;
						{
							if (!pending.is_zero ())
							{
								auto transaction (this_l->connection->node->store.tx_begin_read ());
								if (!this_l->connection->node->store.block_exists (transaction, pending))
								{
									this_l->connection->attempt->lazy_start (pending);
								}
							}
						}
						this_l->receive_pending ();
					}
					else
					{
						this_l->connection->attempt->requeue_pending (this_l->account);
					}
				}
				else
				{
					this_l->connection->attempt->pool_connection (this_l->connection);
				}
			}
			else
			{
				this_l->connection->attempt->requeue_pending (this_l->account);
				if (this_l->connection->node->config.logging.network_logging ())
				{
					this_l->connection->node->logger.try_log (boost::str (boost::format ("Error while receiving bulk pull account frontier %1%") % ec.message ()));
				}
			}
		}
		else
		{
			this_l->connection->attempt->requeue_pending (this_l->account);
			if (this_l->connection->node->config.logging.network_message_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Invalid size: expected %1%, got %2%") % size_l % size_a));
			}
		}
	});
}

nano::pull_info::pull_info (nano::account const & account_a, nano::block_hash const & head_a, nano::block_hash const & end_a, count_t count_a) :
account (account_a),
head (head_a),
head_original (head_a),
end (end_a),
count (count_a)
{
}
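// A bootstrap_attempt coordinates one bootstrap session; as runs complete it
// can switch between legacy (frontier-based), lazy (hash-based) and
// wallet_lazy (pending-based) modes.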
nano::bootstrap_attempt::bootstrap_attempt (std::shared_ptr<nano::node> node_a) :
next_log (std::chrono::steady_clock::now ()),
connections (0),
pulling (0),
node (node_a),
account_count (0),
total_blocks (0),
runs_count (0),
stopped (false),
mode (nano::bootstrap_mode::legacy),
lazy_stopped (0)
{
	node->logger.always_log ("Starting bootstrap attempt");
	node->bootstrap_initiator.notify_listeners (true);
}

nano::bootstrap_attempt::~bootstrap_attempt ()
{
	node->logger.always_log ("Exiting bootstrap attempt");
	node->bootstrap_initiator.notify_listeners (false);
}

bool nano::bootstrap_attempt::should_log ()
{
	std::lock_guard<std::mutex> lock (mutex);
	auto result (false);
	auto now (std::chrono::steady_clock::now ());
	if (next_log < now)
	{
		result = true;
		next_log = now + std::chrono::seconds (15);
	}
	return result;
}

bool nano::bootstrap_attempt::request_frontier (std::unique_lock<std::mutex> & lock_a)
{
	auto result (true);
	auto connection_l (connection (lock_a));
	connection_frontier_request = connection_l;
	if (connection_l)
	{
		std::future<bool> future;
		{
			auto client (std::make_shared<nano::frontier_req_client> (connection_l));
			client->run ();
			frontiers = client;
			future = client->promise.get_future ();
		}
		lock_a.unlock ();
		result = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception.
		lock_a.lock ();
		if (result)
		{
			pulls.clear ();
		}
		if (node->config.logging.network_logging ())
		{
			if (!result)
			{
				node->logger.try_log (boost::str (boost::format ("Completed frontier request, %1% out of sync accounts according to %2%") % pulls.size () % connection_l->channel->to_string ()));
			}
			else
			{
				node->stats.inc (nano::stat::type::error, nano::stat::detail::frontier_req, nano::stat::dir::out);
			}
		}
	}
	return result;
}

void nano::bootstrap_attempt::request_pull (std::unique_lock<std::mutex> & lock_a)
{
	auto connection_l (connection (lock_a));
	if (connection_l)
	{
		auto pull (pulls.front ());
		pulls.pop_front ();
		if (mode != nano::bootstrap_mode::legacy)
		{
			// Check if pull is obsolete (head was processed)
			std::unique_lock<std::mutex> lock (lazy_mutex);
			auto transaction (node->store.tx_begin_read ());
			while (!pulls.empty () && !pull.head.is_zero () && (lazy_blocks.find (pull.head) != lazy_blocks.end () || node->store.block_exists (transaction, pull.head)))
			{
				pull = pulls.front ();
				pulls.pop_front ();
			}
		}
		++pulling;
		// The bulk_pull_client destructor attempt to requeue_pull which can cause a deadlock if this is the last reference
		// Dispatch request in an external thread in case it needs to be destroyed
		node->background ([connection_l, pull]() {
			auto client (std::make_shared<nano::bulk_pull_client> (connection_l, pull));
			client->request ();
		});
	}
}

void nano::bootstrap_attempt::request_push (std::unique_lock<std::mutex> & lock_a)
{
	bool error (false);
	if (auto connection_shared = connection_frontier_request.lock ())
	{
		std::future<bool> future;
		{
			auto client (std::make_shared<nano::bulk_push_client> (connection_shared));
			client->start ();
			push = client;
			future = client->promise.get_future ();
		}
		lock_a.unlock ();
		error = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception.
		lock_a.lock ();
	}
	if (node->config.logging.network_logging ())
	{
		node->logger.try_log ("Exiting bulk push client");
		if (error)
		{
			node->logger.try_log ("Bulk push client failed");
		}
	}
}
bool nano::bootstrap_attempt::still_pulling ()
{
	assert (!mutex.try_lock ());
	auto running (!stopped);
	auto more_pulls (!pulls.empty ());
	auto still_pulling (pulling > 0);
	return running && (more_pulls || still_pulling);
}

void nano::bootstrap_attempt::run ()
{
	populate_connections ();
	std::unique_lock<std::mutex> lock (mutex);
	auto frontier_failure (true);
	while (!stopped && frontier_failure)
	{
		frontier_failure = request_frontier (lock);
	}
	// Shuffle pulls.
	release_assert (std::numeric_limits<CryptoPP::word32>::max () > pulls.size ());
	if (!pulls.empty ())
	{
		for (auto i = static_cast<CryptoPP::word32> (pulls.size () - 1); i > 0; --i)
		{
			auto k = nano::random_pool::generate_word32 (0, i);
			std::swap (pulls[i], pulls[k]);
		}
	}
	while (still_pulling ())
	{
		while (still_pulling ())
		{
			if (!pulls.empty ())
			{
				if (!node->block_processor.full ())
				{
					request_pull (lock);
				}
				else
				{
					condition.wait_for (lock, std::chrono::seconds (15));
				}
			}
			else
			{
				condition.wait (lock);
			}
		}
		// Flushing may resolve forks which can add more pulls
		node->logger.try_log ("Flushing unchecked blocks");
		lock.unlock ();
		node->block_processor.flush ();
		lock.lock ();
		node->logger.try_log ("Finished flushing unchecked blocks");
	}
	if (!stopped)
	{
		node->logger.try_log ("Completed pulls");
		request_push (lock);
		runs_count++;
		// Start wallet lazy bootstrap if required
		if (!wallet_accounts.empty () && !node->flags.disable_wallet_bootstrap)
		{
			lock.unlock ();
			mode = nano::bootstrap_mode::wallet_lazy;
			wallet_run ();
			lock.lock ();
		}
		// Start lazy bootstrap if some lazy keys were inserted
		else if (runs_count < 3 && !lazy_finished () && !node->flags.disable_lazy_bootstrap)
		{
			lock.unlock ();
			mode = nano::bootstrap_mode::lazy;
			lazy_run ();
			lock.lock ();
		}
		if (!node->flags.disable_unchecked_cleanup)
		{
			node->unchecked_cleanup ();
		}
	}
	stopped = true;
	condition.notify_all ();
	idle.clear ();
}

std::shared_ptr<nano::bootstrap_client> nano::bootstrap_attempt::connection (std::unique_lock<std::mutex> & lock_a)
{
	while (!stopped && idle.empty ())
	{
		condition.wait (lock_a);
	}
	std::shared_ptr<nano::bootstrap_client> result;
	if (!idle.empty ())
	{
		result = idle.back ();
		idle.pop_back ();
	}
	return result;
}

bool nano::bootstrap_attempt::consume_future (std::future<bool> & future_a)
{
	bool result;
	try
	{
		result = future_a.get ();
	}
	catch (std::future_error &)
	{
		result = true;
	}
	return result;
}

struct block_rate_cmp
{
	bool operator() (const std::shared_ptr<nano::bootstrap_client> & lhs, const std::shared_ptr<nano::bootstrap_client> & rhs) const
	{
		return lhs->block_rate () > rhs->block_rate ();
	}
};
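// Scales the connection target linearly with the number of remaining pulls.
// For example, with bootstrap_connections = 4, bootstrap_connections_max = 64
// and 25000 pulls remaining: step = 25000 / 50000 = 0.5, so the target is
// 4 + (64 - 4) * 0.5 = 34 connections.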
unsigned nano::bootstrap_attempt::target_connections (size_t pulls_remaining)
{
	if (node->config.bootstrap_connections >= node->config.bootstrap_connections_max)
	{
		return std::max (1U, node->config.bootstrap_connections_max);
	}
	// Only scale up to bootstrap_connections_max for large pulls.
	double step = std::min (1.0, std::max (0.0, (double)pulls_remaining / bootstrap_connection_scale_target_blocks));
	double target = (double)node->config.bootstrap_connections + (double)(node->config.bootstrap_connections_max - node->config.bootstrap_connections) * step;
	return std::max (1U, (unsigned)(target + 0.5f));
}

void nano::bootstrap_attempt::populate_connections ()
{
	double rate_sum = 0.0;
	size_t num_pulls = 0;
	std::priority_queue<std::shared_ptr<nano::bootstrap_client>, std::vector<std::shared_ptr<nano::bootstrap_client>>, block_rate_cmp> sorted_connections;
	std::unordered_set<nano::tcp_endpoint> endpoints;
	{
		std::unique_lock<std::mutex> lock (mutex);
		num_pulls = pulls.size ();
		std::deque<std::weak_ptr<nano::bootstrap_client>> new_clients;
		for (auto & c : clients)
		{
			if (auto client = c.lock ())
			{
				new_clients.push_back (client);
				endpoints.insert (client->channel->socket->remote_endpoint ());
				double elapsed_sec = client->elapsed_seconds ();
				auto blocks_per_sec = client->block_rate ();
				rate_sum += blocks_per_sec;
				if (client->elapsed_seconds () > bootstrap_connection_warmup_time_sec && client->block_count > 0)
				{
					sorted_connections.push (client);
				}
				// Force-stop the slowest peers, since they can take the whole bootstrap hostage by dribbling out blocks on the last remaining pull.
				// This is ~1.5kilobits/sec.
				if (elapsed_sec > bootstrap_minimum_termination_time_sec && blocks_per_sec < bootstrap_minimum_blocks_per_sec)
				{
					if (node->config.logging.bulk_pull_logging ())
					{
						node->logger.try_log (boost::str (boost::format ("Stopping slow peer %1% (elapsed sec %2%s > %3%s and %4% blocks per second < %5%)") % client->channel->to_string () % elapsed_sec % bootstrap_minimum_termination_time_sec % blocks_per_sec % bootstrap_minimum_blocks_per_sec));
					}
					client->stop (true);
				}
			}
		}
		// Cleanup expired clients
		clients.swap (new_clients);
	}
	auto target = target_connections (num_pulls);
	// We only want to drop slow peers when more than 2/3 are active. 2/3 because 1/2 is too aggressive, and 100% rarely happens.
	// Probably needs more tuning.
	if (sorted_connections.size () >= (target * 2) / 3 && target >= 4)
	{
		// 4 -> 1, 8 -> 2, 16 -> 4, arbitrary, but seems to work well.
		auto drop = (int)roundf (sqrtf ((float)target - 2.0f));
		if (node->config.logging.bulk_pull_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Dropping %1% bulk pull peers, target connections %2%") % drop % target));
		}
		for (int i = 0; i < drop; i++)
		{
			auto client = sorted_connections.top ();
			if (node->config.logging.bulk_pull_logging ())
			{
				node->logger.try_log (boost::str (boost::format ("Dropping peer with block rate %1%, block count %2% (%3%) ") % client->block_rate () % client->block_count % client->channel->to_string ()));
			}
			client->stop (false);
			sorted_connections.pop ();
		}
	}
	if (node->config.logging.bulk_pull_logging ())
	{
		std::unique_lock<std::mutex> lock (mutex);
		node->logger.try_log (boost::str (boost::format ("Bulk pull connections: %1%, rate: %2% blocks/sec, remaining account pulls: %3%, total blocks: %4%") % connections.load () % (int)rate_sum % pulls.size () % (int)total_blocks.load ()));
	}
	if (connections < target)
	{
		auto delta = std::min ((target - connections) * 2, bootstrap_max_new_connections);
		// TODO - tune this better
		// Not many peers respond, need to try to make more connections than we need.
		for (auto i = 0u; i < delta; i++)
		{
			auto endpoint (node->network.bootstrap_peer ());
			if (endpoint != nano::tcp_endpoint (boost::asio::ip::address_v6::any (), 0) && endpoints.find (endpoint) == endpoints.end ())
			{
				connect_client (endpoint);
				std::lock_guard<std::mutex> lock (mutex);
				endpoints.insert (endpoint);
			}
			else if (connections == 0)
			{
				node->logger.try_log (boost::str (boost::format ("Bootstrap stopped because there are no peers")));
				stopped = true;
				condition.notify_all ();
			}
		}
	}
	if (!stopped)
	{
		std::weak_ptr<nano::bootstrap_attempt> this_w (shared_from_this ());
		node->alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (1), [this_w]() {
			if (auto this_l = this_w.lock ())
			{
				this_l->populate_connections ();
			}
		});
	}
}

void nano::bootstrap_attempt::add_connection (nano::endpoint const & endpoint_a)
{
	connect_client (nano::tcp_endpoint (endpoint_a.address (), endpoint_a.port ()));
}

void nano::bootstrap_attempt::connect_client (nano::tcp_endpoint const & endpoint_a)
{
	++connections;
	auto socket (std::make_shared<nano::socket> (node));
	auto this_l (shared_from_this ());
	socket->async_connect (endpoint_a, [this_l, socket, endpoint_a](boost::system::error_code const & ec) {
		if (!ec)
		{
			if (this_l->node->config.logging.bulk_pull_logging ())
			{
				this_l->node->logger.try_log (boost::str (boost::format ("Connection established to %1%") % endpoint_a));
			}
			auto client (std::make_shared<nano::bootstrap_client> (this_l->node, this_l, std::make_shared<nano::transport::channel_tcp> (*this_l->node, socket)));
			this_l->pool_connection (client);
		}
		else
		{
			if (this_l->node->config.logging.network_logging ())
			{
				switch (ec.value ())
				{
					default:
						this_l->node->logger.try_log (boost::str (boost::format ("Error initiating bootstrap connection to %1%: %2%") % endpoint_a % ec.message ()));
						break;
					case boost::system::errc::connection_refused:
					case boost::system::errc::operation_canceled:
					case boost::system::errc::timed_out:
					case 995: //Windows The I/O operation has been aborted because of either a thread exit or an application request
					case 10061: //Windows No connection could be made because the target machine actively refused it
						break;
				}
			}
		}
		--this_l->connections;
	});
}

void nano::bootstrap_attempt::pool_connection (std::shared_ptr<nano::bootstrap_client> client_a)
{
	std::lock_guard<std::mutex> lock (mutex);
	if (!stopped && !client_a->pending_stop)
	{
		// Idle bootstrap client socket
		client_a->channel->socket->start_timer (node->network_params.node.idle_timeout);
		// Push into idle deque
		idle.push_front (client_a);
	}
	condition.notify_all ();
}

void nano::bootstrap_attempt::stop ()
{
	std::lock_guard<std::mutex> lock (mutex);
	stopped = true;
	condition.notify_all ();
	for (auto i : clients)
	{
		if (auto client = i.lock ())
		{
			client->channel->socket->close ();
		}
	}
	if (auto i = frontiers.lock ())
	{
		try
		{
			i->promise.set_value (true);
		}
		catch (std::future_error &)
		{
		}
	}
	if (auto i = push.lock ())
	{
		try
		{
			i->promise.set_value (true);
		}
		catch (std::future_error &)
		{
		}
	}
}

void nano::bootstrap_attempt::add_pull (nano::pull_info const & pull_a)
{
	nano::pull_info pull (pull_a);
	node->bootstrap_initiator.cache.update_pull (pull);
	{
		std::lock_guard<std::mutex> lock (mutex);
		pulls.push_back (pull);
	}
	condition.notify_all ();
}

void nano::bootstrap_attempt::requeue_pull (nano::pull_info const & pull_a)
{
	auto pull (pull_a);
	if (++pull.attempts < (bootstrap_frontier_retry_limit + (pull.processed / 10000)))
	{
		std::lock_guard<std::mutex> lock (mutex);
		pulls.push_front (pull);
		condition.notify_all ();
	}
	else if (mode == nano::bootstrap_mode::lazy)
	{
		{
			// Retry for lazy pulls (not weak state block link assumptions)
			std::lock_guard<std::mutex> lock (mutex);
			pull.attempts++;
			pulls.push_back (pull);
		}
		condition.notify_all ();
	}
	else
	{
		if (node->config.logging.bulk_pull_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Failed to pull account %1% down to %2% after %3% attempts and %4% blocks processed") % pull.account.to_account () % pull.end.to_string () % pull.attempts % pull.processed));
		}
		node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_failed_account, nano::stat::dir::in);
		node->bootstrap_initiator.cache.add (pull);
	}
}

void nano::bootstrap_attempt::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end)
{
	std::lock_guard<std::mutex> lock (mutex);
	bulk_push_targets.push_back (std::make_pair (head, end));
}

void nano::bootstrap_attempt::lazy_start (nano::block_hash const & hash_a)
{
	std::unique_lock<std::mutex> lock (lazy_mutex);
	// Add start blocks, limit 1024 (32k with disabled legacy bootstrap)
	size_t max_keys (node->flags.disable_legacy_bootstrap ? 32 * 1024 : 1024);
	if (lazy_keys.size () < max_keys && lazy_keys.find (hash_a) == lazy_keys.end () && lazy_blocks.find (hash_a) == lazy_blocks.end ())
	{
		lazy_keys.insert (hash_a);
		lazy_pulls.push_back (hash_a);
	}
}

void nano::bootstrap_attempt::lazy_add (nano::block_hash const & hash_a)
{
	// Add only unknown blocks
	assert (!lazy_mutex.try_lock ());
	if (lazy_blocks.find (hash_a) == lazy_blocks.end ())
	{
		lazy_pulls.push_back (hash_a);
	}
}

void nano::bootstrap_attempt::lazy_pull_flush ()
{
	assert (!mutex.try_lock ());
	std::unique_lock<std::mutex> lazy_lock (lazy_mutex);
	auto transaction (node->store.tx_begin_read ());
	for (auto & pull_start : lazy_pulls)
	{
		// Recheck if block was already processed
		if (lazy_blocks.find (pull_start) == lazy_blocks.end () && !node->store.block_exists (transaction, pull_start))
		{
			assert (node->network_params.bootstrap.lazy_max_pull_blocks <= std::numeric_limits<nano::pull_info::count_t>::max ());
			pulls.push_back (nano::pull_info (pull_start, pull_start, nano::block_hash (0), static_cast<nano::pull_info::count_t> (node->network_params.bootstrap.lazy_max_pull_blocks)));
		}
	}
	lazy_pulls.clear ();
}

bool nano::bootstrap_attempt::lazy_finished ()
{
	bool result (true);
	auto transaction (node->store.tx_begin_read ());
	std::unique_lock<std::mutex> lock (lazy_mutex);
	for (auto it (lazy_keys.begin ()), end (lazy_keys.end ()); it != end && !stopped;)
	{
		if (node->store.block_exists (transaction, *it))
		{
			it = lazy_keys.erase (it);
		}
		else
		{
			result = false;
			break; // No need to increment `it` as we break above.
		}
	}
	// Finish lazy bootstrap without lazy pulls (in combination with still_pulling ())
	if (!result && lazy_pulls.empty ())
	{
		result = true;
	}
	return result;
}

void nano::bootstrap_attempt::lazy_clear ()
{
	assert (!lazy_mutex.try_lock ());
	lazy_blocks.clear ();
	lazy_keys.clear ();
	lazy_pulls.clear ();
	lazy_state_unknown.clear ();
	lazy_balances.clear ();
	lazy_stopped = 0;
}
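// Lazy bootstrap starts from a set of block hashes rather than frontiers and
// discovers further work on the fly: process_block () queues the source or
// link of each pulled block whenever that dependency is missing locally.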
void nano::bootstrap_attempt::lazy_run ()
{
	populate_connections ();
	auto start_time (std::chrono::steady_clock::now ());
	auto max_time (std::chrono::minutes (node->flags.disable_legacy_bootstrap ? 48 * 60 : 30));
	std::unique_lock<std::mutex> lock (mutex);
	while ((still_pulling () || !lazy_finished ()) && lazy_stopped < lazy_max_stopped && std::chrono::steady_clock::now () - start_time < max_time)
	{
		unsigned iterations (0);
		while (still_pulling () && lazy_stopped < lazy_max_stopped && std::chrono::steady_clock::now () - start_time < max_time)
		{
			if (!pulls.empty ())
			{
				if (!node->block_processor.full ())
				{
					request_pull (lock);
				}
				else
				{
					condition.wait_for (lock, std::chrono::seconds (15));
				}
			}
			else
			{
				condition.wait (lock);
			}
			++iterations;
			// Flushing lazy pulls
			if (iterations % 100 == 0)
			{
				lazy_pull_flush ();
			}
		}
		// Flushing may resolve forks which can add more pulls
		// Flushing lazy pulls
		lock.unlock ();
		node->block_processor.flush ();
		lock.lock ();
		lazy_pull_flush ();
	}
	if (!stopped)
	{
		node->logger.try_log ("Completed lazy pulls");
		std::unique_lock<std::mutex> lazy_lock (lazy_mutex);
		runs_count++;
		// Start wallet lazy bootstrap if required
		if (!wallet_accounts.empty () && !node->flags.disable_wallet_bootstrap)
		{
			pulls.clear ();
			lazy_clear ();
			mode = nano::bootstrap_mode::wallet_lazy;
			lock.unlock ();
			lazy_lock.unlock ();
			wallet_run ();
			lock.lock ();
		}
		// Fallback to legacy bootstrap
		else if (runs_count < 3 && !lazy_keys.empty () && !node->flags.disable_legacy_bootstrap)
		{
			pulls.clear ();
			lazy_clear ();
			mode = nano::bootstrap_mode::legacy;
			lock.unlock ();
			lazy_lock.unlock ();
			run ();
			lock.lock ();
		}
	}
	stopped = true;
	condition.notify_all ();
	idle.clear ();
}

bool nano::bootstrap_attempt::process_block (std::shared_ptr<nano::block> block_a, nano::account const & known_account_a, uint64_t total_blocks, bool block_expected)
{
	bool stop_pull (false);
	if (mode != nano::bootstrap_mode::legacy && block_expected)
	{
		auto hash (block_a->hash ());
		std::unique_lock<std::mutex> lock (lazy_mutex);
		// Processing new blocks
		if (lazy_blocks.find (hash) == lazy_blocks.end ())
		{
			// Search block in ledger (old)
			auto transaction (node->store.tx_begin_read ());
			if (!node->store.block_exists (transaction, block_a->type (), hash))
			{
				nano::uint128_t balance (std::numeric_limits<nano::uint128_t>::max ());
				nano::unchecked_info info (block_a, known_account_a, 0, nano::signature_verification::unknown);
				node->block_processor.add (info);
				// Search for new dependencies
				if (!block_a->source ().is_zero () && !node->store.block_exists (transaction, block_a->source ()))
				{
					lazy_add (block_a->source ());
				}
				else if (block_a->type () == nano::block_type::send)
				{
					// Calculate balance for legacy send blocks
					std::shared_ptr<nano::send_block> block_l (std::static_pointer_cast<nano::send_block> (block_a));
					if (block_l != nullptr)
					{
						balance = block_l->hashables.balance.number ();
					}
				}
				else if (block_a->type () == nano::block_type::state)
				{
					std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a));
					if (block_l != nullptr)
					{
						balance = block_l->hashables.balance.number ();
						nano::block_hash link (block_l->hashables.link);
						// If link is not epoch link or 0. And if block from link unknown
						if (!link.is_zero () && link != node->ledger.epoch_link && lazy_blocks.find (link) == lazy_blocks.end () && !node->store.block_exists (transaction, link))
						{
							nano::block_hash previous (block_l->hashables.previous);
							// If state block previous is 0 then source block required
							if (previous.is_zero ())
							{
								lazy_add (link);
							}
							// In other cases previous block balance required to find out subtype of state block
							else if (node->store.block_exists (transaction, previous))
							{
								nano::amount prev_balance (node->ledger.balance (transaction, previous));
								if (prev_balance.number () <= balance)
								{
									lazy_add (link);
								}
							}
							// Search balance of already processed previous blocks
							else if (lazy_blocks.find (previous) != lazy_blocks.end ())
							{
								auto previous_balance (lazy_balances.find (previous));
								if (previous_balance != lazy_balances.end ())
								{
									if (previous_balance->second <= balance)
									{
										lazy_add (link);
									}
									lazy_balances.erase (previous_balance);
								}
							}
							// Insert in unknown state blocks if previous wasn't already processed
							else
							{
								lazy_state_unknown.insert (std::make_pair (previous, std::make_pair (link, balance)));
							}
						}
					}
				}
				lazy_blocks.insert (hash);
				// Adding lazy balances
				if (total_blocks == 0)
				{
					lazy_balances.insert (std::make_pair (hash, balance));
				}
				// Removing lazy balances
				if (!block_a->previous ().is_zero () && lazy_balances.find (block_a->previous ()) != lazy_balances.end ())
				{
					lazy_balances.erase (block_a->previous ());
				}
			}
			// Drop bulk_pull if block is already known (ledger)
			else
			{
				// Disabled until server rewrite
				// stop_pull = true;
				// Force drop lazy bootstrap connection for long bulk_pull
				if (total_blocks > node->network_params.bootstrap.lazy_max_pull_blocks)
				{
					stop_pull = true;
				}
			}
			//Search unknown state blocks balances
			auto find_state (lazy_state_unknown.find (hash));
			if (find_state != lazy_state_unknown.end ())
			{
				auto next_block (find_state->second);
				lazy_state_unknown.erase (hash);
				// Retrieve balance for previous state blocks
				if (block_a->type () == nano::block_type::state)
				{
					std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a));
					if (block_l->hashables.balance.number () <= next_block.second)
					{
						lazy_add (next_block.first);
					}
				}
				// Retrieve balance for previous legacy send blocks
				else if (block_a->type () == nano::block_type::send)
				{
					std::shared_ptr<nano::send_block> block_l (std::static_pointer_cast<nano::send_block> (block_a));
					if (block_l->hashables.balance.number () <= next_block.second)
					{
						lazy_add (next_block.first);
					}
				}
				// Weak assumption for other legacy block types
				else
				{
					// Disabled
				}
			}
		}
		// Drop bulk_pull if block is already known (processed set)
		else
		{
			// Disabled until server rewrite
			// stop_pull = true;
			// Force drop lazy bootstrap connection for long bulk_pull
			if (total_blocks > node->network_params.bootstrap.lazy_max_pull_blocks)
			{
				stop_pull = true;
			}
		}
	}
	else if (mode != nano::bootstrap_mode::legacy)
	{
		// Drop connection with unexpected block for lazy bootstrap
		stop_pull = true;
	}
	else
	{
		nano::unchecked_info info (block_a, known_account_a, 0, nano::signature_verification::unknown);
		node->block_processor.add (info);
	}
	return stop_pull;
}
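// Wallet lazy bootstrap: for each wallet account, pull its pending entries
// from a peer and lazy-start a pull for any pending block not found locally.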
void nano::bootstrap_attempt::request_pending (std::unique_lock<std::mutex> & lock_a)
{
	auto connection_l (connection (lock_a));
	if (connection_l)
	{
		auto account (wallet_accounts.front ());
		wallet_accounts.pop_front ();
		++pulling;
		// The bulk_pull_account_client destructor attempt to requeue_pull which can cause a deadlock if this is the last reference
		// Dispatch request in an external thread in case it needs to be destroyed
		node->background ([connection_l, account]() {
			auto client (std::make_shared<nano::bulk_pull_account_client> (connection_l, account));
			client->request ();
		});
	}
}

void nano::bootstrap_attempt::requeue_pending (nano::account const & account_a)
{
	auto account (account_a);
	{
		std::lock_guard<std::mutex> lock (mutex);
		wallet_accounts.push_front (account);
		condition.notify_all ();
	}
}

void nano::bootstrap_attempt::wallet_start (std::deque<nano::account> & accounts_a)
{
	std::lock_guard<std::mutex> lock (mutex);
	wallet_accounts.swap (accounts_a);
}

bool nano::bootstrap_attempt::wallet_finished ()
{
	assert (!mutex.try_lock ());
	auto running (!stopped);
	auto more_accounts (!wallet_accounts.empty ());
	auto still_pulling (pulling > 0);
	return running && (more_accounts || still_pulling);
}

void nano::bootstrap_attempt::wallet_run ()
{
	populate_connections ();
	auto start_time (std::chrono::steady_clock::now ());
	auto max_time (std::chrono::minutes (10));
	std::unique_lock<std::mutex> lock (mutex);
	while (wallet_finished () && std::chrono::steady_clock::now () - start_time < max_time)
	{
		if (!wallet_accounts.empty ())
		{
			request_pending (lock);
		}
		else
		{
			condition.wait (lock);
		}
	}
	if (!stopped)
	{
		node->logger.try_log ("Completed wallet lazy pulls");
		runs_count++;
		// Start lazy bootstrap if some lazy keys were inserted
		if (!lazy_finished ())
		{
			lock.unlock ();
			lazy_run ();
			lock.lock ();
		}
	}
	stopped = true;
	condition.notify_all ();
	idle.clear ();
}

nano::bootstrap_initiator::bootstrap_initiator (nano::node & node_a) :
node (node_a),
stopped (false),
thread ([this]() {
	nano::thread_role::set (nano::thread_role::name::bootstrap_initiator);
	run_bootstrap ();
})
{
}

nano::bootstrap_initiator::~bootstrap_initiator ()
{
	stop ();
}

void nano::bootstrap_initiator::bootstrap ()
{
	std::unique_lock<std::mutex> lock (mutex);
	if (!stopped && attempt == nullptr)
	{
		node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out);
		attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ());
		condition.notify_all ();
	}
}

void nano::bootstrap_initiator::bootstrap (nano::endpoint const & endpoint_a, bool add_to_peers)
{
	if (add_to_peers)
	{
		node.network.udp_channels.insert (nano::transport::map_endpoint_to_v6 (endpoint_a), nano::protocol_version);
	}
	std::unique_lock<std::mutex> lock (mutex);
	if (!stopped)
	{
		while (attempt != nullptr)
		{
			attempt->stop ();
			condition.wait (lock);
		}
		node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out);
		attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ());
		attempt->add_connection (endpoint_a);
		condition.notify_all ();
	}
}

void nano::bootstrap_initiator::bootstrap_lazy (nano::block_hash const & hash_a, bool force)
{
	{
		std::unique_lock<std::mutex> lock (mutex);
		if (force)
		{
			while (attempt != nullptr)
			{
				attempt->stop ();
				condition.wait (lock);
			}
		}
		node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_lazy, nano::stat::dir::out);
		if (attempt == nullptr)
		{
			attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ());
			attempt->mode = nano::bootstrap_mode::lazy;
		}
		attempt->lazy_start (hash_a);
	}
	condition.notify_all ();
}

void nano::bootstrap_initiator::bootstrap_wallet (std::deque<nano::account> & accounts_a)
{
	{
		std::unique_lock<std::mutex> lock (mutex);
		node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_wallet_lazy, nano::stat::dir::out);
		if (attempt == nullptr)
		{
			attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ());
			attempt->mode = nano::bootstrap_mode::wallet_lazy;
		}
		attempt->wallet_start (accounts_a);
	}
	condition.notify_all ();
}

void nano::bootstrap_initiator::run_bootstrap ()
{
	std::unique_lock<std::mutex> lock (mutex);
	while (!stopped)
	{
		if (attempt != nullptr)
		{
			lock.unlock ();
			if (attempt->mode == nano::bootstrap_mode::legacy)
			{
				attempt->run ();
			}
			else if (attempt->mode == nano::bootstrap_mode::lazy)
			{
				attempt->lazy_run ();
			}
			else
			{
				attempt->wallet_run ();
			}
			lock.lock ();
			attempt = nullptr;
			condition.notify_all ();
		}
		else
		{
			condition.wait (lock);
		}
	}
}

void nano::bootstrap_initiator::add_observer (std::function<void(bool)> const & observer_a)
{
	std::lock_guard<std::mutex> lock (observers_mutex);
	observers.push_back (observer_a);
}

bool nano::bootstrap_initiator::in_progress ()
{
	return current_attempt () != nullptr;
}

std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_attempt ()
{
	std::lock_guard<std::mutex> lock (mutex);
	return attempt;
}

void nano::bootstrap_initiator::stop ()
{
	if (!stopped.exchange (true))
	{
		{
			std::lock_guard<std::mutex> guard (mutex);
			if (attempt != nullptr)
			{
				attempt->stop ();
			}
		}
		condition.notify_all ();
		if (thread.joinable ())
		{
			thread.join ();
		}
	}
}

void nano::bootstrap_initiator::notify_listeners (bool in_progress_a)
{
	std::lock_guard<std::mutex> lock (observers_mutex);
	for (auto & i : observers)
	{
		i (in_progress_a);
	}
}

namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (bootstrap_initiator & bootstrap_initiator, const std::string & name)
{
	size_t count = 0;
	size_t cache_count = 0;
	{
		std::lock_guard<std::mutex> guard (bootstrap_initiator.observers_mutex);
		count = bootstrap_initiator.observers.size ();
	}
	{
		std::lock_guard<std::mutex> guard (bootstrap_initiator.cache.pulls_cache_mutex);
		cache_count = bootstrap_initiator.cache.cache.size ();
	}
	auto sizeof_element = sizeof (decltype (bootstrap_initiator.observers)::value_type);
	auto sizeof_cache_element = sizeof (decltype (bootstrap_initiator.cache.cache)::value_type);
	auto composite = std::make_unique<seq_con_info_composite> (name);
	composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "observers", count, sizeof_element }));
	composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "pulls_cache", cache_count, sizeof_cache_element }));
	return composite;
}
}

nano::bootstrap_listener::bootstrap_listener (uint16_t port_a, nano::node & node_a) :
node (node_a),
port (port_a)
{
}

void nano::bootstrap_listener::start ()
{
	listening_socket = std::make_shared<nano::server_socket> (node.shared (), boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::any (), port), node.config.tcp_incoming_connections_max);
	boost::system::error_code ec;
	listening_socket->start (ec);
	if (ec)
	{
		node.logger.try_log (boost::str (boost::format ("Error while binding for incoming TCP/bootstrap on port %1%: %2%") % listening_socket->listening_port () % ec.message ()));
		throw std::runtime_error (ec.message ());
	}
	listening_socket->on_connection ([this](std::shared_ptr<nano::socket> new_connection, boost::system::error_code const & ec_a) {
		bool keep_accepting = true;
		if (ec_a)
		{
			keep_accepting = false;
			this->node.logger.try_log (boost::str (boost::format ("Error while accepting incoming TCP/bootstrap connections: %1%") % ec_a.message ()));
		}
		else
		{
			accept_action (ec_a, new_connection);
		}
		return keep_accepting;
	});
}

void nano::bootstrap_listener::stop ()
{
	decltype (connections) connections_l;
	{
		std::lock_guard<std::mutex> lock (mutex);
		on = false;
		connections_l.swap (connections);
	}
	if (listening_socket)
	{
		listening_socket->close ();
		listening_socket = nullptr;
	}
}

size_t nano::bootstrap_listener::connection_count ()
{
	std::lock_guard<std::mutex> lock (mutex);
	return connections.size ();
}

void nano::bootstrap_listener::accept_action (boost::system::error_code const & ec, std::shared_ptr<nano::socket> socket_a)
{
	auto connection (std::make_shared<nano::bootstrap_server> (socket_a, node.shared ()));
	{
		std::lock_guard<std::mutex> lock (mutex);
		connections[connection.get ()] = connection;
		connection->receive ();
	}
}

boost::asio::ip::tcp::endpoint nano::bootstrap_listener::endpoint ()
{
	return boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::loopback (), listening_socket->listening_port ());
}

namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (bootstrap_listener & bootstrap_listener, const std::string & name)
{
	auto sizeof_element = sizeof (decltype (bootstrap_listener.connections)::value_type);
	auto composite = std::make_unique<seq_con_info_composite> (name);
	composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "connections", bootstrap_listener.connection_count (), sizeof_element }));
	return composite;
}
}

nano::bootstrap_server::~bootstrap_server ()
{
	if (node->config.logging.bulk_pull_logging ())
	{
		node->logger.try_log ("Exiting incoming TCP/bootstrap server");
	}
	if (type == nano::bootstrap_server_type::bootstrap)
	{
		--node->bootstrap.bootstrap_count;
	}
	else if (type == nano::bootstrap_server_type::realtime)
	{
		--node->bootstrap.realtime_count;
		node->network.response_channels.remove (remote_endpoint);
		// Clear temporary channel
		auto existing_response_channel (node->network.tcp_channels.find_channel (remote_endpoint));
		if (existing_response_channel != nullptr)
		{
			existing_response_channel->server = false;
			node->network.tcp_channels.erase (remote_endpoint);
		}
	}
	stop ();
	std::lock_guard<std::mutex> lock (node->bootstrap.mutex);
	node->bootstrap.connections.erase (this);
}

void nano::bootstrap_server::stop ()
{
	if (!stopped.exchange (true))
	{
		if (socket != nullptr)
		{
			socket->close ();
		}
	}
}

nano::bootstrap_server::bootstrap_server (std::shared_ptr<nano::socket> socket_a, std::shared_ptr<nano::node> node_a) :
receive_buffer (std::make_shared<std::vector<uint8_t>> ()),
socket (socket_a),
node (node_a)
{
	receive_buffer->resize (512);
}

void nano::bootstrap_server::receive ()
{
	// Increase timeout to receive TCP header (idle server socket)
	socket->set_timeout (node->network_params.node.idle_timeout);
	auto this_l (shared_from_this ());
	socket->async_read (receive_buffer, 8, [this_l](boost::system::error_code const & ec, size_t size_a) {
		// Set remote_endpoint
		if (this_l->remote_endpoint.port () == 0)
		{
			this_l->remote_endpoint = this_l->socket->remote_endpoint ();
		}
		// Decrease timeout to default
		this_l->socket->set_timeout (this_l->node->config.tcp_io_timeout);
		// Receive header
		this_l->receive_header_action (ec, size_a);
	});
}
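// Dispatches on the 8-byte message header read above: bootstrap messages
// (bulk_pull, frontier_req, ...) are queued as requests, while realtime
// messages are forwarded to the TCP channel layer once the server has been
// promoted by a node_id_handshake.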
void nano::bootstrap_server::receive_header_action (boost::system::error_code const & ec, size_t size_a)
{
	if (!ec)
	{
		assert (size_a == 8);
		nano::bufferstream type_stream (receive_buffer->data (), size_a);
		auto error (false);
		nano::message_header header (error, type_stream);
		if (!error)
		{
			switch (header.type)
			{
				case nano::message_type::bulk_pull:
				{
					node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull, nano::stat::dir::in);
					auto this_l (shared_from_this ());
					socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
						this_l->receive_bulk_pull_action (ec, size_a, header);
					});
					break;
				}
				case nano::message_type::bulk_pull_account:
				{
					node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_account, nano::stat::dir::in);
					auto this_l (shared_from_this ());
					socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
						this_l->receive_bulk_pull_account_action (ec, size_a, header);
					});
					break;
				}
				case nano::message_type::frontier_req:
				{
					node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::frontier_req, nano::stat::dir::in);
					auto this_l (shared_from_this ());
					socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
						this_l->receive_frontier_req_action (ec, size_a, header);
					});
					break;
				}
				case nano::message_type::bulk_push:
				{
					node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_push, nano::stat::dir::in);
					if (is_bootstrap_connection ())
					{
						add_request (std::unique_ptr<nano::message> (new nano::bulk_push (header)));
					}
					break;
				}
				case nano::message_type::keepalive:
				{
					auto this_l (shared_from_this ());
					socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
						this_l->receive_keepalive_action (ec, size_a, header);
					});
					break;
				}
				case nano::message_type::publish:
				{
					auto this_l (shared_from_this ());
					socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
						this_l->receive_publish_action (ec, size_a, header);
					});
					break;
				}
				case nano::message_type::confirm_ack:
				{
					auto this_l (shared_from_this ());
					socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
						this_l->receive_confirm_ack_action (ec, size_a, header);
					});
					break;
				}
				case nano::message_type::confirm_req:
				{
					auto this_l (shared_from_this ());
					socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
						this_l->receive_confirm_req_action (ec, size_a, header);
					});
					break;
				}
				case nano::message_type::node_id_handshake:
				{
					auto this_l (shared_from_this ());
					socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
						this_l->receive_node_id_handshake_action (ec, size_a, header);
					});
					break;
				}
				default:
				{
					if (node->config.logging.network_logging ())
					{
						node->logger.try_log (boost::str (boost::format ("Received invalid type from bootstrap connection %1%") % static_cast<uint8_t> (header.type)));
					}
					break;
				}
			}
		}
	}
	else
	{
		if (node->config.logging.bulk_pull_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error while receiving type: %1%") % ec.message ()));
		}
	}
}
void nano::bootstrap_server::receive_bulk_pull_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	if (!ec)
	{
		auto error (false);
		nano::bufferstream stream (receive_buffer->data (), size_a);
		std::unique_ptr<nano::bulk_pull> request (new nano::bulk_pull (error, stream, header_a));
		if (!error)
		{
			if (node->config.logging.bulk_pull_logging ())
			{
				node->logger.try_log (boost::str (boost::format ("Received bulk pull for %1% down to %2%, maximum of %3%") % request->start.to_string () % request->end.to_string () % (request->count ? request->count : std::numeric_limits<double>::infinity ())));
			}
			if (is_bootstrap_connection ())
			{
				add_request (std::unique_ptr<nano::message> (request.release ()));
			}
			receive ();
		}
	}
}

void nano::bootstrap_server::receive_bulk_pull_account_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	if (!ec)
	{
		auto error (false);
		assert (size_a == header_a.payload_length_bytes ());
		nano::bufferstream stream (receive_buffer->data (), size_a);
		std::unique_ptr<nano::bulk_pull_account> request (new nano::bulk_pull_account (error, stream, header_a));
		if (!error)
		{
			if (node->config.logging.bulk_pull_logging ())
			{
				node->logger.try_log (boost::str (boost::format ("Received bulk pull account for %1% with a minimum amount of %2%") % request->account.to_account () % nano::amount (request->minimum_amount).format_balance (nano::Mxrb_ratio, 10, true)));
			}
			if (is_bootstrap_connection ())
			{
				add_request (std::unique_ptr<nano::message> (request.release ()));
			}
			receive ();
		}
	}
}

void nano::bootstrap_server::receive_frontier_req_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	if (!ec)
	{
		auto error (false);
		nano::bufferstream stream (receive_buffer->data (), size_a);
		std::unique_ptr<nano::frontier_req> request (new nano::frontier_req (error, stream, header_a));
		if (!error)
		{
			if (node->config.logging.bulk_pull_logging ())
			{
				node->logger.try_log (boost::str (boost::format ("Received frontier request for %1% with age %2%") % request->start.to_string () % request->age));
			}
			if (is_bootstrap_connection ())
			{
				add_request (std::unique_ptr<nano::message> (request.release ()));
			}
			receive ();
		}
	}
	else
	{
		if (node->config.logging.network_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error receiving frontier request: %1%") % ec.message ()));
		}
	}
}

void nano::bootstrap_server::receive_keepalive_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	if (!ec)
	{
		auto error (false);
		nano::bufferstream stream (receive_buffer->data (), size_a);
		std::unique_ptr<nano::keepalive> request (new nano::keepalive (error, stream, header_a));
		if (!error)
		{
			if (type == nano::bootstrap_server_type::realtime || type == nano::bootstrap_server_type::realtime_response_server)
			{
				add_request (std::unique_ptr<nano::message> (request.release ()));
			}
			receive ();
		}
	}
	else
	{
		if (node->config.logging.network_keepalive_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error receiving keepalive: %1%") % ec.message ()));
		}
	}
}

void nano::bootstrap_server::receive_publish_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	if (!ec)
	{
		auto error (false);
		nano::bufferstream stream (receive_buffer->data (), size_a);
		std::unique_ptr<nano::publish> request (new nano::publish (error, stream, header_a));
		if (!error)
		{
			if (type == nano::bootstrap_server_type::realtime || type == nano::bootstrap_server_type::realtime_response_server)
			{
				add_request (std::unique_ptr<nano::message> (request.release ()));
			}
			receive ();
		}
	}
	else
	{
		if (node->config.logging.network_message_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error receiving publish: %1%") % ec.message ()));
		}
	}
}
(receive_buffer->data (), size_a); std::unique_ptr<nano::confirm_req> request (new nano::confirm_req (error, stream, header_a)); if (!error) { if (type == nano::bootstrap_server_type::realtime || type == nano::bootstrap_server_type::realtime_response_server) { add_request (std::unique_ptr<nano::message> (request.release ())); } receive (); } } else if (node->config.logging.network_message_logging ()) { node->logger.try_log (boost::str (boost::format ("Error receiving confirm_req: %1%") % ec.message ())); } } void nano::bootstrap_server::receive_confirm_ack_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a) { if (!ec) { auto error (false); nano::bufferstream stream (receive_buffer->data (), size_a); std::unique_ptr<nano::confirm_ack> request (new nano::confirm_ack (error, stream, header_a)); if (!error) { if (type == nano::bootstrap_server_type::realtime || type == nano::bootstrap_server_type::realtime_response_server) { add_request (std::unique_ptr<nano::message> (request.release ())); } receive (); } } else if (node->config.logging.network_message_logging ()) { node->logger.try_log (boost::str (boost::format ("Error receiving confirm_ack: %1%") % ec.message ())); } } void nano::bootstrap_server::receive_node_id_handshake_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a) { if (!ec) { auto error (false); nano::bufferstream stream (receive_buffer->data (), size_a); std::unique_ptr<nano::node_id_handshake> request (new nano::node_id_handshake (error, stream, header_a)); if (!error) { if (type == nano::bootstrap_server_type::undefined && !node->flags.disable_tcp_realtime) { add_request (std::unique_ptr<nano::message> (request.release ())); } receive (); } } else if (node->config.logging.network_node_id_handshake_logging ()) { node->logger.try_log (boost::str (boost::format ("Error receiving node_id_handshake: %1%") % ec.message ())); } } void nano::bootstrap_server::add_request (std::unique_ptr<nano::message> message_a) { assert (message_a != nullptr); std::lock_guard<std::mutex> lock (mutex); auto start (requests.empty ()); requests.push (std::move (message_a)); if (start) { run_next (); } } void nano::bootstrap_server::finish_request () { std::lock_guard<std::mutex> lock (mutex); requests.pop (); if (!requests.empty ()) { run_next (); } else { std::weak_ptr<nano::bootstrap_server> this_w (shared_from_this ()); node->alarm.add (std::chrono::steady_clock::now () + (node->config.tcp_io_timeout * 2) + std::chrono::seconds (1), [this_w]() { if (auto this_l = this_w.lock ()) { this_l->timeout (); } }); } } void nano::bootstrap_server::finish_request_async () { std::weak_ptr<nano::bootstrap_server> this_w (shared_from_this ()); node->background ([this_w]() { if (auto this_l = this_w.lock ()) { this_l->finish_request (); } }); } void nano::bootstrap_server::timeout () { if (socket != nullptr) { if (socket->has_timed_out ()) { if (node->config.logging.bulk_pull_logging ()) { node->logger.try_log ("Closing incoming tcp / bootstrap server by timeout"); } { std::lock_guard<std::mutex> lock (node->bootstrap.mutex); node->bootstrap.connections.erase (this); } socket->close (); } } else { std::lock_guard<std::mutex> lock (node->bootstrap.mutex); node->bootstrap.connections.erase (this); } } namespace { class request_response_visitor : public nano::message_visitor { public: request_response_visitor (std::shared_ptr<nano::bootstrap_server> connection_a) : connection (connection_a) { } virtual 
~request_response_visitor () = default; void keepalive (nano::keepalive const & message_a) override { bool first_keepalive (connection->keepalive_first); if (first_keepalive) { connection->keepalive_first = false; } connection->finish_request_async (); auto connection_l (connection->shared_from_this ()); connection->node->background ([connection_l, message_a, first_keepalive]() { connection_l->node->network.tcp_channels.process_keepalive (message_a, connection_l->remote_endpoint, first_keepalive); }); } void publish (nano::publish const & message_a) override { connection->finish_request_async (); auto connection_l (connection->shared_from_this ()); connection->node->background ([connection_l, message_a]() { connection_l->node->network.tcp_channels.process_message (message_a, connection_l->remote_endpoint, connection_l->remote_node_id, connection_l->socket, connection_l->type); }); } void confirm_req (nano::confirm_req const & message_a) override { connection->finish_request_async (); auto connection_l (connection->shared_from_this ()); connection->node->background ([connection_l, message_a]() { connection_l->node->network.tcp_channels.process_message (message_a, connection_l->remote_endpoint, connection_l->remote_node_id, connection_l->socket, connection_l->type); }); } void confirm_ack (nano::confirm_ack const & message_a) override { connection->finish_request_async (); auto connection_l (connection->shared_from_this ()); connection->node->background ([connection_l, message_a]() { connection_l->node->network.tcp_channels.process_message (message_a, connection_l->remote_endpoint, connection_l->remote_node_id, connection_l->socket, connection_l->type); }); } void bulk_pull (nano::bulk_pull const &) override { auto response (std::make_shared<nano::bulk_pull_server> (connection, std::unique_ptr<nano::bulk_pull> (static_cast<nano::bulk_pull *> (connection->requests.front ().release ())))); response->send_next (); } void bulk_pull_account (nano::bulk_pull_account const &) override { auto response (std::make_shared<nano::bulk_pull_account_server> (connection, std::unique_ptr<nano::bulk_pull_account> (static_cast<nano::bulk_pull_account *> (connection->requests.front ().release ())))); response->send_frontier (); } void bulk_push (nano::bulk_push const &) override { auto response (std::make_shared<nano::bulk_push_server> (connection)); response->receive (); } void frontier_req (nano::frontier_req const &) override { auto response (std::make_shared<nano::frontier_req_server> (connection, std::unique_ptr<nano::frontier_req> (static_cast<nano::frontier_req *> (connection->requests.front ().release ())))); response->send_next (); } void node_id_handshake (nano::node_id_handshake const & message_a) override { if (connection->node->config.logging.network_node_id_handshake_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Received node_id_handshake message from %1%") % connection->remote_endpoint)); } if (message_a.query) { boost::optional<std::pair<nano::account, nano::signature>> response (std::make_pair (connection->node->node_id.pub, nano::sign_message (connection->node->node_id.prv, connection->node->node_id.pub, *message_a.query))); assert (!nano::validate_message (response->first, *message_a.query, response->second)); auto cookie (connection->node->network.syn_cookies.assign (nano::transport::map_tcp_to_endpoint (connection->remote_endpoint))); nano::node_id_handshake response_message (cookie, response); auto bytes = response_message.to_bytes (); // clang-format off 
connection->socket->async_write (bytes, [ bytes, connection = connection ](boost::system::error_code const & ec, size_t size_a) { if (ec) { if (connection->node->config.logging.network_node_id_handshake_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Error sending node_id_handshake to %1%: %2%") % connection->remote_endpoint % ec.message ())); } // Stop invalid handshake connection->stop (); } else { connection->node->stats.inc (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::out); connection->finish_request (); } }); // clang-format on } else if (message_a.response) { auto node_id (message_a.response->first); connection->remote_node_id = node_id; if (!connection->node->network.syn_cookies.validate (nano::transport::map_tcp_to_endpoint (connection->remote_endpoint), node_id, message_a.response->second) && node_id != connection->node->node_id.pub) { connection->type = nano::bootstrap_server_type::realtime; ++connection->node->bootstrap.realtime_count; connection->finish_request_async (); } else { // Stop invalid handshake connection->stop (); } } else { connection->finish_request_async (); } auto node_id (connection->remote_node_id); nano::bootstrap_server_type type (connection->type); assert (node_id.is_zero () || type == nano::bootstrap_server_type::realtime); auto connection_l (connection->shared_from_this ()); connection->node->background ([connection_l, message_a, node_id, type]() { connection_l->node->network.tcp_channels.process_message (message_a, connection_l->remote_endpoint, node_id, connection_l->socket, type); }); } std::shared_ptr<nano::bootstrap_server> connection; }; } void nano::bootstrap_server::run_next () { assert (!requests.empty ()); request_response_visitor visitor (shared_from_this ()); requests.front ()->visit (visitor); } bool nano::bootstrap_server::is_bootstrap_connection () { if (type == nano::bootstrap_server_type::undefined && !node->flags.disable_bootstrap_listener && node->bootstrap.bootstrap_count < node->config.bootstrap_connections_max) { ++node->bootstrap.bootstrap_count; type = nano::bootstrap_server_type::bootstrap; } return type == nano::bootstrap_server_type::bootstrap; } /** * Handle a request for the pull of all blocks associated with an account * The account is supplied as the "start" member, and the final block to * send is the "end" member. The "start" member may also be a block * hash, in which case the that hash is used as the start of a chain * to send. To determine if "start" is interpretted as an account or * hash, the ledger is checked to see if the block specified exists, * if not then it is interpretted as an account. 
* * Additionally, if "start" is specified as a block hash the range * is inclusive of that block hash, that is the range will be: * [start, end); In the case that a block hash is not specified the * range will be exclusive of the frontier for that account with * a range of (frontier, end) */ void nano::bulk_pull_server::set_current_end () { include_start = false; assert (request != nullptr); auto transaction (connection->node->store.tx_begin_read ()); if (!connection->node->store.block_exists (transaction, request->end)) { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Bulk pull end block doesn't exist: %1%, sending everything") % request->end.to_string ())); } request->end.clear (); } if (connection->node->store.block_exists (transaction, request->start)) { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Bulk pull request for block hash: %1%") % request->start.to_string ())); } current = request->start; include_start = true; } else { nano::account_info info; auto no_address (connection->node->store.account_get (transaction, request->start, info)); if (no_address) { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Request for unknown account: %1%") % request->start.to_account ())); } current = request->end; } else { current = info.head; if (!request->end.is_zero ()) { auto account (connection->node->ledger.account (transaction, request->end)); if (account != request->start) { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Request for block that is not on account chain: %1% not on %2%") % request->end.to_string () % request->start.to_account ())); } current = request->end; } } } } sent_count = 0; if (request->is_count_present ()) { max_count = request->count; } else { max_count = 0; } } void nano::bulk_pull_server::send_next () { auto block (get_next ()); if (block != nullptr) { { send_buffer->clear (); nano::vectorstream stream (*send_buffer); nano::serialize_block (stream, *block); } auto this_l (shared_from_this ()); if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Sending block: %1%") % block->hash ().to_string ())); } connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->sent_action (ec, size_a); }); } else { send_finished (); } } std::shared_ptr<nano::block> nano::bulk_pull_server::get_next () { std::shared_ptr<nano::block> result; bool send_current = false, set_current_to_end = false; /* * Determine if we should reply with a block * * If our cursor is on the final block, we should signal that we * are done by returning a null result. * * Unless we are including the "start" member and this is the * start member, then include it anyway. */ if (current != request->end) { send_current = true; } else if (current == request->end && include_start == true) { send_current = true; /* * We also need to ensure that the next time * are invoked that we return a null result */ set_current_to_end = true; } /* * Account for how many blocks we have provided. 
If this * exceeds the requested maximum, return an empty object * to signal the end of results */ if (max_count != 0 && sent_count >= max_count) { send_current = false; } if (send_current) { auto transaction (connection->node->store.tx_begin_read ()); result = connection->node->store.block_get (transaction, current); if (result != nullptr && set_current_to_end == false) { auto previous (result->previous ()); if (!previous.is_zero ()) { current = previous; } else { current = request->end; } } else { current = request->end; } sent_count++; } /* * Once we have processed "get_next()" once our cursor is no longer on * the "start" member, so this flag is not relevant is always false. */ include_start = false; return result; } void nano::bulk_pull_server::sent_action (boost::system::error_code const & ec, size_t size_a) { if (!ec) { send_next (); } else { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Unable to bulk send block: %1%") % ec.message ())); } } } void nano::bulk_pull_server::send_finished () { send_buffer->clear (); send_buffer->push_back (static_cast<uint8_t> (nano::block_type::not_a_block)); auto this_l (shared_from_this ()); if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log ("Bulk sending finished"); } connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->no_block_sent (ec, size_a); }); } void nano::bulk_pull_server::no_block_sent (boost::system::error_code const & ec, size_t size_a) { if (!ec) { assert (size_a == 1); connection->finish_request (); } else { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log ("Unable to send not-a-block"); } } } nano::bulk_pull_server::bulk_pull_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::bulk_pull> request_a) : connection (connection_a), request (std::move (request_a)), send_buffer (std::make_shared<std::vector<uint8_t>> ()) { set_current_end (); } /** * Bulk pull blocks related to an account */ void nano::bulk_pull_account_server::set_params () { assert (request != nullptr); /* * Parse the flags */ invalid_request = false; pending_include_address = false; pending_address_only = false; if (request->flags == nano::bulk_pull_account_flags::pending_address_only) { pending_address_only = true; } else if (request->flags == nano::bulk_pull_account_flags::pending_hash_amount_and_address) { /** ** This is the same as "pending_hash_and_amount" but with the ** sending address appended, for UI purposes mainly. 
**/ pending_include_address = true; } else if (request->flags == nano::bulk_pull_account_flags::pending_hash_and_amount) { /** The defaults are set above **/ } else { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Invalid bulk_pull_account flags supplied %1%") % static_cast<uint8_t> (request->flags))); } invalid_request = true; return; } /* * Initialize the current item from the requested account */ current_key.account = request->account; current_key.hash = 0; } void nano::bulk_pull_account_server::send_frontier () { /* * This function is really the entry point into this class, * so handle the invalid_request case by terminating the * request without any response */ if (!invalid_request) { auto stream_transaction (connection->node->store.tx_begin_read ()); // Get account balance and frontier block hash auto account_frontier_hash (connection->node->ledger.latest (stream_transaction, request->account)); auto account_frontier_balance_int (connection->node->ledger.account_balance (stream_transaction, request->account)); nano::uint128_union account_frontier_balance (account_frontier_balance_int); // Write the frontier block hash and balance into a buffer send_buffer->clear (); { nano::vectorstream output_stream (*send_buffer); write (output_stream, account_frontier_hash.bytes); write (output_stream, account_frontier_balance.bytes); } // Send the buffer to the requestor auto this_l (shared_from_this ()); connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->sent_action (ec, size_a); }); } } void nano::bulk_pull_account_server::send_next_block () { /* * Get the next item from the queue, it is a tuple with the key (which * contains the account and hash) and data (which contains the amount) */ auto block_data (get_next ()); auto block_info_key (block_data.first.get ()); auto block_info (block_data.second.get ()); if (block_info_key != nullptr) { /* * If we have a new item, emit it to the socket */ send_buffer->clear (); if (pending_address_only) { nano::vectorstream output_stream (*send_buffer); if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Sending address: %1%") % block_info->source.to_string ())); } write (output_stream, block_info->source.bytes); } else { nano::vectorstream output_stream (*send_buffer); if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Sending block: %1%") % block_info_key->hash.to_string ())); } write (output_stream, block_info_key->hash.bytes); write (output_stream, block_info->amount.bytes); if (pending_include_address) { /** ** Write the source address as well, if requested **/ write (output_stream, block_info->source.bytes); } } auto this_l (shared_from_this ()); connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->sent_action (ec, size_a); }); } else { /* * Otherwise, finalize the connection */ if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Done sending blocks"))); } send_finished (); } } std::pair<std::unique_ptr<nano::pending_key>, std::unique_ptr<nano::pending_info>> nano::bulk_pull_account_server::get_next () { std::pair<std::unique_ptr<nano::pending_key>, std::unique_ptr<nano::pending_info>> result; while (true) { /* * For each iteration of this 
loop, establish and then * destroy a database transaction, to avoid locking the * database for a prolonged period. */ auto stream_transaction (connection->node->store.tx_begin_read ()); auto stream (connection->node->store.pending_begin (stream_transaction, current_key)); if (stream == nano::store_iterator<nano::pending_key, nano::pending_info> (nullptr)) { break; } nano::pending_key key (stream->first); nano::pending_info info (stream->second); /* * Get the key for the next value, to use in the next call or iteration */ current_key.account = key.account; current_key.hash = key.hash.number () + 1; /* * Finish up if the response is for a different account */ if (key.account != request->account) { break; } /* * Skip entries where the amount is less than the requested * minimum */ if (info.amount < request->minimum_amount) { continue; } /* * If the pending_address_only flag is set, de-duplicate the * responses. The responses are the address of the sender, * so they are are part of the pending table's information * and not key, so we have to de-duplicate them manually. */ if (pending_address_only) { if (!deduplication.insert (info.source).second) { /* * If the deduplication map gets too * large, clear it out. This may * result in some duplicates getting * sent to the client, but we do not * want to commit too much memory */ if (deduplication.size () > 4096) { deduplication.clear (); } continue; } } result.first = std::unique_ptr<nano::pending_key> (new nano::pending_key (key)); result.second = std::unique_ptr<nano::pending_info> (new nano::pending_info (info)); break; } return result; } void nano::bulk_pull_account_server::sent_action (boost::system::error_code const & ec, size_t size_a) { if (!ec) { send_next_block (); } else { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Unable to bulk send block: %1%") % ec.message ())); } } } void nano::bulk_pull_account_server::send_finished () { /* * The "bulk_pull_account" final sequence is a final block of all * zeros. If we are sending only account public keys (with the * "pending_address_only" flag) then it will be 256-bits of zeros, * otherwise it will be either 384-bits of zeros (if the * "pending_include_address" flag is not set) or 640-bits of zeros * (if that flag is set). 
*/ send_buffer->clear (); { nano::vectorstream output_stream (*send_buffer); nano::uint256_union account_zero (0); nano::uint128_union balance_zero (0); write (output_stream, account_zero.bytes); if (!pending_address_only) { write (output_stream, balance_zero.bytes); if (pending_include_address) { write (output_stream, account_zero.bytes); } } } auto this_l (shared_from_this ()); if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log ("Bulk sending for an account finished"); } connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->complete (ec, size_a); }); } void nano::bulk_pull_account_server::complete (boost::system::error_code const & ec, size_t size_a) { if (!ec) { if (pending_address_only) { assert (size_a == 32); } else { if (pending_include_address) { assert (size_a == 80); } else { assert (size_a == 48); } } connection->finish_request (); } else { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log ("Unable to pending-as-zero"); } } } nano::bulk_pull_account_server::bulk_pull_account_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::bulk_pull_account> request_a) : connection (connection_a), request (std::move (request_a)), send_buffer (std::make_shared<std::vector<uint8_t>> ()), current_key (0, 0) { /* * Setup the streaming response for the first call to "send_frontier" and "send_next_block" */ set_params (); } nano::bulk_push_server::bulk_push_server (std::shared_ptr<nano::bootstrap_server> const & connection_a) : receive_buffer (std::make_shared<std::vector<uint8_t>> ()), connection (connection_a) { receive_buffer->resize (256); } void nano::bulk_push_server::receive () { if (connection->node->bootstrap_initiator.in_progress ()) { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log ("Aborting bulk_push because a bootstrap attempt is in progress"); } } else { auto this_l (shared_from_this ()); connection->socket->async_read (receive_buffer, 1, [this_l](boost::system::error_code const & ec, size_t size_a) { if (!ec) { this_l->received_type (); } else { if (this_l->connection->node->config.logging.bulk_pull_logging ()) { this_l->connection->node->logger.try_log (boost::str (boost::format ("Error receiving block type: %1%") % ec.message ())); } } }); } } void nano::bulk_push_server::received_type () { auto this_l (shared_from_this ()); nano::block_type type (static_cast<nano::block_type> (receive_buffer->data ()[0])); switch (type) { case nano::block_type::send: { connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::send, nano::stat::dir::in); connection->socket->async_read (receive_buffer, nano::send_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::receive: { connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::receive, nano::stat::dir::in); connection->socket->async_read (receive_buffer, nano::receive_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::open: { connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::open, nano::stat::dir::in); connection->socket->async_read (receive_buffer, nano::open_block::size, [this_l, type](boost::system::error_code const & ec, 
size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::change: { connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::change, nano::stat::dir::in); connection->socket->async_read (receive_buffer, nano::change_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::state: { connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::state_block, nano::stat::dir::in); connection->socket->async_read (receive_buffer, nano::state_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::not_a_block: { connection->finish_request (); break; } default: { if (connection->node->config.logging.network_packet_logging ()) { connection->node->logger.try_log ("Unknown type received as block type"); } break; } } } void nano::bulk_push_server::received_block (boost::system::error_code const & ec, size_t size_a, nano::block_type type_a) { if (!ec) { nano::bufferstream stream (receive_buffer->data (), size_a); auto block (nano::deserialize_block (stream, type_a)); if (block != nullptr && !nano::work_validate (*block)) { if (!connection->node->block_processor.full ()) { connection->node->process_active (std::move (block)); } receive (); } else { if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log ("Error deserializing block received from pull request"); } } } } nano::frontier_req_server::frontier_req_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::frontier_req> request_a) : connection (connection_a), current (request_a->start.number () - 1), frontier (0), request (std::move (request_a)), send_buffer (std::make_shared<std::vector<uint8_t>> ()), count (0) { next (); } void nano::frontier_req_server::send_next () { if (!current.is_zero () && count < request->count) { { send_buffer->clear (); nano::vectorstream stream (*send_buffer); write (stream, current.bytes); write (stream, frontier.bytes); } auto this_l (shared_from_this ()); if (connection->node->config.logging.bulk_pull_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Sending frontier for %1% %2%") % current.to_account () % frontier.to_string ())); } next (); connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->sent_action (ec, size_a); }); } else { send_finished (); } } void nano::frontier_req_server::send_finished () { { send_buffer->clear (); nano::vectorstream stream (*send_buffer); nano::uint256_union zero (0); write (stream, zero.bytes); write (stream, zero.bytes); } auto this_l (shared_from_this ()); if (connection->node->config.logging.network_logging ()) { connection->node->logger.try_log ("Frontier sending finished"); } connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->no_block_sent (ec, size_a); }); } void nano::frontier_req_server::no_block_sent (boost::system::error_code const & ec, size_t size_a) { if (!ec) { connection->finish_request (); } else { if (connection->node->config.logging.network_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Error sending frontier finish: %1%") % ec.message ())); } } } void nano::frontier_req_server::sent_action (boost::system::error_code const & ec, 
size_t size_a) { if (!ec) { count++; send_next (); } else { if (connection->node->config.logging.network_logging ()) { connection->node->logger.try_log (boost::str (boost::format ("Error sending frontier pair: %1%") % ec.message ())); } } } void nano::frontier_req_server::next () { // Filling accounts deque to prevent often read transactions if (accounts.empty ()) { auto now (nano::seconds_since_epoch ()); bool skip_old (request->age != std::numeric_limits<decltype (request->age)>::max ()); size_t max_size (128); auto transaction (connection->node->store.tx_begin_read ()); for (auto i (connection->node->store.latest_begin (transaction, current.number () + 1)), n (connection->node->store.latest_end ()); i != n && accounts.size () != max_size; ++i) { nano::account_info const & info (i->second); if (!skip_old || (now - info.modified) <= request->age) { nano::account const & account (i->first); accounts.emplace_back (account, info.head); } } /* If loop breaks before max_size, then latest_end () is reached Add empty record to finish frontier_req_server */ if (accounts.size () != max_size) { accounts.emplace_back (nano::account (0), nano::block_hash (0)); } } // Retrieving accounts from deque auto const & account_pair (accounts.front ()); current = account_pair.first; frontier = account_pair.second; accounts.pop_front (); } void nano::pulls_cache::add (nano::pull_info const & pull_a) { if (pull_a.processed > 500) { std::lock_guard<std::mutex> guard (pulls_cache_mutex); // Clean old pull if (cache.size () > cache_size_max) { cache.erase (cache.begin ()); } assert (cache.size () <= cache_size_max); nano::uint512_union head_512 (pull_a.account, pull_a.head_original); auto existing (cache.get<account_head_tag> ().find (head_512)); if (existing == cache.get<account_head_tag> ().end ()) { // Insert new pull auto inserted (cache.insert (nano::cached_pulls{ std::chrono::steady_clock::now (), head_512, pull_a.head })); assert (inserted.second); } else { // Update existing pull cache.get<account_head_tag> ().modify (existing, [pull_a](nano::cached_pulls & cache_a) { cache_a.time = std::chrono::steady_clock::now (); cache_a.new_head = pull_a.head; }); } } } void nano::pulls_cache::update_pull (nano::pull_info & pull_a) { std::lock_guard<std::mutex> guard (pulls_cache_mutex); nano::uint512_union head_512 (pull_a.account, pull_a.head_original); auto existing (cache.get<account_head_tag> ().find (head_512)); if (existing != cache.get<account_head_tag> ().end ()) { pull_a.head = existing->new_head; } } void nano::pulls_cache::remove (nano::pull_info const & pull_a) { std::lock_guard<std::mutex> guard (pulls_cache_mutex); nano::uint512_union head_512 (pull_a.account, pull_a.head_original); cache.get<account_head_tag> ().erase (head_512); } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (pulls_cache & pulls_cache, const std::string & name) { size_t cache_count = 0; { std::lock_guard<std::mutex> guard (pulls_cache.pulls_cache_mutex); cache_count = pulls_cache.cache.size (); } auto sizeof_element = sizeof (decltype (pulls_cache.cache)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "pulls_cache", cache_count, sizeof_element })); return composite; } }
1
15,726
This existed already, but seeing as you are changing the variable to have an explicit type anyway, it could be made a const reference to avoid a copy (see the sketch after this record).
nanocurrency-nano-node
cpp
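To make the const-reference suggestion in the record above concrete, here is a minimal, self-contained C++ sketch in the project's brace and initialization style; the `account_info` struct and the loop are hypothetical stand-ins, not nano's actual types:

```cpp
#include <map>
#include <string>

struct account_info
{
	std::string head; // hypothetical payload; large enough that copies matter
};

void scan (std::map<int, account_info> const & table)
{
	for (auto i (table.begin ()), n (table.end ()); i != n; ++i)
	{
		// account_info info (i->second);      // copies the mapped value on every pass
		account_info const & info (i->second); // const reference: same reads, no copy
		(void)info;
	}
}
```

The reference stays valid for the body of the loop because the map outlives each iteration; that is the condition under which this rewrite is safe.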
@@ -0,0 +1,11 @@ +module Faker + class FamousLastWords < Base + flexible :famous_last_words + + class << self + def name + fetch('famous_last_words.phrase') + end + end + end +end
1
1
7,986
I don't think this code is being tested; this method can be removed without any test failing (a sketch of the missing test follows this record).
faker-ruby-faker
rb
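A hedged sketch of the coverage the comment above says is missing, written in the test-unit style the faker suite uses elsewhere; the file path and `test_helper` require are assumptions about the suite layout:

```rb
# test/test_faker_famous_last_words.rb (hypothetical location)
require_relative 'test_helper'

class TestFakerFamousLastWords < Test::Unit::TestCase
  def setup
    @tester = Faker::FamousLastWords
  end

  def test_name
    # Unlike the current suite, this fails if FamousLastWords.name is removed.
    assert @tester.name.match(/\w+/)
  end
end
```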
@@ -298,6 +298,11 @@ public class FrameFragment extends BaseEditFragment { // Asynchronous loading of thumbnails private class asyncThumbs extends AsyncTask<Void, Void, Void> { + @Override + protected void onPreExecute() { + arrayList=new ArrayList<>(); + } + @Override protected Void doInBackground(Void... params) { arrayList = new ArrayList<>();
1
package org.fossasia.phimpme.editor.fragment; import android.content.res.AssetManager; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.graphics.Canvas; import android.os.AsyncTask; import android.os.Bundle; import android.support.annotation.Nullable; import android.support.v7.widget.LinearLayoutManager; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageButton; import android.widget.ImageView; import android.widget.Toast; import org.fossasia.phimpme.MyApplication; import org.fossasia.phimpme.R; import org.fossasia.phimpme.editor.EditImageActivity; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; /** * Created by navdeep on 27/2/18. */ public class FrameFragment extends BaseEditFragment { private static final String TAG = "FrameFragment"; private static Bitmap original; private android.support.v7.widget.RecyclerView frameRecycler; private ArrayList<Bitmap> arrayList = null; private Bitmap lastBitmap; private int lastFrame = 99; private View frameView; private ImageButton imgBtnDone, imgBtnCancel; public static FrameFragment newInstance(Bitmap bmp) { Bundle args = new Bundle(); FrameFragment fragment = new FrameFragment(); fragment.setArguments(args); original = bmp.copy(Bitmap.Config.ARGB_8888, true); return fragment; } @Nullable @Override public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) { frameView = inflater.inflate(R.layout.fragment_editor_frames, null); return frameView; } @Override public void onActivityCreated(Bundle savedInstanceState) { super.onActivityCreated(savedInstanceState); frameRecycler = (android.support.v7.widget.RecyclerView) frameView.findViewById(R.id.frameRecyler); imgBtnDone = (ImageButton) frameView.findViewById(R.id.done); imgBtnCancel = (ImageButton) frameView.findViewById(R.id.cancel); imgBtnCancel.setImageResource(R.drawable.ic_close_black_24dp); imgBtnDone.setImageResource(R.drawable.ic_done_black_24dp); onShow(); setUpLayoutManager(); recyclerView rv = new recyclerView(); frameRecycler.setAdapter(rv); imgBtnCancel.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { activity.mainImage.setImageBitmap(original); setVisibilty(false); } }); imgBtnDone.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (!lastBitmap.sameAs(original)) { activity.changeMainBitmap(lastBitmap); backToMain(); } } }); } @Override public void onDestroy() { super.onDestroy(); MyApplication.getRefWatcher(getContext()).watch(this); lastBitmap = null; System.gc(); } @Override public void onShow() { asyncThumbs asyncThumbs = new asyncThumbs(); asyncThumbs.execute(); } //Helper methods //set linearLayoutManager private void setUpLayoutManager() { LinearLayoutManager linearLayoutManager; linearLayoutManager = new LinearLayoutManager(getContext(), LinearLayoutManager.HORIZONTAL, false); frameRecycler.setLayoutManager(linearLayoutManager); } // public void backToMain() { setVisibilty(false); activity.changeMode(EditImageActivity.MODE_MAIN); } //start asyncFrame.execute() private void loadFrame(int pos) { new asyncFrame().execute(pos); } private void setVisibilty(Boolean visibility) { if (visibility) { imgBtnCancel.setVisibility(View.VISIBLE); imgBtnDone.setVisibility(View.VISIBLE); } else { imgBtnCancel.setVisibility(View.GONE); 
imgBtnDone.setVisibility(View.GONE); } } private boolean checkVisibility() { return imgBtnCancel.getVisibility() == View.VISIBLE ? true : false; } private class recyclerView extends android.support.v7.widget.RecyclerView.Adapter<recyclerView.viewHolder> { @Override public viewHolder onCreateViewHolder(ViewGroup parent, int viewType) { View view = View.inflate(getContext(), R.layout.frames, null); return new viewHolder(view); } @Override public void onBindViewHolder(final viewHolder holder, final int position) { holder.imageView.setImageBitmap(arrayList.get(position)); holder.imageView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (!checkVisibility()) { setVisibilty(true); } if (lastFrame != position) { loadFrame(position); } } }); } @Override public int getItemCount() { return arrayList.size(); } public class viewHolder extends android.support.v7.widget.RecyclerView.ViewHolder { private ImageView imageView; public viewHolder(View itemView) { super(itemView); imageView = (ImageView) itemView.findViewById(R.id.frames); } } } private class asyncFrame extends AsyncTask<Integer, Integer, Bitmap> { @Override protected void onPreExecute() { super.onPreExecute(); activity.showProgressBar(); } @Override protected Bitmap doInBackground(Integer... params) { return drawFrame(params[0]); } @Override protected void onPostExecute(Bitmap bitmap) { super.onPostExecute(bitmap); activity.mainImage.setImageBitmap(bitmap); lastBitmap = bitmap; activity.hideProgressBar(); } /** * @param pos selected name of frame from assets */ private Bitmap drawFrame(int pos) { InputStream is; try { if (original != null && pos < 11) { is = getResources().getAssets().open("frames" + File.separator + pos + ".png"); Offset of; of = offset(pos); int width = of.getWidth(); int height = of.getHeight(); Bitmap main = original; Bitmap temp = main.copy(Bitmap.Config.ARGB_8888, true); Bitmap frame = BitmapFactory.decodeStream(is).copy(Bitmap.Config.ARGB_8888, true); is.close(); Bitmap draw = Bitmap.createScaledBitmap(frame, (2 * (width)) + temp.getWidth(), (2 * (height)) + temp.getHeight(), false); of = null; of = offset(draw); int widthForTemp = of.getWidth(); int heightForTemp = of.getHeight(); //calculate offset after scaling Bitmap latestBmp = Bitmap.createBitmap(2 * (widthForTemp) + temp.getWidth(), 2 * (heightForTemp) + temp.getHeight(), Bitmap.Config.ARGB_8888); Bitmap frameNew = Bitmap.createScaledBitmap(frame, (2 * (widthForTemp)) + temp.getWidth(), (2 * (heightForTemp)) + temp.getHeight(), false); frame.recycle(); Canvas can = new Canvas(latestBmp); can.drawBitmap(temp, widthForTemp, heightForTemp, null); can.drawBitmap(frameNew, 0, 0, null); frame.recycle(); temp.recycle(); frameNew.recycle(); lastFrame = pos; return latestBmp; } else { Bitmap temp = original.copy(Bitmap.Config.ARGB_8888, true); Canvas can = new Canvas(temp); is = getResources().getAssets().open("frames" + File.separator + pos + ".png"); Bitmap frame = BitmapFactory.decodeStream(is); is.close(); Bitmap frameNew = Bitmap.createScaledBitmap(frame, temp.getWidth(), temp.getHeight(), false); can.drawBitmap(frameNew, 0, 0, null); frameNew.recycle(); frame.recycle(); lastFrame = pos; return temp; } } catch (IOException e) { e.printStackTrace(); Toast.makeText(getContext(), e.getMessage(), Toast.LENGTH_SHORT).show(); return null; } } //get offset object private Offset offset(int pos) { int point_x = 0; int point_y = 0; int width_off = 0; int height_off = 0; Bitmap temp = null; try { temp = 
BitmapFactory.decodeStream(getResources().getAssets().open("frames" + File.separator + pos + ".png")); } catch (IOException e) { e.printStackTrace(); } while (temp.getPixel(point_x, temp.getHeight() / 2) != 0) { width_off++; point_x++; } while (temp.getPixel(temp.getWidth() / 2, point_y) != 0) { height_off++; point_y++; } return new Offset(width_off + 2, height_off + 2); } private Offset offset(Bitmap bitmap) { int point_x = 0; int point_y = 0; int width_off = 0; int height_off = 0; Bitmap temp; if (bitmap.isMutable()) { temp = bitmap; } else { temp = bitmap.copy(Bitmap.Config.ARGB_8888, true); } while (temp.getPixel(point_x, temp.getHeight() / 2) != 0) { width_off++; point_x++; } while (temp.getPixel(temp.getWidth() / 2, point_y) != 0) { height_off++; point_y++; } return new Offset(width_off + 2, height_off + 2); } //Offset class determines the offset of selected frame private class Offset { private int width, height; private Offset(int width, int height) { this.width = width; this.height = height; } public int getWidth() { return width; } public void setWidth(int width) { this.width = width; } public int getHeight() { return height; } public void setHeight(int height) { this.height = height; } } } // Asynchronous loading of thumbnails private class asyncThumbs extends AsyncTask<Void, Void, Void> { @Override protected Void doInBackground(Void... params) { arrayList = new ArrayList<>(); InputStream is = null; Bitmap tempBitmap; String frameFolder = "frames"; AssetManager assetmanager = getResources().getAssets(); try { String str[] = assetmanager.list("frames"); for (int file = 0; file < str.length; file++) { //sort according to name is = assetmanager.open(frameFolder + File.separator + file + ".png"); tempBitmap = Bitmap.createScaledBitmap(BitmapFactory.decodeStream(is), 140, 160, false); tempBitmap.compress(Bitmap.CompressFormat.JPEG, 100, new ByteArrayOutputStream()); arrayList.add(tempBitmap); } is.close(); } catch (IOException IOE) { Log.i(TAG, "getAssets: " + IOE.getMessage()); } return null; } @Override protected void onPostExecute(Void aVoid) { super.onPostExecute(aVoid); if (arrayList == null) return; frameRecycler.getAdapter().notifyDataSetChanged(); } } }
1
12,554
@codepoet2017390 You have already initialized this ArrayList in the `onPreExecute` method, so why do it here too? (A sketch follows this record.)
fossasia-phimpme-android
java
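What keeping a single initialization could look like: a trimmed, self-contained Java sketch, not the fragment's real class (`ThumbLoader` and `items` are illustrative names):

```java
import android.os.AsyncTask;

import java.util.ArrayList;

// Hypothetical cut-down version of the fragment's asyncThumbs task.
class ThumbLoader extends AsyncTask<Void, Void, Void> {
    private ArrayList<String> items;

    @Override
    protected void onPreExecute() {
        // Runs on the UI thread before doInBackground: initialize exactly once here.
        items = new ArrayList<>();
    }

    @Override
    protected Void doInBackground(Void... params) {
        // No second "items = new ArrayList<>();" -- re-creating the list here
        // would silently discard the instance made in onPreExecute.
        items.add("0.png");
        return null;
    }
}
```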
@@ -0,0 +1,13 @@ +describe Gsa18f::ProcurementPolicy do + subject { described_class } + + permissions :can_create? do + with_feature 'RESTRICT_ACCESS' do + it "doesn't allow someone with a non-GSA email to create" do + user = User.new(email_address: '[email protected]') + procurement = Gsa18f::Procurement.new + expect(subject).not_to permit(user, procurement) + end + end + end +end
1
1
13,340
This covers the `RESTRICT_ACCESS=true` x non-gsa pair, but we probably want to test the three other permutations: (`RA=true`, gsa), (`RA=false`, non-gsa), (`RA=false`, gsa). A sketch follows this record.
18F-C2
rb
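A sketch of the three remaining permutations in the spec's own style. It assumes the suite's `with_feature` helper enables the flag inside its block and that plain `context` blocks run with it disabled; the `[email protected]` placeholders mirror the redacted addresses in the record above (gsa.gov versus non-gsa):

```rb
describe Gsa18f::ProcurementPolicy do
  subject { described_class }

  permissions :can_create? do
    with_feature 'RESTRICT_ACCESS' do
      it "allows someone with a GSA email to create" do
        user = User.new(email_address: '[email protected]') # a gsa.gov address
        expect(subject).to permit(user, Gsa18f::Procurement.new)
      end
    end

    context "when RESTRICT_ACCESS is off" do
      it "allows someone with a non-GSA email to create" do
        user = User.new(email_address: '[email protected]') # a non-gsa address
        expect(subject).to permit(user, Gsa18f::Procurement.new)
      end

      it "allows someone with a GSA email to create" do
        user = User.new(email_address: '[email protected]') # a gsa.gov address
        expect(subject).to permit(user, Gsa18f::Procurement.new)
      end
    end
  end
end
```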
@@ -18,13 +18,13 @@ define(['dom', 'scroller', 'browser', 'layoutManager', 'focusManager', 'register function onFocus(e) { - if (layoutManager.tv) { + /*if (layoutManager.tv) { if (this.focusTimeout) { clearTimeout(this.focusTimeout); } this.focusTimeout = setTimeout(getFocusCallback(this, e), 700); - } + }*/ } function getTabPanel(tabs, index) {
1
define(['dom', 'scroller', 'browser', 'layoutManager', 'focusManager', 'registerElement', 'css!./emby-tabs', 'scrollStyles'], function (dom, scroller, browser, layoutManager, focusManager) { 'use strict'; var EmbyTabs = Object.create(HTMLDivElement.prototype); var buttonClass = 'emby-tab-button'; var activeButtonClass = buttonClass + '-active'; function setActiveTabButton(tabs, newButton, oldButton, animate) { newButton.classList.add(activeButtonClass); } function getFocusCallback(tabs, e) { return function () { onClick.call(tabs, e); }; } function onFocus(e) { if (layoutManager.tv) { if (this.focusTimeout) { clearTimeout(this.focusTimeout); } this.focusTimeout = setTimeout(getFocusCallback(this, e), 700); } } function getTabPanel(tabs, index) { return null; } function removeActivePanelClass(tabs, index) { var tabPanel = getTabPanel(tabs, index); if (tabPanel) { tabPanel.classList.remove('is-active'); } } function addActivePanelClass(tabs, index) { var tabPanel = getTabPanel(tabs, index); if (tabPanel) { tabPanel.classList.add('is-active'); } } function fadeInRight(elem) { var pct = browser.mobile ? '4%' : '0.5%'; var keyframes = [ { opacity: '0', transform: 'translate3d(' + pct + ', 0, 0)', offset: 0 }, { opacity: '1', transform: 'none', offset: 1 }]; elem.animate(keyframes, { duration: 160, iterations: 1, easing: 'ease-out' }); } function triggerBeforeTabChange(tabs, index, previousIndex) { tabs.dispatchEvent(new CustomEvent("beforetabchange", { detail: { selectedTabIndex: index, previousIndex: previousIndex } })); if (previousIndex != null && previousIndex !== index) { removeActivePanelClass(tabs, previousIndex); } var newPanel = getTabPanel(tabs, index); if (newPanel) { // animate new panel ? if (newPanel.animate) { fadeInRight(newPanel); } newPanel.classList.add('is-active'); } } function onClick(e) { if (this.focusTimeout) { clearTimeout(this.focusTimeout); } var tabs = this; var current = tabs.querySelector('.' + activeButtonClass); var tabButton = dom.parentWithClass(e.target, buttonClass); if (tabButton && tabButton !== current) { if (current) { current.classList.remove(activeButtonClass); } var previousIndex = current ? 
parseInt(current.getAttribute('data-index')) : null; setActiveTabButton(tabs, tabButton, current, true); var index = parseInt(tabButton.getAttribute('data-index')); triggerBeforeTabChange(tabs, index, previousIndex); // If toCenter is called syncronously within the click event, it sometimes ends up canceling it setTimeout(function () { tabs.selectedTabIndex = index; tabs.dispatchEvent(new CustomEvent("tabchange", { detail: { selectedTabIndex: index, previousIndex: previousIndex } })); }, 120); if (tabs.scroller) { tabs.scroller.toCenter(tabButton, false); } } } function initScroller(tabs) { if (tabs.scroller) { return; } var contentScrollSlider = tabs.querySelector('.emby-tabs-slider'); if (contentScrollSlider) { tabs.scroller = new scroller(tabs, { horizontal: 1, itemNav: 0, mouseDragging: 1, touchDragging: 1, slidee: contentScrollSlider, smart: true, releaseSwing: true, scrollBy: 200, speed: 120, elasticBounds: 1, dragHandle: 1, dynamicHandle: 1, clickBar: 1, hiddenScroll: true, // In safari the transform is causing the headers to occasionally disappear or flicker requireAnimation: !browser.safari, allowNativeSmoothScroll: true }); tabs.scroller.init(); } else { tabs.classList.add('scrollX'); tabs.classList.add('hiddenScrollX'); tabs.classList.add('smoothScrollX'); } } EmbyTabs.createdCallback = function () { if (this.classList.contains('emby-tabs')) { return; } this.classList.add('emby-tabs'); this.classList.add('focusable'); dom.addEventListener(this, 'click', onClick, { passive: true }); dom.addEventListener(this, 'focus', onFocus, { passive: true, capture: true }); }; EmbyTabs.focus = function () { var selected = this.querySelector('.' + activeButtonClass); if (selected) { focusManager.focus(selected); } else { focusManager.autoFocus(this); } }; EmbyTabs.refresh = function () { if (this.scroller) { this.scroller.reload(); } }; EmbyTabs.attachedCallback = function () { initScroller(this); var current = this.querySelector('.' + activeButtonClass); var currentIndex = current ? parseInt(current.getAttribute('data-index')) : parseInt(this.getAttribute('data-index') || '0'); if (currentIndex !== -1) { this.selectedTabIndex = currentIndex; var tabButtons = this.querySelectorAll('.' + buttonClass); var newTabButton = tabButtons[currentIndex]; if (newTabButton) { setActiveTabButton(this, newTabButton, current, false); } } if (!this.readyFired) { this.readyFired = true; this.dispatchEvent(new CustomEvent("ready", {})); } }; EmbyTabs.detachedCallback = function () { if (this.scroller) { this.scroller.destroy(); this.scroller = null; } dom.removeEventListener(this, 'click', onClick, { passive: true }); dom.removeEventListener(this, 'focus', onFocus, { passive: true, capture: true }); }; function getSelectedTabButton(elem) { return elem.querySelector('.' + activeButtonClass); } EmbyTabs.selectedIndex = function (selected, triggerEvent) { var tabs = this; if (selected == null) { return tabs.selectedTabIndex || 0; } var current = tabs.selectedIndex(); tabs.selectedTabIndex = selected; var tabButtons = tabs.querySelectorAll('.' 
+ buttonClass); if (current === selected || triggerEvent === false) { triggerBeforeTabChange(tabs, selected, current); tabs.dispatchEvent(new CustomEvent("tabchange", { detail: { selectedTabIndex: selected } })); var currentTabButton = tabButtons[current]; setActiveTabButton(tabs, tabButtons[selected], currentTabButton, false); if (current !== selected && currentTabButton) { currentTabButton.classList.remove(activeButtonClass); } } else { onClick.call(tabs, { target: tabButtons[selected] }); //tabButtons[selected].click(); } }; function getSibling(elem, method) { var sibling = elem[method]; while (sibling) { if (sibling.classList.contains(buttonClass)) { if (!sibling.classList.contains('hide')) { return sibling; } } sibling = sibling[method]; } return null; } EmbyTabs.selectNext = function () { var current = getSelectedTabButton(this); var sibling = getSibling(current, 'nextSibling'); if (sibling) { onClick.call(this, { target: sibling }); } }; EmbyTabs.selectPrevious = function () { var current = getSelectedTabButton(this); var sibling = getSibling(current, 'previousSibling'); if (sibling) { onClick.call(this, { target: sibling }); } }; EmbyTabs.triggerBeforeTabChange = function (selected) { var tabs = this; triggerBeforeTabChange(tabs, tabs.selectedIndex()); }; EmbyTabs.triggerTabChange = function (selected) { var tabs = this; tabs.dispatchEvent(new CustomEvent("tabchange", { detail: { selectedTabIndex: tabs.selectedIndex() } })); }; EmbyTabs.setTabEnabled = function (index, enabled) { var tabs = this; var btn = this.querySelector('.emby-tab-button[data-index="' + index + '"]'); if (enabled) { btn.classList.remove('hide'); } else { btn.classList.remove('add'); } }; document.registerElement('emby-tabs', { prototype: EmbyTabs, extends: 'div' }); });
1
11,979
Please don't do this: if you want to disable code permanently, delete it; if you need to disable it to test, don't commit the change. Commented-out sections are very poorly highlighted during review and can let unspotted issues slip through the process... (The preferred alternative is sketched after this record.)
jellyfin-jellyfin-web
js
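For contrast, the change the reviewer would rather see, sketched as a unified diff in the same style as the patches in this file: the branch is deleted outright, leaving git history (not comments) to remember it:

```diff
 function onFocus(e) {
-    if (layoutManager.tv) {
-        if (this.focusTimeout) {
-            clearTimeout(this.focusTimeout);
-        }
-        this.focusTimeout = setTimeout(getFocusCallback(this, e), 700);
-    }
 }
```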
@@ -23,3 +23,10 @@ import ( type ConnTrackDumper interface { DumpFlows(zoneFilter uint16) ([]*flowexporter.Connection, error) } + +// ConnTrackInterfacer is an interface created to consume the required dump functions from either the third party +// conntrack library or internal packages depending on OVS datapath type or OS. +type ConnTrackInterfacer interface { + GetConnTrack(config interface{}) error // suggest a different name for config if it is not appropriate + DumpFilter(filter interface{}) ([]*flowexporter.Connection, error) +}
1
// Copyright 2020 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package connections import ( "github.com/vmware-tanzu/antrea/pkg/agent/flowexporter" ) // ConnTrackDumper is an interface that is used to dump connections from // conntrack module. type ConnTrackDumper interface { DumpFlows(zoneFilter uint16) ([]*flowexporter.Connection, error) }
1
19,291
I'm starting to feel we have too much abstraction and too many interfaces here. But this method is definitely the most confusing IMO. For one, it's called "Get*" but it does not return anything... I don't have a good understanding of what's going on anymore, so it's hard for me to suggest an alternative, but it's pretty clear to me that this interface needs to be rethought. An interface where all the parameters are themselves arbitrary objects (`interface{}`), whose actual type depends on the specific interface implementation, is not a helpful abstraction. (A typed sketch follows this record.)
antrea-io-antrea
go
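One way to make that point concrete: a hedged Go sketch of a typed alternative, with names that are illustrative rather than Antrea's actual API:

```go
package connections

// Connection stands in for flowexporter.Connection.
type Connection struct{}

// ConnTrackFilter names the concrete options a dumper understands, instead of
// an opaque interface{} whose real type depends on the implementation.
type ConnTrackFilter struct {
	ZoneID uint16
}

// ConnTrackDumper keeps a single well-typed entry point; datapath- or
// OS-specific configuration belongs in each implementation's constructor,
// not in the interface signature.
type ConnTrackDumper interface {
	DumpFlows(filter ConnTrackFilter) ([]*Connection, error)
}
```

Callers construct whichever implementation their datapath needs, and the rest of the exporter only ever sees the typed interface.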
@@ -25,6 +25,7 @@ import com.google.common.collect.HashMultimap; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Multimap; +import org.openqa.selenium.devtools.target.model.SessionId; import org.openqa.selenium.json.Json; import org.openqa.selenium.json.JsonInput; import org.openqa.selenium.remote.http.HttpClient;
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.devtools; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.openqa.selenium.json.Json.MAP_TYPE; import static org.openqa.selenium.remote.http.HttpMethod.GET; import com.google.common.collect.HashMultimap; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Multimap; import org.openqa.selenium.json.Json; import org.openqa.selenium.json.JsonInput; import org.openqa.selenium.remote.http.HttpClient; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.WebSocket; import java.io.Closeable; import java.io.StringReader; import java.time.Duration; import java.util.LinkedHashMap; import java.util.Map; import java.util.Objects; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; public class Connection implements Closeable { private static final Json JSON = new Json(); private static final AtomicLong NEXT_ID = new AtomicLong(1L); private final WebSocket socket; private final Map<Long, Consumer<JsonInput>> methodCallbacks = new LinkedHashMap<>(); private final Multimap<Event<?>, Consumer<?>> eventCallbacks = HashMultimap.create(); public Connection(HttpClient client, String url) { Objects.requireNonNull(client, "HTTP client must be set."); Objects.requireNonNull(url, "URL to connect to must be set."); socket = client.openSocket(new HttpRequest(GET, url), new Listener()); } public <X> CompletableFuture<X> send(Target.SessionId sessionId, Command<X> command) { long id = NEXT_ID.getAndIncrement(); CompletableFuture<X> result = new CompletableFuture<>(); methodCallbacks.put(id, input -> { X value = command.getMapper().apply(input); result.complete(value); }); ImmutableMap.Builder<String, Object> serialized = ImmutableMap.builder(); serialized.put("id", id); serialized.put("method", command.getMethod()); serialized.put("params", command.getParams()); if (sessionId != null) { serialized.put("sessionId", sessionId); } socket.sendText(JSON.toJson(serialized.build())); return result; } public <X> X sendAndWait(Target.SessionId sessionId, Command<X> command, Duration timeout) { try { return send(sessionId, command).get(timeout.toMillis(), MILLISECONDS); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IllegalStateException("Thread has been interrupted", e); } catch (ExecutionException e) { Throwable cause = e; if (e.getCause() != null) { cause = e.getCause(); } throw new DevToolsException(cause); } catch (TimeoutException e) { throw new org.openqa.selenium.TimeoutException(e); } } public <X> void 
addListener(Event<X> event, Consumer<X> handler) { Objects.requireNonNull(event); Objects.requireNonNull(handler); eventCallbacks.put(event, handler); } @Override public void close() { socket.close(); } private class Listener extends WebSocket.Listener { @Override public void onText(CharSequence data) { // It's kind of gross to decode the data twice, but this lets us get started on something // that feels nice to users. // TODO: decode once, and once only String asString = String.valueOf(data); Map<String, Object> raw = JSON.toType(asString, MAP_TYPE); if (raw.get("id") instanceof Number && raw.get("result") != null) { Consumer<JsonInput> consumer = methodCallbacks.remove(((Number) raw.get("id")).longValue()); if (consumer == null) { return; } try (StringReader reader = new StringReader(asString); JsonInput input = JSON.newInput(reader)) { input.beginObject(); while (input.hasNext()) { switch (input.nextName()) { case "result": consumer.accept(input); break; default: input.skipValue(); } } input.endObject(); } } else if (raw.get("method") instanceof String && raw.get("params") instanceof Map) { System.out.println("Seen: " + raw); // TODO: Also only decode once. eventCallbacks.keySet().stream() .filter(event -> raw.get("method").equals(event.getMethod())) .forEach(event -> { // TODO: This is grossly inefficient. I apologise, and we should fix this. try (StringReader reader = new StringReader(asString); JsonInput input = JSON.newInput(reader)) { Object value = null; input.beginObject(); while (input.hasNext()) { switch (input.nextName()) { case "params": value = event.getMapper().apply(input); break; default: input.skipValue(); break; } } input.endObject(); if (value == null) { // Do nothing. return; } final Object finalValue = value; for (Consumer<?> action : eventCallbacks.get(event)) { @SuppressWarnings("unchecked") Consumer<Object> obj = (Consumer<Object>) action; obj.accept(finalValue); } } }); } else { System.out.println("Unhandled type: " + data); } } } }
1
16697
It fills me with endless sadness that we can't use Selenium's own `SessionId` here.
SeleniumHQ-selenium
rb
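
The review note above laments duplicating a session-id type inside the DevTools package instead of reusing Selenium's own. For context, such an id is typically just a tiny immutable value type; a hypothetical sketch of what the Target.SessionId used above presumably looks like (the real class is not included in this record):

// Hypothetical sketch of a session-id value type; illustrative only.
public final class SessionId {
  private final String id;

  public SessionId(String id) {
    this.id = java.util.Objects.requireNonNull(id, "Session id must be set.");
  }

  @Override
  public String toString() {
    return id; // serialized directly into the "sessionId" field of the payload
  }

  @Override
  public boolean equals(Object o) {
    return o instanceof SessionId && id.equals(((SessionId) o).id);
  }

  @Override
  public int hashCode() {
    return id.hashCode();
  }
}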
@@ -2269,7 +2269,12 @@ public class ZkStateReader implements SolrCloseable {
     log.debug("Checking ZK for most up to date Aliases {}", ALIASES);
     // Call sync() first to ensure the subsequent read (getData) is up to date.
     zkClient.getSolrZooKeeper().sync(ALIASES, null, null);
-    Stat stat = new Stat();
+    Stat stat = zkClient.exists(ALIASES, null, true);
+    if (stat.getVersion() <= aliases.getZNodeVersion()) {
+      //we already have the latest version.
+      return false;
+    }
+    stat = new Stat();
     final byte[] data = zkClient.getData(ALIASES, null, stat, true);
     return setIfNewer(Aliases.fromJSON(data, stat.getVersion()));
   }
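
The hunk above short-circuits the aliases refresh: a cheap exists() call fetches only the znode's Stat, and the full getData() read is skipped when the cached Aliases is already at the same or a newer version. A minimal sketch of that check-version-then-read pattern follows; zkClient, path, cachedVersion, and applyIfNewer are illustrative stand-ins, not identifiers from the record.

// Sketch of the optimization in the patch above: compare znode versions before
// paying for a full data read. exists() returns only the Stat metadata, so the
// getData() transfer happens just when the version has moved forward.
boolean refreshIfNewer(SolrZkClient zkClient, String path, int cachedVersion)
    throws KeeperException, InterruptedException {
  Stat stat = zkClient.exists(path, null, true);      // metadata-only round trip
  if (stat == null || stat.getVersion() <= cachedVersion) {
    return false;                                     // nothing newer to fetch
  }
  stat = new Stat();                                  // filled in by getData()
  byte[] data = zkClient.getData(path, null, stat, true);
  return applyIfNewer(data, stat.getVersion());       // hypothetical helper
}

Unlike the hunk itself, the sketch also guards against exists() returning null when the znode is missing, which the patched code would turn into a NullPointerException.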
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.common.cloud; import java.lang.invoke.MethodHandles; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; import java.util.function.UnaryOperator; import java.util.stream.Collectors; import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig; import org.apache.solr.common.AlreadyClosedException; import org.apache.solr.common.Callable; import org.apache.solr.common.SolrCloseable; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.params.AutoScalingParams; import org.apache.solr.common.params.CollectionAdminParams; import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.common.util.ExecutorUtil; import org.apache.solr.common.util.ObjectReleaseTracker; import org.apache.solr.common.util.Pair; import org.apache.solr.common.util.SolrNamedThreadFactory; import org.apache.solr.common.util.Utils; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NoNodeException; import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.Watcher; import org.apache.zookeeper.Watcher.Event.EventType; import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static java.util.Arrays.asList; import static java.util.Collections.EMPTY_MAP; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.emptySortedSet; import static java.util.Collections.unmodifiableSet; import static org.apache.solr.common.cloud.UrlScheme.HTTP; import static org.apache.solr.common.util.Utils.fromJSON; public class ZkStateReader implements SolrCloseable { public static final int STATE_UPDATE_DELAY = Integer.getInteger("solr.OverseerStateUpdateDelay", 2000); // delay between cloud state updates private static final Logger log = 
LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); public static final String BASE_URL_PROP = "base_url"; public static final String NODE_NAME_PROP = "node_name"; public static final String CORE_NODE_NAME_PROP = "core_node_name"; public static final String ROLES_PROP = "roles"; public static final String STATE_PROP = "state"; // if this flag equals to false and the replica does not exist in cluster state, set state op become no op (default is true) public static final String FORCE_SET_STATE_PROP = "force_set_state"; /** * SolrCore name. */ public static final String CORE_NAME_PROP = "core"; public static final String COLLECTION_PROP = "collection"; public static final String ELECTION_NODE_PROP = "election_node"; public static final String SHARD_ID_PROP = "shard"; public static final String REPLICA_PROP = "replica"; public static final String SHARD_RANGE_PROP = "shard_range"; public static final String SHARD_STATE_PROP = "shard_state"; public static final String SHARD_PARENT_PROP = "shard_parent"; public static final String NUM_SHARDS_PROP = "numShards"; public static final String LEADER_PROP = "leader"; public static final String SHARED_STORAGE_PROP = "shared_storage"; public static final String PROPERTY_PROP = "property"; public static final String PROPERTY_PROP_PREFIX = "property."; public static final String PROPERTY_VALUE_PROP = "property.value"; public static final String MAX_AT_ONCE_PROP = "maxAtOnce"; public static final String MAX_WAIT_SECONDS_PROP = "maxWaitSeconds"; public static final String STATE_TIMESTAMP_PROP = "stateTimestamp"; public static final String COLLECTIONS_ZKNODE = "/collections"; public static final String LIVE_NODES_ZKNODE = "/live_nodes"; public static final String ALIASES = "/aliases.json"; public static final String CLUSTER_STATE = "/clusterstate.json"; public static final String CLUSTER_PROPS = "/clusterprops.json"; public static final String COLLECTION_PROPS_ZKNODE = "collectionprops.json"; public static final String REJOIN_AT_HEAD_PROP = "rejoinAtHead"; public static final String SOLR_SECURITY_CONF_PATH = "/security.json"; public static final String SOLR_AUTOSCALING_CONF_PATH = "/autoscaling.json"; public static final String SOLR_AUTOSCALING_EVENTS_PATH = "/autoscaling/events"; public static final String SOLR_AUTOSCALING_TRIGGER_STATE_PATH = "/autoscaling/triggerState"; public static final String SOLR_AUTOSCALING_NODE_ADDED_PATH = "/autoscaling/nodeAdded"; public static final String SOLR_AUTOSCALING_NODE_LOST_PATH = "/autoscaling/nodeLost"; public static final String SOLR_PKGS_PATH = "/packages.json"; public static final String DEFAULT_SHARD_PREFERENCES = "defaultShardPreferences"; public static final String REPLICATION_FACTOR = "replicationFactor"; public static final String MAX_SHARDS_PER_NODE = "maxShardsPerNode"; public static final String AUTO_ADD_REPLICAS = "autoAddReplicas"; public static final String MAX_CORES_PER_NODE = "maxCoresPerNode"; public static final String PULL_REPLICAS = "pullReplicas"; public static final String NRT_REPLICAS = "nrtReplicas"; public static final String TLOG_REPLICAS = "tlogReplicas"; public static final String READ_ONLY = "readOnly"; public static final String ROLES = "/roles.json"; public static final String CONFIGS_ZKNODE = "/configs"; public final static String CONFIGNAME_PROP = "configName"; public static final String LEGACY_CLOUD = "legacyCloud"; public static final String SAMPLE_PERCENTAGE = "samplePercentage"; /** * @deprecated use {@link org.apache.solr.common.params.CollectionAdminParams#DEFAULTS} 
instead. */ @Deprecated public static final String COLLECTION_DEF = "collectionDefaults"; public static final String URL_SCHEME = "urlScheme"; private static final String SOLR_ENVIRONMENT = "environment"; public static final String REPLICA_TYPE = "type"; /** * A view of the current state of all collections; combines all the different state sources into a single view. */ protected volatile ClusterState clusterState; private static final int GET_LEADER_RETRY_INTERVAL_MS = 50; private static final int GET_LEADER_RETRY_DEFAULT_TIMEOUT = Integer.parseInt(System.getProperty("zkReaderGetLeaderRetryTimeoutMs", "4000")); ; public static final String LEADER_ELECT_ZKNODE = "leader_elect"; public static final String SHARD_LEADERS_ZKNODE = "leaders"; public static final String ELECTION_NODE = "election"; /** * Collections tracked in the legacy (shared) state format, reflects the contents of clusterstate.json. */ private Map<String, ClusterState.CollectionRef> legacyCollectionStates = emptyMap(); /** * Last seen ZK version of clusterstate.json. */ private int legacyClusterStateVersion = 0; /** * Collections with format2 state.json, "interesting" and actively watched. */ private final ConcurrentHashMap<String, DocCollection> watchedCollectionStates = new ConcurrentHashMap<>(); /** * Collections with format2 state.json, not "interesting" and not actively watched. */ private final ConcurrentHashMap<String, LazyCollectionRef> lazyCollectionStates = new ConcurrentHashMap<>(); /** * Collection properties being actively watched */ private final ConcurrentHashMap<String, VersionedCollectionProps> watchedCollectionProps = new ConcurrentHashMap<>(); /** * Collection properties being actively watched */ private final ConcurrentHashMap<String, PropsWatcher> collectionPropsWatchers = new ConcurrentHashMap<>(); private volatile SortedSet<String> liveNodes = emptySortedSet(); private volatile Map<String, Object> clusterProperties = Collections.emptyMap(); private final ZkConfigManager configManager; private ConfigData securityData; private final Runnable securityNodeListener; private ConcurrentHashMap<String, CollectionWatch<DocCollectionWatcher>> collectionWatches = new ConcurrentHashMap<>(); // named this observers so there's less confusion between CollectionPropsWatcher map and the PropsWatcher map. private ConcurrentHashMap<String, CollectionWatch<CollectionPropsWatcher>> collectionPropsObservers = new ConcurrentHashMap<>(); private Set<CloudCollectionsListener> cloudCollectionsListeners = ConcurrentHashMap.newKeySet(); private final ExecutorService notifications = ExecutorUtil.newMDCAwareCachedThreadPool("watches"); private Set<LiveNodesListener> liveNodesListeners = ConcurrentHashMap.newKeySet(); private Set<ClusterPropertiesListener> clusterPropertiesListeners = ConcurrentHashMap.newKeySet(); /** * Used to submit notifications to Collection Properties watchers in order **/ private final ExecutorService collectionPropsNotifications = ExecutorUtil.newMDCAwareSingleThreadExecutor(new SolrNamedThreadFactory("collectionPropsNotifications")); private static final long LAZY_CACHE_TIME = TimeUnit.NANOSECONDS.convert(STATE_UPDATE_DELAY, TimeUnit.MILLISECONDS); private Future<?> collectionPropsCacheCleaner; // only kept to identify if the cleaner has already been started. /** * Get current {@link AutoScalingConfig}. * * @return current configuration from <code>autoscaling.json</code>. NOTE: * this data is retrieved from ZK on each call. 
*/ public AutoScalingConfig getAutoScalingConfig() throws KeeperException, InterruptedException { return getAutoScalingConfig(null); } /** * Get current {@link AutoScalingConfig}. * * @param watcher optional {@link Watcher} to set on a znode to watch for config changes. * @return current configuration from <code>autoscaling.json</code>. NOTE: * this data is retrieved from ZK on each call. */ @SuppressWarnings({"unchecked"}) public AutoScalingConfig getAutoScalingConfig(Watcher watcher) throws KeeperException, InterruptedException { Stat stat = new Stat(); Map<String, Object> map = new HashMap<>(); try { byte[] bytes = zkClient.getData(SOLR_AUTOSCALING_CONF_PATH, watcher, stat, true); if (bytes != null && bytes.length > 0) { map = (Map<String, Object>) fromJSON(bytes); } } catch (KeeperException.NoNodeException e) { // ignore } map.put(AutoScalingParams.ZK_VERSION, stat.getVersion()); return new AutoScalingConfig(map); } private static class CollectionWatch<T> { int coreRefCount = 0; Set<T> stateWatchers = ConcurrentHashMap.newKeySet(); public boolean canBeRemoved() { return coreRefCount + stateWatchers.size() == 0; } } public static final Set<String> KNOWN_CLUSTER_PROPS = unmodifiableSet(new HashSet<>(asList( LEGACY_CLOUD, URL_SCHEME, AUTO_ADD_REPLICAS, CoreAdminParams.BACKUP_LOCATION, DEFAULT_SHARD_PREFERENCES, MAX_CORES_PER_NODE, SAMPLE_PERCENTAGE, SOLR_ENVIRONMENT, CollectionAdminParams.DEFAULTS))); /** * Returns config set name for collection. * TODO move to DocCollection (state.json). * * @param collection to return config set name for */ public String readConfigName(String collection) throws KeeperException { String configName = null; String path = COLLECTIONS_ZKNODE + "/" + collection; log.debug("Loading collection config from: [{}]", path); try { byte[] data = zkClient.getData(path, null, null, true); if (data == null) { log.warn("No config data found at path {}.", path); throw new KeeperException.NoNodeException("No config data found at path: " + path); } ZkNodeProps props = ZkNodeProps.load(data); configName = props.getStr(CONFIGNAME_PROP); if (configName == null) { log.warn("No config data found at path{}. 
", path); throw new KeeperException.NoNodeException("No config data found at path: " + path); } } catch (InterruptedException e) { SolrZkClient.checkInterrupted(e); log.warn("Thread interrupted when loading config name for collection {}", collection); throw new SolrException(ErrorCode.SERVER_ERROR, "Thread interrupted when loading config name for collection " + collection, e); } return configName; } private final SolrZkClient zkClient; private final boolean closeClient; private volatile boolean closed = false; private Set<CountDownLatch> waitLatches = ConcurrentHashMap.newKeySet(); public ZkStateReader(SolrZkClient zkClient) { this(zkClient, null); } public ZkStateReader(SolrZkClient zkClient, Runnable securityNodeListener) { this.zkClient = zkClient; this.configManager = new ZkConfigManager(zkClient); this.closeClient = false; this.securityNodeListener = securityNodeListener; assert ObjectReleaseTracker.track(this); } public ZkStateReader(String zkServerAddress, int zkClientTimeout, int zkClientConnectTimeout) { this.zkClient = new SolrZkClient(zkServerAddress, zkClientTimeout, zkClientConnectTimeout, // on reconnect, reload cloud info new OnReconnect() { @Override public void command() { try { ZkStateReader.this.createClusterStateWatchersAndUpdate(); } catch (KeeperException e) { log.error("A ZK error has occurred", e); throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e); } catch (InterruptedException e) { // Restore the interrupted status Thread.currentThread().interrupt(); log.error("Interrupted", e); throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "Interrupted", e); } } }); this.configManager = new ZkConfigManager(zkClient); this.closeClient = true; this.securityNodeListener = null; assert ObjectReleaseTracker.track(this); } public ZkConfigManager getConfigManager() { return configManager; } /** * Forcibly refresh cluster state from ZK. Do this only to avoid race conditions because it's expensive. * <p> * It is cheaper to call {@link #forceUpdateCollection(String)} on a single collection if you must. * * @lucene.internal */ public void forciblyRefreshAllClusterStateSlow() throws KeeperException, InterruptedException { synchronized (getUpdateLock()) { if (clusterState == null) { // Never initialized, just run normal initialization. createClusterStateWatchersAndUpdate(); return; } // No need to set watchers because we should already have watchers registered for everything. refreshCollectionList(null); refreshLiveNodes(null); refreshLegacyClusterState(null); // Need a copy so we don't delete from what we're iterating over. Collection<String> safeCopy = new ArrayList<>(watchedCollectionStates.keySet()); Set<String> updatedCollections = new HashSet<>(); for (String coll : safeCopy) { DocCollection newState = fetchCollectionState(coll, null, null); if (updateWatchedCollection(coll, newState)) { updatedCollections.add(coll); } } constructState(updatedCollections); } } /** * Forcibly refresh a collection's internal state from ZK. Try to avoid having to resort to this when * a better design is possible. */ //TODO shouldn't we call ZooKeeper.sync() at the right places to prevent reading a stale value? We do so for aliases. 
public void forceUpdateCollection(String collection) throws KeeperException, InterruptedException { synchronized (getUpdateLock()) { if (clusterState == null) { log.warn("ClusterState watchers have not been initialized"); return; } ClusterState.CollectionRef ref = clusterState.getCollectionRef(collection); if (ref == null || legacyCollectionStates.containsKey(collection)) { // We either don't know anything about this collection (maybe it's new?) or it's legacy. // First update the legacy cluster state. log.debug("Checking legacy cluster state for collection {}", collection); refreshLegacyClusterState(null); if (!legacyCollectionStates.containsKey(collection)) { // No dice, see if a new collection just got created. LazyCollectionRef tryLazyCollection = new LazyCollectionRef(collection); if (tryLazyCollection.get() != null) { // What do you know, it exists! log.debug("Adding lazily-loaded reference for collection {}", collection); lazyCollectionStates.putIfAbsent(collection, tryLazyCollection); constructState(Collections.singleton(collection)); } } } else if (ref.isLazilyLoaded()) { log.debug("Refreshing lazily-loaded state for collection {}", collection); if (ref.get() != null) { return; } // Edge case: if there's no external collection, try refreshing legacy cluster state in case it's there. refreshLegacyClusterState(null); } else if (watchedCollectionStates.containsKey(collection)) { // Exists as a watched collection, force a refresh. log.debug("Forcing refresh of watched collection state for {}", collection); DocCollection newState = fetchCollectionState(collection, null, null); if (updateWatchedCollection(collection, newState)) { constructState(Collections.singleton(collection)); } } else { log.error("Collection {} is not lazy or watched!", collection); } } } /** * Refresh the set of live nodes. */ public void updateLiveNodes() throws KeeperException, InterruptedException { refreshLiveNodes(null); } public Integer compareStateVersions(String coll, int version) { DocCollection collection = clusterState.getCollectionOrNull(coll); if (collection == null) return null; if (collection.getZNodeVersion() < version) { if (log.isDebugEnabled()) { log.debug("Server older than client {}<{}", collection.getZNodeVersion(), version); } DocCollection nu = getCollectionLive(this, coll); if (nu == null) return -1; if (nu.getZNodeVersion() > collection.getZNodeVersion()) { if (updateWatchedCollection(coll, nu)) { synchronized (getUpdateLock()) { constructState(Collections.singleton(coll)); } } collection = nu; } } if (collection.getZNodeVersion() == version) { return null; } if (log.isDebugEnabled()) { log.debug("Wrong version from client [{}]!=[{}]", version, collection.getZNodeVersion()); } return collection.getZNodeVersion(); } @SuppressWarnings({"unchecked"}) public synchronized void createClusterStateWatchersAndUpdate() throws KeeperException, InterruptedException { // We need to fetch the current cluster state and the set of live nodes log.debug("Updating cluster state from ZooKeeper... "); // Sanity check ZK structure. if (!(zkClient.exists(CLUSTER_STATE, true) || zkClient.exists(COLLECTIONS_ZKNODE, true))) { throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Cannot connect to cluster at " + zkClient.getZkServerAddress() + ": cluster not found/not ready"); } // on reconnect of SolrZkClient force refresh and re-add watches. 
loadClusterProperties(); refreshLiveNodes(new LiveNodeWatcher()); refreshLegacyClusterState(new LegacyClusterStateWatcher()); refreshStateFormat2Collections(); refreshCollectionList(new CollectionsChildWatcher()); refreshAliases(aliasesManager); if (securityNodeListener != null) { addSecurityNodeWatcher(pair -> { ConfigData cd = new ConfigData(); cd.data = pair.first() == null || pair.first().length == 0 ? EMPTY_MAP : Utils.getDeepCopy((Map) fromJSON(pair.first()), 4, false); cd.version = pair.second() == null ? -1 : pair.second().getVersion(); securityData = cd; securityNodeListener.run(); }); securityData = getSecurityProps(true); } collectionPropsObservers.forEach((k, v) -> { collectionPropsWatchers.computeIfAbsent(k, PropsWatcher::new).refreshAndWatch(true); }); } private void addSecurityNodeWatcher(final Callable<Pair<byte[], Stat>> callback) throws KeeperException, InterruptedException { zkClient.exists(SOLR_SECURITY_CONF_PATH, new Watcher() { @Override public void process(WatchedEvent event) { // session events are not change events, and do not remove the watcher if (EventType.None.equals(event.getType())) { return; } try { synchronized (ZkStateReader.this.getUpdateLock()) { log.debug("Updating [{}] ... ", SOLR_SECURITY_CONF_PATH); // remake watch final Stat stat = new Stat(); byte[] data = "{}".getBytes(StandardCharsets.UTF_8); if (EventType.NodeDeleted.equals(event.getType())) { // Node deleted, just recreate watch without attempting a read - SOLR-9679 getZkClient().exists(SOLR_SECURITY_CONF_PATH, this, true); } else { data = getZkClient().getData(SOLR_SECURITY_CONF_PATH, this, stat, true); } try { callback.call(new Pair<>(data, stat)); } catch (Exception e) { log.error("Error running collections node listener", e); } } } catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) { log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: ", e); } catch (KeeperException e) { log.error("A ZK error has occurred", e); throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "", e); } catch (InterruptedException e) { // Restore the interrupted status Thread.currentThread().interrupt(); log.warn("Interrupted", e); } } }, true); } /** * Construct the total state view from all sources. * Must hold {@link #getUpdateLock()} before calling this. * * @param changedCollections collections that have changed since the last call, * and that should fire notifications */ private void constructState(Set<String> changedCollections) { Set<String> liveNodes = this.liveNodes; // volatile read // Legacy clusterstate is authoritative, for backwards compatibility. // To move a collection's state to format2, first create the new state2 format node, then remove legacy entry. Map<String, ClusterState.CollectionRef> result = new LinkedHashMap<>(legacyCollectionStates); // Add state format2 collections, but don't override legacy collection states. for (Map.Entry<String, DocCollection> entry : watchedCollectionStates.entrySet()) { result.putIfAbsent(entry.getKey(), new ClusterState.CollectionRef(entry.getValue())); } // Finally, add any lazy collections that aren't already accounted for. 
for (Map.Entry<String, LazyCollectionRef> entry : lazyCollectionStates.entrySet()) { result.putIfAbsent(entry.getKey(), entry.getValue()); } this.clusterState = new ClusterState(liveNodes, result, legacyClusterStateVersion); if (log.isDebugEnabled()) { log.debug("clusterStateSet: legacy [{}] interesting [{}] watched [{}] lazy [{}] total [{}]", legacyCollectionStates.keySet().size(), collectionWatches.keySet().size(), watchedCollectionStates.keySet().size(), lazyCollectionStates.keySet().size(), clusterState.getCollectionStates().size()); } if (log.isTraceEnabled()) { log.trace("clusterStateSet: legacy [{}] interesting [{}] watched [{}] lazy [{}] total [{}]", legacyCollectionStates.keySet(), collectionWatches.keySet(), watchedCollectionStates.keySet(), lazyCollectionStates.keySet(), clusterState.getCollectionStates()); } notifyCloudCollectionsListeners(); for (String collection : changedCollections) { notifyStateWatchers(collection, clusterState.getCollectionOrNull(collection)); } } /** * Refresh legacy (shared) clusterstate.json */ private void refreshLegacyClusterState(Watcher watcher) throws KeeperException, InterruptedException { try { final Stat stat = new Stat(); final byte[] data = zkClient.getData(CLUSTER_STATE, watcher, stat, true); final ClusterState loadedData = ClusterState.load(stat.getVersion(), data, emptySet(), CLUSTER_STATE); synchronized (getUpdateLock()) { if (this.legacyClusterStateVersion >= stat.getVersion()) { // Nothing to do, someone else updated same or newer. return; } Set<String> updatedCollections = new HashSet<>(); for (String coll : this.collectionWatches.keySet()) { ClusterState.CollectionRef ref = this.legacyCollectionStates.get(coll); // legacy collections are always in-memory DocCollection oldState = ref == null ? null : ref.get(); ClusterState.CollectionRef newRef = loadedData.getCollectionStates().get(coll); DocCollection newState = newRef == null ? null : newRef.get(); if (newState == null) { // check that we haven't just migrated newState = watchedCollectionStates.get(coll); } if (!Objects.equals(oldState, newState)) { updatedCollections.add(coll); } } this.legacyCollectionStates = loadedData.getCollectionStates(); this.legacyClusterStateVersion = stat.getVersion(); constructState(updatedCollections); } } catch (KeeperException.NoNodeException e) { // Ignore missing legacy clusterstate.json. synchronized (getUpdateLock()) { this.legacyCollectionStates = emptyMap(); this.legacyClusterStateVersion = 0; constructState(Collections.emptySet()); } } } /** * Refresh state format2 collections. */ private void refreshStateFormat2Collections() { for (String coll : collectionWatches.keySet()) { new StateWatcher(coll).refreshAndWatch(); } } // We don't get a Stat or track versions on getChildren() calls, so force linearization. private final Object refreshCollectionListLock = new Object(); /** * Search for any lazy-loadable state format2 collections. * <p> * A stateFormat=1 collection which is not interesting to us can also * be put into the {@link #lazyCollectionStates} map here. But that is okay * because {@link #constructState(Set)} will give priority to collections in the * shared collection state over this map. 
* In fact this is a clever way to avoid doing a ZK exists check on * the /collections/collection_name/state.json znode * Such an exists check is done in {@link ClusterState#hasCollection(String)} and * {@link ClusterState#getCollectionsMap()} methods * have a safeguard against exposing wrong collection names to the users */ private void refreshCollectionList(Watcher watcher) throws KeeperException, InterruptedException { synchronized (refreshCollectionListLock) { List<String> children = null; try { children = zkClient.getChildren(COLLECTIONS_ZKNODE, watcher, true); } catch (KeeperException.NoNodeException e) { log.warn("Error fetching collection names: ", e); // fall through } if (children == null || children.isEmpty()) { lazyCollectionStates.clear(); return; } // Don't lock getUpdateLock() here, we don't need it and it would cause deadlock. // Don't mess with watchedCollections, they should self-manage. // First, drop any children that disappeared. this.lazyCollectionStates.keySet().retainAll(children); for (String coll : children) { // We will create an eager collection for any interesting collections, so don't add to lazy. if (!collectionWatches.containsKey(coll)) { // Double check contains just to avoid allocating an object. LazyCollectionRef existing = lazyCollectionStates.get(coll); if (existing == null) { lazyCollectionStates.putIfAbsent(coll, new LazyCollectionRef(coll)); } } } } } // We don't get a Stat or track versions on getChildren() calls, so force linearization. private final Object refreshCollectionsSetLock = new Object(); // Ensures that only the latest getChildren fetch gets applied. private final AtomicReference<Set<String>> lastFetchedCollectionSet = new AtomicReference<>(); /** * Register a CloudCollectionsListener to be called when the set of collections within a cloud changes. */ public void registerCloudCollectionsListener(CloudCollectionsListener cloudCollectionsListener) { cloudCollectionsListeners.add(cloudCollectionsListener); notifyNewCloudCollectionsListener(cloudCollectionsListener); } /** * Remove a registered CloudCollectionsListener. 
*/ public void removeCloudCollectionsListener(CloudCollectionsListener cloudCollectionsListener) { cloudCollectionsListeners.remove(cloudCollectionsListener); } private void notifyNewCloudCollectionsListener(CloudCollectionsListener listener) { listener.onChange(Collections.emptySet(), lastFetchedCollectionSet.get()); } private void notifyCloudCollectionsListeners() { notifyCloudCollectionsListeners(false); } private void notifyCloudCollectionsListeners(boolean notifyIfSame) { synchronized (refreshCollectionsSetLock) { final Set<String> newCollections = getCurrentCollections(); final Set<String> oldCollections = lastFetchedCollectionSet.getAndSet(newCollections); if (!newCollections.equals(oldCollections) || notifyIfSame) { cloudCollectionsListeners.forEach(listener -> listener.onChange(oldCollections, newCollections)); } } } private Set<String> getCurrentCollections() { Set<String> collections = new HashSet<>(); collections.addAll(legacyCollectionStates.keySet()); collections.addAll(watchedCollectionStates.keySet()); collections.addAll(lazyCollectionStates.keySet()); return collections; } private class LazyCollectionRef extends ClusterState.CollectionRef { private final String collName; private volatile long lastUpdateTime; private DocCollection cachedDocCollection; public LazyCollectionRef(String collName) { super(null); this.collName = collName; this.lastUpdateTime = -1; } @Override public synchronized DocCollection get(boolean allowCached) { gets.incrementAndGet(); if (!allowCached || lastUpdateTime < 0 || System.nanoTime() - lastUpdateTime > LAZY_CACHE_TIME) { boolean shouldFetch = true; if (cachedDocCollection != null) { Stat freshStats = null; try { freshStats = zkClient.exists(getCollectionPath(collName), null, true); } catch (Exception e) { } if (freshStats != null && !cachedDocCollection.isModified(freshStats.getVersion(), freshStats.getCversion())) { shouldFetch = false; } } if (shouldFetch) { cachedDocCollection = getCollectionLive(ZkStateReader.this, collName); lastUpdateTime = System.nanoTime(); } } return cachedDocCollection; } @Override public boolean isLazilyLoaded() { return true; } @Override public String toString() { return "LazyCollectionRef(" + collName + ")"; } } // We don't get a Stat or track versions on getChildren() calls, so force linearization. private final Object refreshLiveNodesLock = new Object(); // Ensures that only the latest getChildren fetch gets applied. private final AtomicReference<SortedSet<String>> lastFetchedLiveNodes = new AtomicReference<>(); /** * Refresh live_nodes. */ private void refreshLiveNodes(Watcher watcher) throws KeeperException, InterruptedException { synchronized (refreshLiveNodesLock) { SortedSet<String> newLiveNodes; try { List<String> nodeList = zkClient.getChildren(LIVE_NODES_ZKNODE, watcher, true); newLiveNodes = new TreeSet<>(nodeList); } catch (KeeperException.NoNodeException e) { newLiveNodes = emptySortedSet(); } lastFetchedLiveNodes.set(newLiveNodes); } // Can't lock getUpdateLock() until we release the other, it would cause deadlock. SortedSet<String> oldLiveNodes, newLiveNodes; synchronized (getUpdateLock()) { newLiveNodes = lastFetchedLiveNodes.getAndSet(null); if (newLiveNodes == null) { // Someone else won the race to apply the last update, just exit. return; } oldLiveNodes = this.liveNodes; this.liveNodes = newLiveNodes; if (clusterState != null) { clusterState.setLiveNodes(newLiveNodes); } } if (oldLiveNodes.size() != newLiveNodes.size()) { if (log.isInfoEnabled()) { log.info("Updated live nodes from ZooKeeper... 
({}) -> ({})", oldLiveNodes.size(), newLiveNodes.size()); } } if (log.isDebugEnabled()) { log.debug("Updated live nodes from ZooKeeper... {} -> {}", oldLiveNodes, newLiveNodes); } if (!oldLiveNodes.equals(newLiveNodes)) { // fire listeners liveNodesListeners.forEach(listener -> { if (listener.onChange(new TreeSet<>(oldLiveNodes), new TreeSet<>(newLiveNodes))) { removeLiveNodesListener(listener); } }); } } public void registerClusterPropertiesListener(ClusterPropertiesListener listener) { // fire it once with current properties if (listener.onChange(getClusterProperties())) { removeClusterPropertiesListener(listener); } else { clusterPropertiesListeners.add(listener); } } public void removeClusterPropertiesListener(ClusterPropertiesListener listener) { clusterPropertiesListeners.remove(listener); } public void registerLiveNodesListener(LiveNodesListener listener) { // fire it once with current live nodes if (listener.onChange(new TreeSet<>(getClusterState().getLiveNodes()), new TreeSet<>(getClusterState().getLiveNodes()))) { removeLiveNodesListener(listener); } liveNodesListeners.add(listener); } public void removeLiveNodesListener(LiveNodesListener listener) { liveNodesListeners.remove(listener); } /** * @return information about the cluster from ZooKeeper */ public ClusterState getClusterState() { return clusterState; } public Object getUpdateLock() { return this; } public void close() { this.closed = true; notifications.shutdownNow(); waitLatches.parallelStream().forEach(c -> { c.countDown(); }); ExecutorUtil.shutdownAndAwaitTermination(notifications); ExecutorUtil.shutdownAndAwaitTermination(collectionPropsNotifications); if (closeClient) { zkClient.close(); } assert ObjectReleaseTracker.release(this); } @Override public boolean isClosed() { return closed; } public String getLeaderUrl(String collection, String shard, int timeout) throws InterruptedException { Replica replica = getLeaderRetry(collection, shard, timeout); if (replica == null || replica.getBaseUrl() == null) { return null; } ZkCoreNodeProps props = new ZkCoreNodeProps(replica); return props.getCoreUrl(); } public Replica getLeader(Set<String> liveNodes, DocCollection docCollection, String shard) { Replica replica = docCollection != null ? docCollection.getLeader(shard) : null; if (replica != null && liveNodes.contains(replica.getNodeName())) { return replica; } return null; } public Replica getLeader(String collection, String shard) { if (clusterState != null) { DocCollection docCollection = clusterState.getCollectionOrNull(collection); Replica replica = docCollection != null ? docCollection.getLeader(shard) : null; if (replica != null && getClusterState().liveNodesContain(replica.getNodeName())) { return replica; } } return null; } public boolean isNodeLive(String node) { return liveNodes.contains(node); } /** * Get shard leader properties, with retry if none exist. */ public Replica getLeaderRetry(String collection, String shard) throws InterruptedException { return getLeaderRetry(collection, shard, GET_LEADER_RETRY_DEFAULT_TIMEOUT); } /** * Get shard leader properties, with retry if none exist. 
*/ public Replica getLeaderRetry(String collection, String shard, int timeout) throws InterruptedException { AtomicReference<DocCollection> coll = new AtomicReference<>(); AtomicReference<Replica> leader = new AtomicReference<>(); try { waitForState(collection, timeout, TimeUnit.MILLISECONDS, (n, c) -> { if (c == null) return false; coll.set(c); Replica l = getLeader(n, c, shard); if (l != null) { log.debug("leader found for {}/{} to be {}", collection, shard, l); leader.set(l); return true; } return false; }); } catch (TimeoutException e) { throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "No registered leader was found after waiting for " + timeout + "ms " + ", collection: " + collection + " slice: " + shard + " saw state=" + clusterState.getCollectionOrNull(collection) + " with live_nodes=" + clusterState.getLiveNodes()); } return leader.get(); } /** * Get path where shard leader properties live in zookeeper. */ public static String getShardLeadersPath(String collection, String shardId) { return COLLECTIONS_ZKNODE + "/" + collection + "/" + SHARD_LEADERS_ZKNODE + (shardId != null ? ("/" + shardId) : "") + "/leader"; } /** * Get path where shard leader elections ephemeral nodes are. */ public static String getShardLeadersElectPath(String collection, String shardId) { return COLLECTIONS_ZKNODE + "/" + collection + "/" + LEADER_ELECT_ZKNODE + (shardId != null ? ("/" + shardId + "/" + ELECTION_NODE) : ""); } public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName) { return getReplicaProps(collection, shardId, thisCoreNodeName, null); } public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName, Replica.State mustMatchStateFilter) { return getReplicaProps(collection, shardId, thisCoreNodeName, mustMatchStateFilter, null); } public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName, Replica.State mustMatchStateFilter, Replica.State mustNotMatchStateFilter) { //TODO: We don't need all these getReplicaProps method overloading. 
Also, it's odd that the default is to return replicas of type TLOG and NRT only return getReplicaProps(collection, shardId, thisCoreNodeName, mustMatchStateFilter, null, EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT)); } public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName, Replica.State mustMatchStateFilter, Replica.State mustNotMatchStateFilter, final EnumSet<Replica.Type> acceptReplicaType) { assert thisCoreNodeName != null; ClusterState clusterState = this.clusterState; if (clusterState == null) { return null; } final DocCollection docCollection = clusterState.getCollectionOrNull(collection); if (docCollection == null || docCollection.getSlicesMap() == null) { throw new ZooKeeperException(ErrorCode.BAD_REQUEST, "Could not find collection in zk: " + collection); } Map<String, Slice> slices = docCollection.getSlicesMap(); Slice replicas = slices.get(shardId); if (replicas == null) { throw new ZooKeeperException(ErrorCode.BAD_REQUEST, "Could not find shardId in zk: " + shardId); } Map<String, Replica> shardMap = replicas.getReplicasMap(); List<ZkCoreNodeProps> nodes = new ArrayList<>(shardMap.size()); for (Entry<String, Replica> entry : shardMap.entrySet().stream().filter((e) -> acceptReplicaType.contains(e.getValue().getType())).collect(Collectors.toList())) { ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue()); String coreNodeName = entry.getValue().getName(); if (clusterState.liveNodesContain(nodeProps.getNodeName()) && !coreNodeName.equals(thisCoreNodeName)) { if (mustMatchStateFilter == null || mustMatchStateFilter == Replica.State.getState(nodeProps.getState())) { if (mustNotMatchStateFilter == null || mustNotMatchStateFilter != Replica.State.getState(nodeProps.getState())) { nodes.add(nodeProps); } } } } if (nodes.size() == 0) { // no replicas return null; } return nodes; } public SolrZkClient getZkClient() { return zkClient; } /** * Get a cluster property * <p> * N.B. Cluster properties are updated via ZK watchers, and so may not necessarily * be completely up-to-date. If you need to get the latest version, then use a * {@link ClusterProperties} instance. * * @param key the property to read * @param defaultValue a default value to use if no such property exists * @param <T> the type of the property * @return the cluster property, or a default if the property is not set */ @SuppressWarnings("unchecked") public <T> T getClusterProperty(String key, T defaultValue) { T value = (T) Utils.getObjectByPath(clusterProperties, false, key); if (value == null) return defaultValue; return value; } /** * Same as the above but allows a full json path as a list of parts * * @param keyPath path to the property example ["collectionDefauls", "numShards"] * @param defaultValue a default value to use if no such property exists * @return the cluster property, or a default if the property is not set */ @SuppressWarnings({"unchecked"}) public <T> T getClusterProperty(List<String> keyPath, T defaultValue) { T value = (T) Utils.getObjectByPath(clusterProperties, false, keyPath); if (value == null) return defaultValue; return value; } /** * Get all cluster properties for this cluster * <p> * N.B. Cluster properties are updated via ZK watchers, and so may not necessarily * be completely up-to-date. If you need to get the latest version, then use a * {@link ClusterProperties} instance. 
* * @return a Map of cluster properties */ public Map<String, Object> getClusterProperties() { return Collections.unmodifiableMap(clusterProperties); } private final Watcher clusterPropertiesWatcher = event -> { // session events are not change events, and do not remove the watcher if (Watcher.Event.EventType.None.equals(event.getType())) { return; } loadClusterProperties(); }; @SuppressWarnings("unchecked") private void loadClusterProperties() { try { while (true) { try { byte[] data = zkClient.getData(ZkStateReader.CLUSTER_PROPS, clusterPropertiesWatcher, new Stat(), true); this.clusterProperties = ClusterProperties.convertCollectionDefaultsToNestedFormat((Map<String, Object>) Utils.fromJSON(data)); log.debug("Loaded cluster properties: {}", this.clusterProperties); // Make the urlScheme globally accessible UrlScheme.INSTANCE.setUrlScheme(getClusterProperty(ZkStateReader.URL_SCHEME, HTTP)); for (ClusterPropertiesListener listener : clusterPropertiesListeners) { listener.onChange(getClusterProperties()); } return; } catch (KeeperException.NoNodeException e) { this.clusterProperties = Collections.emptyMap(); log.debug("Loaded empty cluster properties"); // set an exists watch, and if the node has been created since the last call, // read the data again if (zkClient.exists(ZkStateReader.CLUSTER_PROPS, clusterPropertiesWatcher, true) == null) return; } } } catch (KeeperException | InterruptedException e) { log.error("Error reading cluster properties from zookeeper", SolrZkClient.checkInterrupted(e)); } } /** * Get collection properties for a given collection. If the collection is watched, simply return it from the cache, * otherwise fetch it directly from zookeeper. This is a convenience for {@code getCollectionProperties(collection,0)} * * @param collection the collection for which properties are desired * @return a map representing the key/value properties for the collection. */ public Map<String, String> getCollectionProperties(final String collection) { return getCollectionProperties(collection, 0); } /** * Get and cache collection properties for a given collection. If the collection is watched, or still cached * simply return it from the cache, otherwise fetch it directly from zookeeper and retain the value for at * least cacheForMillis milliseconds. Cached properties are watched in zookeeper and updated automatically. * This version of {@code getCollectionProperties} should be used when properties need to be consulted * frequently in the absence of an active {@link CollectionPropsWatcher}. * * @param collection The collection for which properties are desired * @param cacheForMillis The minimum number of milliseconds to maintain a cache for the specified collection's * properties. Setting a {@code CollectionPropsWatcher} will override this value and retain * the cache for the life of the watcher. A lack of changes in zookeeper may allow the * caching to remain for a greater duration up to the cycle time of {@link CacheCleaner}. * Passing zero for this value will explicitly remove the cached copy if and only if it is * due to expire and no watch exists. Any positive value will extend the expiration time * if required. * @return a map representing the key/value properties for the collection. */ public Map<String, String> getCollectionProperties(final String collection, long cacheForMillis) { synchronized (watchedCollectionProps) { // making decisions based on the result of a get... 
Watcher watcher = null; if (cacheForMillis > 0) { watcher = collectionPropsWatchers.compute(collection, (c, w) -> w == null ? new PropsWatcher(c, cacheForMillis) : w.renew(cacheForMillis)); } VersionedCollectionProps vprops = watchedCollectionProps.get(collection); boolean haveUnexpiredProps = vprops != null && vprops.cacheUntilNs > System.nanoTime(); long untilNs = System.nanoTime() + TimeUnit.NANOSECONDS.convert(cacheForMillis, TimeUnit.MILLISECONDS); Map<String, String> properties; if (haveUnexpiredProps) { properties = vprops.props; vprops.cacheUntilNs = Math.max(vprops.cacheUntilNs, untilNs); } else { try { VersionedCollectionProps vcp = fetchCollectionProperties(collection, watcher); properties = vcp.props; if (cacheForMillis > 0) { vcp.cacheUntilNs = untilNs; watchedCollectionProps.put(collection, vcp); } else { // we're synchronized on watchedCollectionProps and we can only get here if we have found an expired // vprops above, so it is safe to remove the cached value and let the GC free up some mem a bit sooner. if (!collectionPropsObservers.containsKey(collection)) { watchedCollectionProps.remove(collection); } } } catch (Exception e) { throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading collection properties", SolrZkClient.checkInterrupted(e)); } } return properties; } } private class VersionedCollectionProps { int zkVersion; Map<String, String> props; long cacheUntilNs = 0; VersionedCollectionProps(int zkVersion, Map<String, String> props) { this.zkVersion = zkVersion; this.props = props; } } static String getCollectionPropsPath(final String collection) { return COLLECTIONS_ZKNODE + '/' + collection + '/' + COLLECTION_PROPS_ZKNODE; } @SuppressWarnings("unchecked") private VersionedCollectionProps fetchCollectionProperties(String collection, Watcher watcher) throws KeeperException, InterruptedException { final String znodePath = getCollectionPropsPath(collection); // lazy init cache cleaner once we know someone is using collection properties. if (collectionPropsCacheCleaner == null) { synchronized (this) { // There can be only one! :) if (collectionPropsCacheCleaner == null) { collectionPropsCacheCleaner = notifications.submit(new CacheCleaner()); } } } while (true) { try { Stat stat = new Stat(); byte[] data = zkClient.getData(znodePath, watcher, stat, true); return new VersionedCollectionProps(stat.getVersion(), (Map<String, String>) Utils.fromJSON(data)); } catch (ClassCastException e) { throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to parse collection properties for collection " + collection, e); } catch (KeeperException.NoNodeException e) { if (watcher != null) { // Leave an exists watch in place in case a collectionprops.json is created later. Stat exists = zkClient.exists(znodePath, watcher, true); if (exists != null) { // Rare race condition, we tried to fetch the data and couldn't find it, then we found it exists. // Loop and try again. continue; } } return new VersionedCollectionProps(-1, EMPTY_MAP); } } } /** * Returns the content of /security.json from ZooKeeper as a Map * If the files doesn't exist, it returns null. 
*/ @SuppressWarnings({"unchecked"}) public ConfigData getSecurityProps(boolean getFresh) { if (!getFresh) { if (securityData == null) return new ConfigData(EMPTY_MAP, -1); return new ConfigData(securityData.data, securityData.version); } try { Stat stat = new Stat(); if (getZkClient().exists(SOLR_SECURITY_CONF_PATH, true)) { final byte[] data = getZkClient().getData(ZkStateReader.SOLR_SECURITY_CONF_PATH, null, stat, true); return data != null && data.length > 0 ? new ConfigData((Map<String, Object>) Utils.fromJSON(data), stat.getVersion()) : null; } } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading security properties", e); } catch (KeeperException e) { throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading security properties", e); } return null; } /** * Returns the baseURL corresponding to a given node's nodeName -- * NOTE: does not (currently) imply that the nodeName (or resulting * baseURL) exists in the cluster. * * @lucene.experimental */ public String getBaseUrlForNodeName(final String nodeName) { return Utils.getBaseUrlForNodeName(nodeName, getClusterProperty(URL_SCHEME, "http")); } /** * Watches a single collection's format2 state.json. */ class StateWatcher implements Watcher { private final String coll; private final String collectionPath; StateWatcher(String coll) { this.coll = coll; collectionPath = getCollectionPath(coll); } @Override public void process(WatchedEvent event) { // session events are not change events, and do not remove the watcher if (EventType.None.equals(event.getType())) { return; } if (!collectionWatches.containsKey(coll)) { // This collection is no longer interesting, stop watching. log.debug("Uninteresting collection {}", coll); return; } Set<String> liveNodes = ZkStateReader.this.liveNodes; if (log.isInfoEnabled()) { log.info("A cluster state change: [{}] for collection [{}] has occurred - updating... (live nodes size: [{}])", event, coll, liveNodes.size()); } refreshAndWatch(event.getType()); } public void refreshAndWatch() { refreshAndWatch(null); } /** * Refresh collection state from ZK and leave a watch for future changes. * As a side effect, updates {@link #clusterState} and {@link #watchedCollectionStates} * with the results of the refresh. */ public void refreshAndWatch(EventType eventType) { try { if (eventType == null || eventType == EventType.NodeChildrenChanged) { refreshAndWatchChildren(); if (eventType == EventType.NodeChildrenChanged) { //only per-replica states modified. 
return return; } } DocCollection newState = fetchCollectionState(coll, this, collectionPath); updateWatchedCollection(coll, newState); synchronized (getUpdateLock()) { constructState(Collections.singleton(coll)); } } catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) { log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: ", e); } catch (KeeperException e) { log.error("Unwatched collection: [{}]", coll, e); throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); log.error("Unwatched collection: [{}]", coll, e); } } private void refreshAndWatchChildren() throws KeeperException, InterruptedException { Stat stat = new Stat(); List<String> replicaStates = null; try { replicaStates = zkClient.getChildren(collectionPath, this, stat, true); PerReplicaStates newStates = new PerReplicaStates(collectionPath, stat.getCversion(), replicaStates); DocCollection oldState = watchedCollectionStates.get(coll); final DocCollection newState = oldState != null ? oldState.copyWith(newStates) : fetchCollectionState(coll, null, collectionPath); updateWatchedCollection(coll, newState); synchronized (getUpdateLock()) { constructState(Collections.singleton(coll)); } if (log.isDebugEnabled()) { log.debug("updated per-replica states changed for: {}, ver: {} , new vals: {}", coll, stat.getCversion(), replicaStates); } } catch (NoNodeException e) { log.info("{} is deleted, stop watching children", collectionPath); } } } /** * Watches the legacy clusterstate.json. */ class LegacyClusterStateWatcher implements Watcher { @Override public void process(WatchedEvent event) { // session events are not change events, and do not remove the watcher if (EventType.None.equals(event.getType())) { return; } int liveNodesSize = ZkStateReader.this.clusterState == null ? 0 : ZkStateReader.this.clusterState.getLiveNodes().size(); log.debug("A cluster state change: [{}], has occurred - updating... (live nodes size: [{}])", event, liveNodesSize); refreshAndWatch(); } /** * Must hold {@link #getUpdateLock()} before calling this method. 
*/ public void refreshAndWatch() { try { refreshLegacyClusterState(this); } catch (KeeperException.NoNodeException e) { throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Cannot connect to cluster at " + zkClient.getZkServerAddress() + ": cluster not found/not ready"); } catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) { log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage()); } catch (KeeperException e) { log.error("A ZK error has occurred", e); throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e); } catch (InterruptedException e) { // Restore the interrupted status Thread.currentThread().interrupt(); log.warn("Interrupted", e); } } } /** * Watches collection properties */ class PropsWatcher implements Watcher { private final String coll; private long watchUntilNs; PropsWatcher(String coll) { this.coll = coll; watchUntilNs = 0; } PropsWatcher(String coll, long forMillis) { this.coll = coll; watchUntilNs = System.nanoTime() + TimeUnit.NANOSECONDS.convert(forMillis, TimeUnit.MILLISECONDS); } public PropsWatcher renew(long forMillis) { watchUntilNs = System.nanoTime() + TimeUnit.NANOSECONDS.convert(forMillis, TimeUnit.MILLISECONDS); return this; } @Override public void process(WatchedEvent event) { // session events are not change events, and do not remove the watcher if (EventType.None.equals(event.getType())) { return; } boolean expired = System.nanoTime() > watchUntilNs; if (!collectionPropsObservers.containsKey(coll) && expired) { // No one can be notified of the change, we can ignore it and "unset" the watch log.debug("Ignoring property change for collection {}", coll); return; } log.info("A collection property change: [{}] for collection [{}] has occurred - updating...", event, coll); refreshAndWatch(true); } /** * Refresh collection properties from ZK and leave a watch for future changes. Updates the properties in * watchedCollectionProps with the results of the refresh. Optionally notifies watchers */ void refreshAndWatch(boolean notifyWatchers) { try { synchronized (watchedCollectionProps) { // making decisions based on the result of a get... VersionedCollectionProps vcp = fetchCollectionProperties(coll, this); Map<String, String> properties = vcp.props; VersionedCollectionProps existingVcp = watchedCollectionProps.get(coll); if (existingVcp == null || // never called before, record what we found vcp.zkVersion > existingVcp.zkVersion || // newer info we should update vcp.zkVersion == -1) { // node was deleted start over watchedCollectionProps.put(coll, vcp); if (notifyWatchers) { notifyPropsWatchers(coll, properties); } if (vcp.zkVersion == -1 && existingVcp != null) { // Collection DELETE detected // We should not be caching a collection that has been deleted. watchedCollectionProps.remove(coll); // core ref counting not relevant here, don't need canRemove(), we just sent // a notification of an empty set of properties, no reason to watch what doesn't exist. collectionPropsObservers.remove(coll); // This is the one time we know it's safe to throw this out. We just failed to set the watch // due to an NoNodeException, so it isn't held by ZK and can't re-set itself due to an update. 
collectionPropsWatchers.remove(coll); } } } } catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) { log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: ", e); } catch (KeeperException e) { log.error("Lost collection property watcher for {} due to ZK error", coll, e); throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); log.error("Lost collection property watcher for {} due to the thread being interrupted", coll, e); } } } /** * Watches /collections children . */ class CollectionsChildWatcher implements Watcher { @Override public void process(WatchedEvent event) { if (ZkStateReader.this.closed) { return; } // session events are not change events, and do not remove the watcher if (EventType.None.equals(event.getType())) { return; } log.debug("A collections change: [{}], has occurred - updating...", event); refreshAndWatch(); synchronized (getUpdateLock()) { constructState(Collections.emptySet()); } } /** * Must hold {@link #getUpdateLock()} before calling this method. */ public void refreshAndWatch() { try { refreshCollectionList(this); } catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) { log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: ", e); } catch (KeeperException e) { log.error("A ZK error has occurred", e); throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e); } catch (InterruptedException e) { // Restore the interrupted status Thread.currentThread().interrupt(); log.warn("Interrupted", e); } } } /** * Watches the live_nodes and syncs changes. */ class LiveNodeWatcher implements Watcher { @Override public void process(WatchedEvent event) { // session events are not change events, and do not remove the watcher if (EventType.None.equals(event.getType())) { return; } if (log.isDebugEnabled()) { log.debug("A live node change: [{}], has occurred - updating... (live nodes size: [{}])", event, liveNodes.size()); } refreshAndWatch(); } public void refreshAndWatch() { try { refreshLiveNodes(this); } catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) { log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: ", e); } catch (KeeperException e) { log.error("A ZK error has occurred", e); throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e); } catch (InterruptedException e) { // Restore the interrupted status Thread.currentThread().interrupt(); log.warn("Interrupted", e); } } } public static DocCollection getCollectionLive(ZkStateReader zkStateReader, String coll) { try { return zkStateReader.fetchCollectionState(coll, null, null); } catch (KeeperException e) { throw new SolrException(ErrorCode.BAD_REQUEST, "Could not load collection from ZK: " + coll, e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new SolrException(ErrorCode.BAD_REQUEST, "Could not load collection from ZK: " + coll, e); } } public DocCollection fetchCollectionState(String coll, Watcher watcher, String path) throws KeeperException, InterruptedException { String collectionPath = path == null ? 
getCollectionPath(coll) : path; while (true) { ClusterState.initReplicaStateProvider(() -> { try { PerReplicaStates replicaStates = PerReplicaStates.fetch(collectionPath, zkClient, null); log.debug("per-replica-state ver: {} fetched for initializing {} ", replicaStates.cversion, collectionPath); return replicaStates; } catch (Exception e) { throw new SolrException(ErrorCode.SERVER_ERROR, "Error fetching per-replica-states"); } }); try { Stat stat = new Stat(); byte[] data = zkClient.getData(collectionPath, watcher, stat, true); ClusterState state = ClusterState.load(stat.getVersion(), data, Collections.emptySet(), collectionPath); ClusterState.CollectionRef collectionRef = state.getCollectionStates().get(coll); return collectionRef == null ? null : collectionRef.get(); } catch (KeeperException.NoNodeException e) { if (watcher != null) { // Leave an exists watch in place in case a state.json is created later. Stat exists = zkClient.exists(collectionPath, watcher, true); if (exists != null) { // Rare race condition, we tried to fetch the data and couldn't find it, then we found it exists. // Loop and try again. continue; } } return null; } finally { ClusterState.clearReplicaStateProvider(); } } } public static String getCollectionPathRoot(String coll) { return COLLECTIONS_ZKNODE + "/" + coll; } public static String getCollectionPath(String coll) { return getCollectionPathRoot(coll) + "/state.json"; } /** * Notify this reader that a local Core is a member of a collection, and so that collection * state should be watched. * <p> * Not a public API. This method should only be called from ZkController. * <p> * The number of cores per-collection is tracked, and adding multiple cores from the same * collection does not increase the number of watches. * * @param collection the collection that the core is a member of * @see ZkStateReader#unregisterCore(String) */ public void registerCore(String collection) { AtomicBoolean reconstructState = new AtomicBoolean(false); collectionWatches.compute(collection, (k, v) -> { if (v == null) { reconstructState.set(true); v = new CollectionWatch<>(); } v.coreRefCount++; return v; }); if (reconstructState.get()) { new StateWatcher(collection).refreshAndWatch(); } } /** * Notify this reader that a local core that is a member of a collection has been closed. * <p> * Not a public API. This method should only be called from ZkController. * <p> * If no cores are registered for a collection, and there are no {@link CollectionStateWatcher}s * for that collection either, the collection watch will be removed. * * @param collection the collection that the core belongs to */ public void unregisterCore(String collection) { AtomicBoolean reconstructState = new AtomicBoolean(false); collectionWatches.compute(collection, (k, v) -> { if (v == null) return null; if (v.coreRefCount > 0) v.coreRefCount--; if (v.canBeRemoved()) { watchedCollectionStates.remove(collection); lazyCollectionStates.put(collection, new LazyCollectionRef(collection)); reconstructState.set(true); return null; } return v; }); if (reconstructState.get()) { synchronized (getUpdateLock()) { constructState(Collections.emptySet()); } } } /** * Register a CollectionStateWatcher to be called when the state of a collection changes * <em>or</em> the set of live nodes changes. 
* * <p> * The Watcher will automatically be removed when its * <code>onStateChanged</code> returns <code>true</code> * </p> * * <p> * This method is just syntactic sugar for registering both a {@link DocCollectionWatcher} and * a {@link LiveNodesListener}. Callers that only care about one or the other (but not both) are * encouraged to use the more specific register methods as it may reduce the number of * ZooKeeper watchers needed, and reduce the amount of network/cpu used. * </p> * * @see #registerDocCollectionWatcher * @see #registerLiveNodesListener */ public void registerCollectionStateWatcher(String collection, CollectionStateWatcher stateWatcher) { final DocCollectionAndLiveNodesWatcherWrapper wrapper = new DocCollectionAndLiveNodesWatcherWrapper(collection, stateWatcher); registerDocCollectionWatcher(collection, wrapper); registerLiveNodesListener(wrapper); DocCollection state = clusterState.getCollectionOrNull(collection); if (stateWatcher.onStateChanged(liveNodes, state)) { removeCollectionStateWatcher(collection, stateWatcher); } } /** * Register a DocCollectionWatcher to be called when the state of a collection changes * * <p> * The Watcher will automatically be removed when its * <code>onStateChanged</code> returns <code>true</code> * </p> */ public void registerDocCollectionWatcher(String collection, DocCollectionWatcher stateWatcher) { AtomicBoolean watchSet = new AtomicBoolean(false); collectionWatches.compute(collection, (k, v) -> { if (v == null) { v = new CollectionWatch<>(); watchSet.set(true); } log.debug("already watching, added to stateWatchers"); v.stateWatchers.add(stateWatcher); return v; }); if (watchSet.get()) { new StateWatcher(collection).refreshAndWatch(); } DocCollection state = clusterState.getCollectionOrNull(collection); state = updatePerReplicaState(state); if (stateWatcher.onStateChanged(state)) { removeDocCollectionWatcher(collection, stateWatcher); } } private DocCollection updatePerReplicaState(DocCollection c) { if (c == null || !c.isPerReplicaState()) return c; PerReplicaStates current = c.getPerReplicaStates(); PerReplicaStates newPrs = PerReplicaStates.fetch(c.getZNode(), zkClient, current); if (newPrs != current) { log.debug("just-in-time update for a fresh per-replica-state {}", c.getName()); DocCollection modifiedColl = c.copyWith(newPrs); updateWatchedCollection(c.getName(), modifiedColl); return modifiedColl; } else { return c; } } /** * Block until a CollectionStatePredicate returns true, or the wait times out * * <p> * Note that the predicate may be called again even after it has returned true, so * implementors should avoid changing state within the predicate call itself. * </p> * * <p> * This implementation utilizes {@link CollectionStateWatcher} internally.
* Callers that don't care about liveNodes are encouraged to use a {@link DocCollection} {@link Predicate} * instead * </p> * * @param collection the collection to watch * @param wait how long to wait * @param unit the units of the wait parameter * @param predicate the predicate to call on state changes * @throws InterruptedException on interrupt * @throws TimeoutException on timeout * @see #waitForState(String, long, TimeUnit, Predicate) * @see #registerCollectionStateWatcher */ public void waitForState(final String collection, long wait, TimeUnit unit, CollectionStatePredicate predicate) throws InterruptedException, TimeoutException { if (closed) { throw new AlreadyClosedException(); } final CountDownLatch latch = new CountDownLatch(1); waitLatches.add(latch); AtomicReference<DocCollection> docCollection = new AtomicReference<>(); CollectionStateWatcher watcher = (n, c) -> { docCollection.set(c); boolean matches = predicate.matches(n, c); if (!matches) { if (log.isDebugEnabled()) { log.debug(" CollectionStatePredicate failed for {}, cversion : {}", collection, (c == null || c.getPerReplicaStates() == null ? "-1" : c.getPerReplicaStates())); } } if (matches) latch.countDown(); return matches; }; registerCollectionStateWatcher(collection, watcher); try { // wait for the watcher predicate to return true, or time out if (!latch.await(wait, unit)) throw new TimeoutException("Timeout waiting to see state for collection=" + collection + " :" + docCollection.get()); } finally { removeCollectionStateWatcher(collection, watcher); waitLatches.remove(latch); } } /** * Block until a Predicate returns true, or the wait times out * * <p> * Note that the predicate may be called again even after it has returned true, so * implementors should avoid changing state within the predicate call itself. * </p> * * @param collection the collection to watch * @param wait how long to wait * @param unit the units of the wait parameter * @param predicate the predicate to call on state changes * @throws InterruptedException on interrupt * @throws TimeoutException on timeout */ public void waitForState(final String collection, long wait, TimeUnit unit, Predicate<DocCollection> predicate) throws InterruptedException, TimeoutException { if (log.isDebugEnabled()) { log.debug("Waiting up to {}ms for state {}", unit.toMillis(wait), predicate); } if (closed) { throw new AlreadyClosedException(); } final CountDownLatch latch = new CountDownLatch(1); waitLatches.add(latch); AtomicReference<DocCollection> docCollection = new AtomicReference<>(); DocCollectionWatcher watcher = (c) -> { docCollection.set(c); boolean matches = predicate.test(c); if (matches) latch.countDown(); return matches; }; registerDocCollectionWatcher(collection, watcher); try { // wait for the watcher predicate to return true, or time out if (!latch.await(wait, unit)) throw new TimeoutException("Timeout waiting to see state for collection=" + collection + " :" + docCollection.get()); } finally { removeDocCollectionWatcher(collection, watcher); waitLatches.remove(latch); if (log.isDebugEnabled()) { log.debug("Completed wait for {}", predicate); } } } /** * Block until a LiveNodesStatePredicate returns true, or the wait times out * <p> * Note that the predicate may be called again even after it has returned true, so * implementors should avoid changing state within the predicate call itself. 
* </p> * * @param wait how long to wait * @param unit the units of the wait parameter * @param predicate the predicate to call on state changes * @throws InterruptedException on interrupt * @throws TimeoutException on timeout */ public void waitForLiveNodes(long wait, TimeUnit unit, LiveNodesPredicate predicate) throws InterruptedException, TimeoutException { if (closed) { throw new AlreadyClosedException(); } final CountDownLatch latch = new CountDownLatch(1); waitLatches.add(latch); LiveNodesListener listener = (o, n) -> { boolean matches = predicate.matches(o, n); if (matches) latch.countDown(); return matches; }; registerLiveNodesListener(listener); try { // wait for the watcher predicate to return true, or time out if (!latch.await(wait, unit)) throw new TimeoutException("Timeout waiting for live nodes, currently they are: " + getClusterState().getLiveNodes()); } finally { removeLiveNodesListener(listener); waitLatches.remove(latch); } } /** * Remove a watcher from a collection's watch list. * <p> * This allows Zookeeper watches to be removed if there is no interest in the * collection. * </p> * * @param collection the collection * @param watcher the watcher * @see #registerCollectionStateWatcher */ public void removeCollectionStateWatcher(String collection, CollectionStateWatcher watcher) { final DocCollectionAndLiveNodesWatcherWrapper wrapper = new DocCollectionAndLiveNodesWatcherWrapper(collection, watcher); removeDocCollectionWatcher(collection, wrapper); removeLiveNodesListener(wrapper); } /** * Remove a watcher from a collection's watch list. * <p> * This allows Zookeeper watches to be removed if there is no interest in the * collection. * </p> * * @param collection the collection * @param watcher the watcher * @see #registerDocCollectionWatcher */ public void removeDocCollectionWatcher(String collection, DocCollectionWatcher watcher) { AtomicBoolean reconstructState = new AtomicBoolean(false); collectionWatches.compute(collection, (k, v) -> { if (v == null) return null; v.stateWatchers.remove(watcher); if (v.canBeRemoved()) { watchedCollectionStates.remove(collection); lazyCollectionStates.put(collection, new LazyCollectionRef(collection)); reconstructState.set(true); return null; } return v; }); if (reconstructState.get()) { synchronized (getUpdateLock()) { constructState(Collections.emptySet()); } } } /* package-private for testing */ Set<DocCollectionWatcher> getStateWatchers(String collection) { final Set<DocCollectionWatcher> watchers = new HashSet<>(); collectionWatches.compute(collection, (k, v) -> { if (v != null) { watchers.addAll(v.stateWatchers); } return v; }); return watchers; } // returns true if the state has changed private boolean updateWatchedCollection(String coll, DocCollection newState) { if (newState == null) { log.debug("Removing cached collection state for [{}]", coll); watchedCollectionStates.remove(coll); return true; } boolean updated = false; // CAS update loop while (true) { if (!collectionWatches.containsKey(coll)) { break; } DocCollection oldState = watchedCollectionStates.get(coll); if (oldState == null) { if (watchedCollectionStates.putIfAbsent(coll, newState) == null) { if (log.isDebugEnabled()) { log.debug("Add data for [{}] ver [{}]", coll, newState.getZNodeVersion()); } updated = true; break; } } else { int oldCVersion = oldState.getPerReplicaStates() == null ? -1 : oldState.getPerReplicaStates().cversion; int newCVersion = newState.getPerReplicaStates() == null ? 
-1 : newState.getPerReplicaStates().cversion; if (oldState.getZNodeVersion() >= newState.getZNodeVersion() && oldCVersion >= newCVersion) { // no change to state, but we might have been triggered by the addition of a // state watcher, so run notifications updated = true; break; } if (watchedCollectionStates.replace(coll, oldState, newState)) { if (log.isDebugEnabled()) { log.debug("Updating data for [{}] from [{}] to [{}]", coll, oldState.getZNodeVersion(), newState.getZNodeVersion()); } updated = true; break; } } } // Resolve race with unregisterCore. if (!collectionWatches.containsKey(coll)) { watchedCollectionStates.remove(coll); log.debug("Removing uninteresting collection [{}]", coll); } return updated; } public void registerCollectionPropsWatcher(final String collection, CollectionPropsWatcher propsWatcher) { AtomicBoolean watchSet = new AtomicBoolean(false); collectionPropsObservers.compute(collection, (k, v) -> { if (v == null) { v = new CollectionWatch<>(); watchSet.set(true); } v.stateWatchers.add(propsWatcher); return v; }); if (watchSet.get()) { collectionPropsWatchers.computeIfAbsent(collection, PropsWatcher::new).refreshAndWatch(false); } } public void removeCollectionPropsWatcher(String collection, CollectionPropsWatcher watcher) { collectionPropsObservers.compute(collection, (k, v) -> { if (v == null) return null; v.stateWatchers.remove(watcher); if (v.canBeRemoved()) { // don't want this to happen in the middle of other blocks that might add it back. synchronized (watchedCollectionProps) { watchedCollectionProps.remove(collection); } return null; } return v; }); } public static class ConfigData { public Map<String, Object> data; public int version; public ConfigData() { } public ConfigData(Map<String, Object> data, int version) { this.data = data; this.version = version; } } private void notifyStateWatchers(String collection, DocCollection collectionState) { if (this.closed) { return; } try { notifications.submit(new Notification(collection, collectionState)); } catch (RejectedExecutionException e) { if (!closed) { log.error("Couldn't run collection notifications for {}", collection, e); } } } private class Notification implements Runnable { final String collection; final DocCollection collectionState; private Notification(String collection, DocCollection collectionState) { this.collection = collection; this.collectionState = collectionState; } @Override public void run() { List<DocCollectionWatcher> watchers = new ArrayList<>(); collectionWatches.compute(collection, (k, v) -> { if (v == null) return null; watchers.addAll(v.stateWatchers); return v; }); for (DocCollectionWatcher watcher : watchers) { try { if (watcher.onStateChanged(collectionState)) { removeDocCollectionWatcher(collection, watcher); } } catch (Exception exception) { log.warn("Error on calling watcher", exception); } } } } // // Aliases related // /** * Access to the {@link Aliases}. */ public final AliasesManager aliasesManager = new AliasesManager(); /** * Get an immutable copy of the present state of the aliases. References to this object should not be retained * in any context where it will be important to know if aliases have changed. * * @return The current aliases, Aliases.EMPTY if not SolrCloud, or if no aliases have existed yet. Never returns null.
*/ public Aliases getAliases() { return aliasesManager.getAliases(); } // called by createClusterStateWatchersAndUpdate() private void refreshAliases(AliasesManager watcher) throws KeeperException, InterruptedException { synchronized (getUpdateLock()) { constructState(Collections.emptySet()); zkClient.exists(ALIASES, watcher, true); } aliasesManager.update(); } /** * A class to manage the aliases instance, including watching for changes. * There should only ever be one instance of this class * per instance of ZkStateReader. Normally it will not be useful to create a new instance since * this watcher automatically re-registers itself every time it is updated. */ public class AliasesManager implements Watcher { // the holder is a Zk watcher // note: as of this writing, this class is very generic. Is it useful to use for other ZK managed things? private final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private volatile Aliases aliases = Aliases.EMPTY; public Aliases getAliases() { return aliases; // volatile read } /** * Writes an updated {@link Aliases} to zk. * It will retry if there are races with other modifications, giving up after 30 seconds with a SolrException. * The caller should understand it's possible the aliases have changed further by the time it examines them. */ public void applyModificationAndExportToZk(UnaryOperator<Aliases> op) { // The current aliases hasn't been update()'ed yet -- which is impossible? Anyway, just update it first. if (aliases.getZNodeVersion() == -1) { try { boolean updated = update(); assert updated; } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e); } catch (KeeperException e) { throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e); } } final long deadlineNanos = System.nanoTime() + TimeUnit.SECONDS.toNanos(30); // note: triesLeft tuning is based on ConcurrentCreateRoutedAliasTest for (int triesLeft = 30; triesLeft > 0; triesLeft--) { // we could synchronize on "this" but there doesn't seem to be a point; we have a retry loop. Aliases curAliases = getAliases(); Aliases modAliases = op.apply(curAliases); final byte[] modAliasesJson = modAliases.toJSON(); if (curAliases == modAliases) { log.debug("Current aliases has the desired modification; no further ZK interaction needed."); return; } try { try { final Stat stat = getZkClient().setData(ALIASES, modAliasesJson, curAliases.getZNodeVersion(), true); setIfNewer(Aliases.fromJSON(modAliasesJson, stat.getVersion())); return; } catch (KeeperException.BadVersionException e) { log.debug("{}", e, e); log.warn("Couldn't save aliases due to race with another modification; will update and retry until timeout"); // considered a backoff here, but we really do want to compete strongly since the normal case is // that we will do one update and succeed. This is left as a hot loop for limited tries intentionally. // More failures than that here probably indicate a bug or a very strange high write frequency usage for // aliases.json, timeouts mean zk is being very slow to respond, or this node is being crushed // by other processing and just can't find any cpu cycles at all. update(); if (deadlineNanos < System.nanoTime()) { throw new SolrException(ErrorCode.SERVER_ERROR, "Timed out trying to update aliases!
" + "Either zookeeper or this node may be overloaded."); } } } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e); } catch (KeeperException e) { throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e); } } throw new SolrException(ErrorCode.SERVER_ERROR, "Too many successive version failures trying to update aliases"); } /** * Ensures the internal aliases is up to date. If there is a change, return true. * * @return true if an update was performed */ public boolean update() throws KeeperException, InterruptedException { log.debug("Checking ZK for most up to date Aliases {}", ALIASES); // Call sync() first to ensure the subsequent read (getData) is up to date. zkClient.getSolrZooKeeper().sync(ALIASES, null, null); Stat stat = new Stat(); final byte[] data = zkClient.getData(ALIASES, null, stat, true); return setIfNewer(Aliases.fromJSON(data, stat.getVersion())); } // ZK Watcher interface @Override public void process(WatchedEvent event) { // session events are not change events, and do not remove the watcher if (EventType.None.equals(event.getType())) { return; } try { log.debug("Aliases: updating"); // re-register the watch Stat stat = new Stat(); final byte[] data = zkClient.getData(ALIASES, this, stat, true); // note: it'd be nice to avoid possibly needlessly parsing if we don't update aliases but not a big deal setIfNewer(Aliases.fromJSON(data, stat.getVersion())); } catch (NoNodeException e) { // /aliases.json will not always exist } catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) { // note: aliases.json is required to be present log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: ", e); } catch (KeeperException e) { log.error("A ZK error has occurred", e); throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e); } catch (InterruptedException e) { // Restore the interrupted status Thread.currentThread().interrupt(); log.warn("Interrupted", e); } } /** * Update the internal aliases reference with a new one, provided that its ZK version has increased. 
* * @param newAliases the potentially newer version of Aliases * @return true if aliases have been updated to a new version, false otherwise */ private boolean setIfNewer(Aliases newAliases) { assert newAliases.getZNodeVersion() >= 0; synchronized (this) { int cmp = Integer.compare(aliases.getZNodeVersion(), newAliases.getZNodeVersion()); if (cmp < 0) { log.debug("Aliases: cmp={}, new definition is: {}", cmp, newAliases); aliases = newAliases; this.notifyAll(); return true; } else { log.debug("Aliases: cmp={}, not overwriting ZK version.", cmp); assert cmp != 0 || Arrays.equals(aliases.toJSON(), newAliases.toJSON()) : aliases + " != " + newAliases; return false; } } } } private void notifyPropsWatchers(String collection, Map<String, String> properties) { try { collectionPropsNotifications.submit(new PropsNotification(collection, properties)); } catch (RejectedExecutionException e) { if (!closed) { log.error("Couldn't run collection properties notifications for {}", collection, e); } } } private class PropsNotification implements Runnable { private final String collection; private final Map<String, String> collectionProperties; private final List<CollectionPropsWatcher> watchers = new ArrayList<>(); private PropsNotification(String collection, Map<String, String> collectionProperties) { this.collection = collection; this.collectionProperties = collectionProperties; // guarantee delivery of notification regardless of what happens to collectionPropsObservers // while we wait our turn in the executor by capturing the list on creation. collectionPropsObservers.compute(collection, (k, v) -> { if (v == null) return null; watchers.addAll(v.stateWatchers); return v; }); } @Override public void run() { for (CollectionPropsWatcher watcher : watchers) { if (watcher.onStateChanged(collectionProperties)) { removeCollectionPropsWatcher(collection, watcher); } } } } private class CacheCleaner implements Runnable { public void run() { while (!Thread.interrupted()) { try { Thread.sleep(60000); } catch (InterruptedException e) { // Executor shutdown will send us an interrupt break; } watchedCollectionProps.entrySet().removeIf(entry -> entry.getValue().cacheUntilNs < System.nanoTime() && !collectionPropsObservers.containsKey(entry.getKey())); } } } /** * Helper class that acts as both a {@link DocCollectionWatcher} and a {@link LiveNodesListener} * while wrapping and delegating to a {@link CollectionStateWatcher} */ private final class DocCollectionAndLiveNodesWatcherWrapper implements DocCollectionWatcher, LiveNodesListener { private final String collectionName; private final CollectionStateWatcher delegate; public int hashCode() { return collectionName.hashCode() * delegate.hashCode(); } public boolean equals(Object other) { if (other instanceof DocCollectionAndLiveNodesWatcherWrapper) { DocCollectionAndLiveNodesWatcherWrapper that = (DocCollectionAndLiveNodesWatcherWrapper) other; return this.collectionName.equals(that.collectionName) && this.delegate.equals(that.delegate); } return false; } public DocCollectionAndLiveNodesWatcherWrapper(final String collectionName, final CollectionStateWatcher delegate) { this.collectionName = collectionName; this.delegate = delegate; } @Override public boolean onStateChanged(DocCollection collectionState) { final boolean result = delegate.onStateChanged(ZkStateReader.this.liveNodes, collectionState); if (result) { // it might be a while before live nodes change, so proactively remove ourselves removeLiveNodesListener(this); } return result; } @Override public boolean
onChange(SortedSet<String> oldLiveNodes, SortedSet<String> newLiveNodes) { final DocCollection collection = ZkStateReader.this.clusterState.getCollectionOrNull(collectionName); final boolean result = delegate.onStateChanged(newLiveNodes, collection); if (result) { // it might be a while before collection changes, so proactively remove ourselves removeDocCollectionWatcher(collectionName, this); } return result; } } public DocCollection getCollection(String collection) { return clusterState == null ? null : clusterState.getCollectionOrNull(collection); } }
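// A minimal usage sketch (illustrative only; the collection name "c1" and the
// reader wiring are assumptions, not part of this file). It exercises the
// waitForState(String, long, TimeUnit, CollectionStatePredicate) overload
// defined above: block until "c1" has at least one active replica hosted on a
// live node, or throw TimeoutException after 30 seconds.
//
//   ZkStateReader reader = ...; // an already-initialized, connected reader
//   reader.waitForState("c1", 30, TimeUnit.SECONDS, (liveNodes, coll) ->
//       coll != null && coll.getReplicas().stream().anyMatch(r ->
//           r.getState() == Replica.State.ACTIVE
//               && liveNodes.contains(r.getNodeName())));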
1
40,991
Interesting. So you've found that it's faster to request only the "Stat" without the data so long as this is the typical path?
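A sketch of the two access patterns the comment contrasts, using the SolrZkClient calls that appear in the file above (path and watcher are assumed variables): exists() transfers only the node's metadata, while getData() also transfers the payload.

// Stat-only check: cheap when the data itself is not needed on the common path
Stat stat = zkClient.exists(path, watcher, true);
// Full read: the Stat plus the node's data bytes
byte[] data = zkClient.getData(path, watcher, stat, true);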
apache-lucene-solr
java
@@ -33,6 +33,7 @@ var skip = map[string]string{ "yield": "yield requires special test case (https://github.com/influxdata/flux/issues/535)", "task_per_line": "join produces inconsistent/racy results when table schemas do not match (https://github.com/influxdata/flux/issues/855)", "rowfn_with_import": "imported libraries are not visible in user-defined functions (https://github.com/influxdata/flux/issues/1000)", + "string_trim": "cannot reference a package function from within a row function", } var querier = querytest.NewQuerier()
1
package stdlib_test import ( "bytes" "context" "strings" "testing" "github.com/influxdata/flux" "github.com/influxdata/flux/ast" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/lang" "github.com/influxdata/flux/querytest" "github.com/influxdata/flux/stdlib" ) func init() { flux.FinalizeBuiltIns() } // list of end-to-end tests that are meant to be skipped and not run for various reasons var skip = map[string]string{ "string_max": "error: invalid use of function: *functions.MaxSelector has no implementation for type string (https://github.com/influxdata/platform/issues/224)", "null_as_value": "null not supported as value in influxql (https://github.com/influxdata/platform/issues/353)", "string_interp": "string interpolation not working as expected in flux (https://github.com/influxdata/platform/issues/404)", "to": "to functions are not supported in the testing framework (https://github.com/influxdata/flux/issues/77)", "covariance_missing_column_1": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)", "covariance_missing_column_2": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)", "drop_before_rename": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)", "drop_referenced": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)", "drop_non_existent": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)", "keep_non_existent": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)", "yield": "yield requires special test case (https://github.com/influxdata/flux/issues/535)", "task_per_line": "join produces inconsistent/racy results when table schemas do not match (https://github.com/influxdata/flux/issues/855)", "rowfn_with_import": "imported libraries are not visible in user-defined functions (https://github.com/influxdata/flux/issues/1000)", } var querier = querytest.NewQuerier() func TestFluxEndToEnd(t *testing.T) { runEndToEnd(t, querier, stdlib.FluxTestPackages) } func BenchmarkFluxEndToEnd(b *testing.B) { benchEndToEnd(b, querier, stdlib.FluxTestPackages) } func runEndToEnd(t *testing.T, querier *querytest.Querier, pkgs []*ast.Package) { for _, pkg := range pkgs { pkg := pkg.Copy().(*ast.Package) name := pkg.Files[0].Name t.Run(name, func(t *testing.T) { n := strings.TrimSuffix(name, ".flux") if reason, ok := skip[n]; ok { t.Skip(reason) } testFlux(t, querier, pkg) }) } } func benchEndToEnd(b *testing.B, querier *querytest.Querier, pkgs []*ast.Package) { for _, pkg := range pkgs { pkg := pkg.Copy().(*ast.Package) name := pkg.Files[0].Name b.Run(name, func(b *testing.B) { n := strings.TrimSuffix(name, ".flux") if reason, ok := skip[n]; ok { b.Skip(reason) } b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { testFlux(b, querier, pkg) } }) } } func testFlux(t testing.TB, querier *querytest.Querier, pkg *ast.Package) { pkg.Files = append(pkg.Files, stdlib.TestingRunCalls(pkg)) c := lang.ASTCompiler{AST: pkg} // testing.run doTestRun(t, querier, c) // testing.inspect if t.Failed() { // Rerun the test case using testing.inspect pkg.Files[len(pkg.Files)-1] = stdlib.TestingInspectCalls(pkg) c := lang.ASTCompiler{AST: pkg} doTestInspect(t, querier, c) } } func doTestRun(t testing.TB, querier *querytest.Querier, c flux.Compiler) { r, err := querier.C.Query(context.Background(), c) if err != nil 
{ t.Fatalf("unexpected error while executing testing.run: %v", err) } defer r.Done() result, ok := <-r.Ready() if !ok { t.Fatalf("unexpected error retrieving testing.run result: %s", r.Err()) } // Read all results checking for errors for _, res := range result { err := res.Tables().Do(func(flux.Table) error { return nil }) if err != nil { t.Error(err) } } } func doTestInspect(t testing.TB, querier *querytest.Querier, c flux.Compiler) { r, err := querier.C.Query(context.Background(), c) if err != nil { t.Fatalf("unexpected error while executing testing.inspect: %v", err) } defer r.Done() result, ok := <-r.Ready() if !ok { t.Fatalf("unexpected error retrieving testing.inspect result: %s", r.Err()) } // Read all results and format them var out bytes.Buffer for _, res := range result { if err := execute.FormatResult(&out, res); err != nil { t.Error(err) } } t.Log(out.String()) }
1
9,973
Is this a future fix? I thought the local identifier scope and the package issue were the same one.
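For context, a hypothetical sketch of the pattern the new skip entry refers to: a package function (strings.trim) called from inside the row function passed to map, which at the time failed to resolve the import (the bucket name and range are illustrative).

import "strings"

from(bucket: "telegraf")
    |> range(start: -1h)
    |> map(fn: (r) => ({_time: r._time, _value: strings.trim(v: r._value, cutset: " ")}))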
influxdata-flux
go
@@ -247,7 +247,17 @@ class TextInfoQuickNavItem(QuickNavItem): class BrowseModeTreeInterceptor(treeInterceptorHandler.TreeInterceptor): scriptCategory = inputCore.SCRCAT_BROWSEMODE disableAutoPassThrough = False - APPLICATION_ROLES = (controlTypes.ROLE_APPLICATION, controlTypes.ROLE_DIALOG) + + def _isApplicationObject(self,obj): + """ + Checks whether the given object in this browseMode treeInterceptor is an application; i.e. although the root of the object is represented in the treeInterceptor, its content is not. + This covers both applications and modal dialogs. + """ + if obj.role==controlTypes.ROLE_APPLICATION: + return True + elif obj.role==controlTypes.ROLE_DIALOG and controlTypes.STATE_MODAL in obj.states: + return True + return False def _get_currentNVDAObject(self): raise NotImplementedError
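A hedged sketch of the behavioral change the patch introduces; ti and dlg are hypothetical stand-ins for a BrowseModeTreeInterceptor instance and a dialog NVDAObject.

# Previously any ROLE_DIALOG matched APPLICATION_ROLES; with _isApplicationObject,
# only modal dialogs are treated as opaque applications.
dlg.role = controlTypes.ROLE_DIALOG
dlg.states = {controlTypes.STATE_MODAL}
assert ti._isApplicationObject(dlg)      # modal dialog -> treated as an application
dlg.states = set()
assert not ti._isApplicationObject(dlg)  # non-modal dialog -> content stays browseable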
1
#browseMode.py #A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2007-2017 NV Access Limited, Babbage B.V. #This file is covered by the GNU General Public License. #See the file COPYING for more details. import itertools import collections import winsound import time import weakref import wx from logHandler import log import documentBase import review import scriptHandler import eventHandler import nvwave import queueHandler import gui import ui import cursorManager from scriptHandler import isScriptWaiting, willSayAllResume import aria import controlTypes import config import textInfos import braille import speech import sayAllHandler import treeInterceptorHandler import inputCore import api import gui.guiHelper from NVDAObjects import NVDAObject REASON_QUICKNAV = "quickNav" def reportPassThrough(treeInterceptor,onlyIfChanged=True): """Reports the pass through mode if it has changed. @param treeInterceptor: The current Browse Mode treeInterceptor. @type treeInterceptor: L{BrowseModeTreeInterceptor} @param onlyIfChanged: if true, reporting will not happen if the last reportPassThrough reported the same thing. @type onlyIfChanged: bool """ if not onlyIfChanged or treeInterceptor.passThrough != reportPassThrough.last: if config.conf["virtualBuffers"]["passThroughAudioIndication"]: sound = r"waves\focusMode.wav" if treeInterceptor.passThrough else r"waves\browseMode.wav" nvwave.playWaveFile(sound) else: if treeInterceptor.passThrough: # Translators: The mode to interact with controls in documents ui.message(_("Focus mode")) else: # Translators: The mode that presents text in a flat representation # that can be navigated with the cursor keys like in a text document ui.message(_("Browse mode")) reportPassThrough.last = treeInterceptor.passThrough reportPassThrough.last = False def mergeQuickNavItemIterators(iterators,direction="next"): """ Merges multiple iterators that emit L{QuickNavItem} objects, yielding them from first to last. They are sorted using min or max (__lt__ should be implemented on the L{QuickNavItem} objects). @param iterators: the iterators you want to merge. @type iterators: sequence of iterators that emit L{QuickNavItem} objects. @param direction: the direction these iterators are searching (e.g. next, previous) @type direction: string """ finder=min if direction=="next" else max curValues=[] # Populate a list with all iterators and their corresponding first value for it in iterators: try: val=next(it) except StopIteration: continue curValues.append((it,val)) # Until all iterators have been used up, # Find the first (minimum or maximum) of all the values, # emit that, and update the list with the next available value for the iterator whose value was emitted. while len(curValues)>0: first=finder(curValues,key=lambda x: x[1]) curValues.remove(first) it,val=first yield val try: newVal=next(it) except StopIteration: continue curValues.append((it,newVal)) class QuickNavItem(object): """ Emitted by L{BrowseModeTreeInterceptor._iterNodesByType}, this represents one of many positions in a browse mode document, based on the type of item being searched for (e.g. link, heading, table etc).""" itemType=None #: The type of items searched for (e.g. link, heading, table etc) label=None #: The label that should represent this item in the Elements list. isAfterSelection=False #: Is this item positioned after the caret in the document? Used by the elements list to place its own selection. def __init__(self,itemType,document): """ @param itemType: the type that was searched for (e.g.
link, heading, table etc) @type itemType: string @param document: the browse mode document this item is a part of. @type document: L{BrowseModeTreeInterceptor} """ self.itemType=itemType self.document=document def isChild(self,parent): """ Is this item a child of the given parent? This is used when representing items in a hierarchical tree structure, such as the Elements List. @param parent: the item of which this item may be a child. @type parent: L{QuickNavItem} @return: True if this item is a child, false otherwise. @rtype: bool """ raise NotImplementedError def report(self,readUnit=None): """ Reports the contents of this item. @param readUnit: the optional unit (e.g. line, paragraph) that should be used to announce the item position when moved to. If not given, then the full size of the item is used. @type readUnit: a L{textInfos}.UNIT_* constant. """ raise NotImplementedError def moveTo(self): """ Moves the browse mode caret or focus to this item. """ raise NotImplementedError def activate(self): """ Activates this item's position. E.g. follows a link, presses a button etc. """ raise NotImplementedError def rename(self,newName): """ Renames this item with the new name. """ raise NotImplementedError @property def isRenameAllowed(self): return False class TextInfoQuickNavItem(QuickNavItem): """ Represents a quick nav item in a browse mode document whose positions are represented by a L{textInfos.TextInfo}. """ def __init__(self,itemType,document,textInfo): """ See L{QuickNavItem.__init__} for itemType and document argument definitions. @param textInfo: the textInfo position this item represents. @type textInfo: L{textInfos.TextInfo} """ self.textInfo=textInfo super(TextInfoQuickNavItem,self).__init__(itemType,document) def __lt__(self,other): return self.textInfo.compareEndPoints(other.textInfo,"startToStart")<0 @property def obj(self): return self.textInfo.basePosition if isinstance(self.textInfo.basePosition,NVDAObject) else None @property def label(self): return self.textInfo.text.strip() def isChild(self,parent): if parent.textInfo.isOverlapping(self.textInfo): return True return False def report(self,readUnit=None): info=self.textInfo if readUnit: fieldInfo = info.copy() info.collapse() info.move(readUnit, 1, endPoint="end") if info.compareEndPoints(fieldInfo, "endToEnd") > 0: # We've expanded past the end of the field, so limit to the end of the field. info.setEndPoint(fieldInfo, "endToEnd") speech.speakTextInfo(info, reason=controlTypes.REASON_FOCUS) def activate(self): self.textInfo.obj._activatePosition(self.textInfo) def moveTo(self): info=self.textInfo.copy() info.collapse() self.document._set_selection(info,reason=REASON_QUICKNAV) @property def isAfterSelection(self): caret=self.document.makeTextInfo(textInfos.POSITION_CARET) return self.textInfo.compareEndPoints(caret, "startToStart") > 0 def _getLabelForProperties(self, labelPropertyGetter): """ Fetches required properties for this L{TextInfoQuickNavItem} and constructs a label to be shown in an elements list. This can be used by subclasses to implement the L{label} property. @param labelPropertyGetter: A callable taking 1 argument, specifying the property to fetch. For example, if L{itemType} is landmark, the callable must return the landmark type when "landmark" is passed as the property argument. Alternative property names might be name or value. The callable must return None if the property doesn't exist.
An expected callable might be the get method of a L{Dict}, or "lambda property: getattr(self.obj, property, None)" for an L{NVDAObject}. """ content = self.textInfo.text.strip() if self.itemType == "heading": # Output: displayed text of the heading. return content labelParts = None name = labelPropertyGetter("name") if self.itemType == "landmark": landmark = aria.landmarkRoles.get(labelPropertyGetter("landmark")) # Example output: main menu; navigation labelParts = (name, landmark) else: role = labelPropertyGetter("role") roleText = controlTypes.roleLabels[role] # Translators: Reported label in the elements list for an element which has no name and value unlabeled = _("Unlabeled") realStates = labelPropertyGetter("states") labeledStates = " ".join(controlTypes.processAndLabelStates(role, realStates, controlTypes.REASON_FOCUS)) if self.itemType == "formField": if role in (controlTypes.ROLE_BUTTON,controlTypes.ROLE_DROPDOWNBUTTON,controlTypes.ROLE_TOGGLEBUTTON,controlTypes.ROLE_SPLITBUTTON,controlTypes.ROLE_MENUBUTTON,controlTypes.ROLE_DROPDOWNBUTTONGRID,controlTypes.ROLE_SPINBUTTON,controlTypes.ROLE_TREEVIEWBUTTON): # Example output: Mute; toggle button; pressed labelParts = (content or name or unlabeled, roleText, labeledStates) else: # Example output: Find a repository...; edit; has auto complete; NVDA labelParts = (name or unlabeled, roleText, labeledStates, content) elif self.itemType in ("link", "button"): # Example output: You have unread notifications; visited labelParts = (content or name or unlabeled, labeledStates) if labelParts: label = "; ".join(lp for lp in labelParts if lp) else: label = content return label class BrowseModeTreeInterceptor(treeInterceptorHandler.TreeInterceptor): scriptCategory = inputCore.SCRCAT_BROWSEMODE disableAutoPassThrough = False APPLICATION_ROLES = (controlTypes.ROLE_APPLICATION, controlTypes.ROLE_DIALOG) def _get_currentNVDAObject(self): raise NotImplementedError ALWAYS_SWITCH_TO_PASS_THROUGH_ROLES = frozenset({ controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_LIST, controlTypes.ROLE_SLIDER, controlTypes.ROLE_TABCONTROL, controlTypes.ROLE_MENUBAR, controlTypes.ROLE_POPUPMENU, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM, controlTypes.ROLE_SPINBUTTON, controlTypes.ROLE_TABLEROW, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER, }) SWITCH_TO_PASS_THROUGH_ON_FOCUS_ROLES = frozenset({ controlTypes.ROLE_LISTITEM, controlTypes.ROLE_RADIOBUTTON, controlTypes.ROLE_TAB, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_RADIOMENUITEM, controlTypes.ROLE_CHECKMENUITEM, }) def shouldPassThrough(self, obj, reason=None): """Determine whether pass through mode should be enabled (focus mode) or disabled (browse mode) for a given object. @param obj: The object in question. @type obj: L{NVDAObjects.NVDAObject} @param reason: The reason for this query; one of the output reasons, L{REASON_QUICKNAV}, or C{None} for manual pass through mode activation by the user. @return: C{True} if pass through mode (focus mode) should be enabled, C{False} if it should be disabled (browse mode). """ if reason and ( self.disableAutoPassThrough or (reason == controlTypes.REASON_FOCUS and not config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"]) or (reason == controlTypes.REASON_CARET and not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]) ): # This check relates to auto pass through and auto pass through is disabled, so don't change the pass through state.
return self.passThrough if reason == REASON_QUICKNAV: return False states = obj.states role = obj.role if controlTypes.STATE_EDITABLE in states and controlTypes.STATE_UNAVAILABLE not in states: return True # Menus sometimes get focus due to menuStart events even though they don't report as focused/focusable. if not obj.isFocusable and controlTypes.STATE_FOCUSED not in states and role != controlTypes.ROLE_POPUPMENU: return False # many controls that are read-only should not switch to passThrough. # However, certain controls such as combo boxes and readonly edits are read-only but still interactive. # #5118: read-only ARIA grids should also be allowed (focusable table cells, rows and headers). if controlTypes.STATE_READONLY in states and role not in (controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_TABLEROW, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER): return False # Any roles or states for which we always switch to passThrough if role in self.ALWAYS_SWITCH_TO_PASS_THROUGH_ROLES or controlTypes.STATE_EDITABLE in states: return True # focus is moving to this control. Perhaps after pressing tab or clicking a button that brings up a menu (via javascript) if reason == controlTypes.REASON_FOCUS: if role in self.SWITCH_TO_PASS_THROUGH_ON_FOCUS_ROLES: return True # If this is a focus change, pass through should be enabled for certain ancestor containers. # this is done last for performance considerations. Walking up through the parents could be costly while obj and obj != self.rootNVDAObject: if obj.role == controlTypes.ROLE_TOOLBAR: return True obj = obj.parent return False def _get_shouldTrapNonCommandGestures(self): return config.conf['virtualBuffers']['trapNonCommandGestures'] def script_trapNonCommandGesture(self,gesture): winsound.PlaySound("default",1) singleLetterNavEnabled=True #: Whether single letter navigation scripts should be active (true) or if these letters should fall to the application. def getAlternativeScript(self,gesture,script): if self.passThrough or not gesture.isCharacter: return script if not self.singleLetterNavEnabled: return None if not script and self.shouldTrapNonCommandGestures: script=self.script_trapNonCommandGesture return script def script_toggleSingleLetterNav(self,gesture): if self.singleLetterNavEnabled: self.singleLetterNavEnabled=False # Translators: Reported when single letter navigation in browse mode is turned off. ui.message(_("Single letter navigation off")) else: self.singleLetterNavEnabled=True # Translators: Reported when single letter navigation in browse mode is turned on. ui.message(_("Single letter navigation on")) # Translators: the description for the toggleSingleLetterNavigation command in browse mode. script_toggleSingleLetterNav.__doc__=_("Toggles single letter navigation on and off. When on, single letter keys in browse mode jump to various kinds of elements on the page. When off, these keys are passed to the application") def _get_ElementsListDialog(self): return ElementsListDialog def _iterNodesByType(self,itemType,direction="next",pos=None): """ Yields L{QuickNavItem} objects representing the ordered positions in this document according to the type being searched for (e.g. link, heading, table etc). @param itemType: the type being searched for (e.g.
link, heading, table etc) @type itemType: string @param direction: the direction in which to search (next, previous, up) @type direction: string @param pos: the position in the document from where to start the search. @type pos: Usually an L{textInfos.TextInfo} @raise NotImplementedError: This type is not supported by this BrowseMode implementation """ raise NotImplementedError def _iterNotLinkBlock(self, direction="next", pos=None): raise NotImplementedError def _quickNavScript(self,gesture, itemType, direction, errorMessage, readUnit): if itemType=="notLinkBlock": iterFactory=self._iterNotLinkBlock else: iterFactory=lambda direction,info: self._iterNodesByType(itemType,direction,info) info=self.selection try: item = next(iterFactory(direction, info)) except NotImplementedError: # Translators: a message when a particular quick nav command is not supported in the current document. ui.message(_("Not supported in this document")) return except StopIteration: ui.message(errorMessage) return item.moveTo() if not gesture or not willSayAllResume(gesture): item.report(readUnit=readUnit) @classmethod def addQuickNav(cls, itemType, key, nextDoc, nextError, prevDoc, prevError, readUnit=None): """Adds a script for the given quick nav item. @param itemType: The type of item, e.g. "heading", "link" ... @param key: The quick navigation key to bind to the script. Shift is automatically added for the previous item gesture. E.g. h for heading @param nextDoc: The command description to bind to the script that yields the next quick nav item. @param nextError: The error message if there are no more quick nav items of type itemType in the forward direction. @param prevDoc: The command description to bind to the script that yields the previous quick nav item. @param prevError: The error message if there are no more quick nav items of type itemType in the backward direction. @param readUnit: The unit (one of the textInfos.UNIT_* constants) to announce when moving to this type of item. For example, only the line is read when moving to tables to avoid reading a potentially massive table. If None, the entire item will be announced. """ scriptSuffix = itemType[0].upper() + itemType[1:] scriptName = "next%s" % scriptSuffix funcName = "script_%s" % scriptName script = lambda self,gesture: self._quickNavScript(gesture, itemType, "next", nextError, readUnit) script.__doc__ = nextDoc script.__name__ = funcName script.resumeSayAllMode=sayAllHandler.CURSOR_CARET setattr(cls, funcName, script) cls.__gestures["kb:%s" % key] = scriptName scriptName = "previous%s" % scriptSuffix funcName = "script_%s" % scriptName script = lambda self,gesture: self._quickNavScript(gesture, itemType, "previous", prevError, readUnit) script.__doc__ = prevDoc script.__name__ = funcName script.resumeSayAllMode=sayAllHandler.CURSOR_CARET setattr(cls, funcName, script) cls.__gestures["kb:shift+%s" % key] = scriptName def script_elementsList(self,gesture): # We need this to be a modal dialog, but it mustn't block this script. def run(): gui.mainFrame.prePopup() d = self.ElementsListDialog(self) d.ShowModal() d.Destroy() gui.mainFrame.postPopup() wx.CallAfter(run) # Translators: the description for the Elements List command in browse mode. script_elementsList.__doc__ = _("Lists various types of elements in this document") def _activateNVDAObject(self, obj): """Activate an object in response to a user request. This should generally perform the default action or click on the object. @param obj: The object to activate.
@type obj: L{NVDAObjects.NVDAObject} """ try: obj.doAction() except NotImplementedError: log.debugWarning("doAction not implemented") def _activatePosition(self,obj=None): if not obj: obj=self.currentNVDAObject if not obj: return if obj.role == controlTypes.ROLE_MATH: import mathPres try: return mathPres.interactWithMathMl(obj.mathMl) except (NotImplementedError, LookupError): pass return if self.shouldPassThrough(obj): obj.setFocus() self.passThrough = True reportPassThrough(self) elif obj.role == controlTypes.ROLE_EMBEDDEDOBJECT or obj.role in self.APPLICATION_ROLES: obj.setFocus() speech.speakObject(obj, reason=controlTypes.REASON_FOCUS) else: self._activateNVDAObject(obj) def script_activatePosition(self,gesture): self._activatePosition() # Translators: the description for the activatePosition script on browseMode documents. script_activatePosition.__doc__ = _("Activates the current object in the document") def script_disablePassThrough(self, gesture): if not self.passThrough or self.disableAutoPassThrough: return gesture.send() self.passThrough = False self.disableAutoPassThrough = False reportPassThrough(self) script_disablePassThrough.ignoreTreeInterceptorPassThrough = True __gestures={ "kb:NVDA+f7": "elementsList", "kb:enter": "activatePosition", "kb:numpadEnter": "activatePosition", "kb:space": "activatePosition", "kb:NVDA+shift+space":"toggleSingleLetterNav", "kb:escape": "disablePassThrough", } # Add quick navigation scripts. qn = BrowseModeTreeInterceptor.addQuickNav qn("heading", key="h", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading")) qn("heading1", key="1", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 1"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 1"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 1"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 1")) qn("heading2", key="2", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 2"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 2"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 2"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 2")) qn("heading3", key="3", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 3"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 3"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 3"), # Translators: Message presented when the browse mode element is not found. 
prevError=_("no previous heading at level 3")) qn("heading4", key="4", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 4"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 4"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 4"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 4")) qn("heading5", key="5", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 5"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 5"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 5"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 5")) qn("heading6", key="6", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 6"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 6"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 6"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 6")) qn("table", key="t", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next table"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next table"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous table"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous table"), readUnit=textInfos.UNIT_LINE) qn("link", key="k", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next link"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next link"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous link"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous link")) qn("visitedLink", key="v", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next visited link"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next visited link"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous visited link"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous visited link")) qn("unvisitedLink", key="u", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next unvisited link"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next unvisited link"), # Translators: Input help message for a quick navigation command in browse mode. 
prevDoc=_("moves to the previous unvisited link"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous unvisited link")) qn("formField", key="f", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next form field"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next form field"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous form field"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous form field"), readUnit=textInfos.UNIT_LINE) qn("list", key="l", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next list"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next list"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous list"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous list"), readUnit=textInfos.UNIT_LINE) qn("listItem", key="i", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next list item"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next list item"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous list item"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous list item")) qn("button", key="b", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next button"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next button"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous button"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous button")) qn("edit", key="e", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next edit field"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next edit field"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous edit field"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous edit field"), readUnit=textInfos.UNIT_LINE) qn("frame", key="m", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next frame"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next frame"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous frame"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous frame"), readUnit=textInfos.UNIT_LINE) qn("separator", key="s", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next separator"), # Translators: Message presented when the browse mode element is not found. 
nextError=_("no next separator"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous separator"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous separator")) qn("radioButton", key="r", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next radio button"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next radio button"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous radio button"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous radio button")) qn("comboBox", key="c", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next combo box"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next combo box"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous combo box"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous combo box")) qn("checkBox", key="x", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next check box"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next check box"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous check box"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous check box")) qn("graphic", key="g", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next graphic"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next graphic"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous graphic"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous graphic")) qn("blockQuote", key="q", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next block quote"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next block quote"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous block quote"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous block quote")) qn("notLinkBlock", key="n", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("skips forward past a block of links"), # Translators: Message presented when the browse mode element is not found. nextError=_("no more text after a block of links"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("skips backward past a block of links"), # Translators: Message presented when the browse mode element is not found. prevError=_("no more text before a block of links"), readUnit=textInfos.UNIT_LINE) qn("landmark", key="d", # Translators: Input help message for a quick navigation command in browse mode. 
nextDoc=_("moves to the next landmark"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next landmark"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous landmark"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous landmark"), readUnit=textInfos.UNIT_LINE) qn("embeddedObject", key="o", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next embedded object"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next embedded object"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous embedded object"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous embedded object")) qn("annotation", key="a", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next annotation"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next annotation"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous annotation"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous annotation")) qn("error", key="w", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next error"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next error"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous error"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous error")) del qn class ElementsListDialog(wx.Dialog): ELEMENT_TYPES = ( # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("link", _("Lin&ks")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("heading", _("&Headings")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("formField", _("&Form fields")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("button", _("&Buttons")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("landmark", _("Lan&dmarks")), ) Element = collections.namedtuple("Element", ("item", "parent")) lastSelectedElementType=0 def __init__(self, document): self.document = document # Translators: The title of the browse mode Elements List dialog. super(ElementsListDialog, self).__init__(gui.mainFrame, wx.ID_ANY, _("Elements List")) mainSizer = wx.BoxSizer(wx.VERTICAL) contentsSizer = wx.BoxSizer(wx.VERTICAL) # Translators: The label of a group of radio buttons to select the type of element # in the browse mode Elements List dialog. 
child = wx.RadioBox(self, wx.ID_ANY, label=_("Type:"), choices=tuple(et[1] for et in self.ELEMENT_TYPES)) child.SetSelection(self.lastSelectedElementType) child.Bind(wx.EVT_RADIOBOX, self.onElementTypeChange) contentsSizer.Add(child, flag=wx.EXPAND) contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS) self.tree = wx.TreeCtrl(self, size=wx.Size(500, 600), style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT | wx.TR_SINGLE | wx.TR_EDIT_LABELS) self.tree.Bind(wx.EVT_SET_FOCUS, self.onTreeSetFocus) self.tree.Bind(wx.EVT_CHAR, self.onTreeChar) self.tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.onTreeLabelEditBegin) self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.onTreeLabelEditEnd) self.treeRoot = self.tree.AddRoot("root") contentsSizer.Add(self.tree,flag=wx.EXPAND) contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS) # Translators: The label of an editable text field to filter the elements # in the browse mode Elements List dialog. filterText = _("Filt&er by:") labeledCtrl = gui.guiHelper.LabeledControlHelper(self, filterText, wx.TextCtrl) self.filterEdit = labeledCtrl.control self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange) contentsSizer.Add(labeledCtrl.sizer) contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS) bHelper = gui.guiHelper.ButtonHelper(wx.HORIZONTAL) # Translators: The label of a button to activate an element # in the browse mode Elements List dialog. self.activateButton = bHelper.addButton(self, label=_("&Activate")) self.activateButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(True)) # Translators: The label of a button to move to an element # in the browse mode Elements List dialog. self.moveButton = bHelper.addButton(self, label=_("&Move to")) self.moveButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(False)) bHelper.addButton(self, id=wx.ID_CANCEL) contentsSizer.Add(bHelper.sizer, flag=wx.ALIGN_RIGHT) mainSizer.Add(contentsSizer, border=gui.guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL) mainSizer.Fit(self) self.SetSizer(mainSizer) self.tree.SetFocus() self.initElementType(self.ELEMENT_TYPES[self.lastSelectedElementType][0]) self.Center(wx.BOTH | wx.CENTER_ON_SCREEN) def onElementTypeChange(self, evt): elementType=evt.GetInt() # We need to make sure this gets executed after the focus event. # Otherwise, NVDA doesn't seem to get the event. queueHandler.queueFunction(queueHandler.eventQueue, self.initElementType, self.ELEMENT_TYPES[elementType][0]) self.lastSelectedElementType=elementType def initElementType(self, elType): if elType in ("link","button"): # Links and buttons can be activated. self.activateButton.Enable() self.SetAffirmativeId(self.activateButton.GetId()) else: # No other element type can be activated. self.activateButton.Disable() self.SetAffirmativeId(self.moveButton.GetId()) # Gather the elements of this type. self._elements = [] self._initialElement = None parentElements = [] isAfterSelection=False for item in self.document._iterNodesByType(elType): # Find the parent element, if any. for parent in reversed(parentElements): if item.isChild(parent.item): break else: # We're not a child of this parent, so this parent has no more children and can be removed from the stack. parentElements.pop() else: # No parent found, so we're at the root. # Note that parentElements will be empty at this point, as all parents are no longer relevant and have thus been removed from the stack. 
parent = None element=self.Element(item,parent) self._elements.append(element) if not isAfterSelection: isAfterSelection=item.isAfterSelection if not isAfterSelection: # The element immediately preceding or overlapping the caret should be the initially selected element. # Since we have not yet passed the selection, use this as the initial element. try: self._initialElement = self._elements[-1] except IndexError: # No previous element. pass # This could be the parent of a subsequent element, so add it to the parents stack. parentElements.append(element) # Start with no filtering. self.filterEdit.ChangeValue("") self.filter("", newElementType=True) def filter(self, filterText, newElementType=False): # If this is a new element type, use the element nearest the cursor. # Otherwise, use the currently selected element. defaultElement = self._initialElement if newElementType else self.tree.GetItemPyData(self.tree.GetSelection()) # Clear the tree. self.tree.DeleteChildren(self.treeRoot) # Populate the tree with elements matching the filter text. elementsToTreeItems = {} defaultItem = None matched = False #Do case-insensitive matching by lowering both filterText and each element's text. filterText=filterText.lower() for element in self._elements: label=element.item.label if filterText and filterText not in label.lower(): continue matched = True parent = element.parent if parent: parent = elementsToTreeItems.get(parent) item = self.tree.AppendItem(parent or self.treeRoot, label) self.tree.SetItemPyData(item, element) elementsToTreeItems[element] = item if element == defaultElement: defaultItem = item self.tree.ExpandAll() if not matched: # No items, so disable the buttons. self.activateButton.Disable() self.moveButton.Disable() return # If there's no default item, use the first item in the tree. self.tree.SelectItem(defaultItem or self.tree.GetFirstChild(self.treeRoot)[0]) # Enable the button(s). # If the activate button isn't the default button, it is disabled for this element type and shouldn't be enabled here. if self.AffirmativeId == self.activateButton.Id: self.activateButton.Enable() self.moveButton.Enable() def onTreeSetFocus(self, evt): # Start with no search. self._searchText = "" self._searchCallLater = None evt.Skip() def onTreeChar(self, evt): key = evt.KeyCode if key == wx.WXK_RETURN: # The enter key should be propagated to the dialog and thus activate the default button, # but this is broken (wx ticket #3725). # Therefore, we must catch the enter key here. # Activate the current default button. evt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_ANY) button = self.FindWindowById(self.AffirmativeId) if button.Enabled: button.ProcessEvent(evt) else: wx.Bell() elif key == wx.WXK_F2: item=self.tree.GetSelection() if item: selectedItemType=self.tree.GetItemPyData(item).item self.tree.EditLabel(item) evt.Skip() elif key >= wx.WXK_START or key == wx.WXK_BACK: # Non-printable character. self._searchText = "" evt.Skip() else: # Search the list. # We have to implement this ourselves, as tree views don't accept space as a search character. char = unichr(evt.UnicodeKey).lower() # IF the same character is typed twice, do the same search. 
if self._searchText != char: self._searchText += char if self._searchCallLater: self._searchCallLater.Restart() else: self._searchCallLater = wx.CallLater(1000, self._clearSearchText) self.search(self._searchText) def onTreeLabelEditBegin(self,evt): item=self.tree.GetSelection() selectedItemType = self.tree.GetItemPyData(item).item if not selectedItemType.isRenameAllowed: evt.Veto() def onTreeLabelEditEnd(self,evt): selectedItemNewName=evt.GetLabel() item=self.tree.GetSelection() selectedItemType = self.tree.GetItemPyData(item).item selectedItemType.rename(selectedItemNewName) def _clearSearchText(self): self._searchText = "" def search(self, searchText): item = self.tree.GetSelection() if not item: # No items. return # First try searching from the current item. # Failing that, search from the first item. items = itertools.chain(self._iterReachableTreeItemsFromItem(item), self._iterReachableTreeItemsFromItem(self.tree.GetFirstChild(self.treeRoot)[0])) if len(searchText) == 1: # If only a single character has been entered, skip (search after) the current item. next(items) for item in items: if self.tree.GetItemText(item).lower().startswith(searchText): self.tree.SelectItem(item) return # Not found. wx.Bell() def _iterReachableTreeItemsFromItem(self, item): while item: yield item childItem = self.tree.GetFirstChild(item)[0] if childItem and self.tree.IsExpanded(item): # Has children and is reachable, so recurse. for childItem in self._iterReachableTreeItemsFromItem(childItem): yield childItem item = self.tree.GetNextSibling(item) def onFilterEditTextChange(self, evt): self.filter(self.filterEdit.GetValue()) evt.Skip() def onAction(self, activate): self.Close() # Save off the last selected element type on to the class so its used in initialization next time. self.__class__.lastSelectedElementType=self.lastSelectedElementType item = self.tree.GetSelection() item = self.tree.GetItemPyData(item).item if activate: item.activate() else: def move(): speech.cancelSpeech() item.moveTo() item.report() wx.CallLater(100, move) class BrowseModeDocumentTextInfo(textInfos.TextInfo): def getControlFieldSpeech(self, attrs, ancestorAttrs, fieldType, formatConfig=None, extraDetail=False, reason=None): textList = [] landmark = attrs.get("landmark") if formatConfig["reportLandmarks"] and fieldType == "start_addedToControlFieldStack" and landmark: try: textList.append(attrs["name"]) except KeyError: pass if landmark == "region": # The word landmark is superfluous for regions. textList.append(aria.landmarkRoles[landmark]) else: textList.append(_("%s landmark") % aria.landmarkRoles[landmark]) textList.append(super(BrowseModeDocumentTextInfo, self).getControlFieldSpeech(attrs, ancestorAttrs, fieldType, formatConfig, extraDetail, reason)) return " ".join(textList) def getControlFieldBraille(self, field, ancestors, reportStart, formatConfig): textList = [] landmark = field.get("landmark") if formatConfig["reportLandmarks"] and reportStart and landmark and field.get("_startOfNode"): try: textList.append(field["name"]) except KeyError: pass if landmark == "region": # The word landmark is superfluous for regions. textList.append(braille.landmarkLabels[landmark]) else: # Translators: This is brailled to indicate a landmark (example output: lmk main). 
textList.append(_("lmk %s") % braille.landmarkLabels[landmark]) text = super(BrowseModeDocumentTextInfo, self).getControlFieldBraille(field, ancestors, reportStart, formatConfig) if text: textList.append(text) return " ".join(textList) def _get_focusableNVDAObjectAtStart(self): try: item = next(self.obj._iterNodesByType("focusable", "up", self)) except StopIteration: return self.obj.rootNVDAObject if not item: return self.obj.rootNVDAObject return item.obj class BrowseModeDocumentTreeInterceptor(documentBase.DocumentWithTableNavigation,cursorManager.CursorManager,BrowseModeTreeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor): programmaticScrollMayFireEvent = False def __init__(self,obj): super(BrowseModeDocumentTreeInterceptor,self).__init__(obj) self._lastProgrammaticScrollTime = None self.documentConstantIdentifier = self.documentConstantIdentifier self._lastFocusObj = None self._hadFirstGainFocus = False self._enteringFromOutside = True # We need to cache this because it will be unavailable once the document dies. if not hasattr(self.rootNVDAObject.appModule, "_browseModeRememberedCaretPositions"): self.rootNVDAObject.appModule._browseModeRememberedCaretPositions = {} self._lastCaretPosition = None #: True if the last caret move was due to a focus change. self._lastCaretMoveWasFocus = False def terminate(self): if self.shouldRememberCaretPositionAcrossLoads and self._lastCaretPosition: try: self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] = self._lastCaretPosition except AttributeError: # The app module died. pass def _get_currentNVDAObject(self): return self.makeTextInfo(textInfos.POSITION_CARET).NVDAObjectAtStart def event_treeInterceptor_gainFocus(self): """Triggered when this browse mode document gains focus. This event is only fired upon entering this treeInterceptor when it was not the current treeInterceptor before. This is different to L{event_gainFocus}, which is fired when an object inside this treeInterceptor gains focus, even if that object is in the same treeInterceptor. """ doSayAll=False hadFirstGainFocus=self._hadFirstGainFocus if not hadFirstGainFocus: # This treeInterceptor is gaining focus for the first time. # Fake a focus event on the focus object, as the treeInterceptor may have missed the actual focus event. focus = api.getFocusObject() self.event_gainFocus(focus, lambda: focus.event_gainFocus()) if not self.passThrough: # We only set the caret position if in browse mode. # If in focus mode, the document must have forced the focus somewhere, # so we don't want to override it. initialPos = self._getInitialCaretPos() if initialPos: self.selection = self.makeTextInfo(initialPos) reportPassThrough(self) doSayAll=config.conf['virtualBuffers']['autoSayAllOnPageLoad'] self._hadFirstGainFocus = True if not self.passThrough: if doSayAll: speech.speakObjectProperties(self.rootNVDAObject,name=True,states=True,reason=controlTypes.REASON_FOCUS) sayAllHandler.readText(sayAllHandler.CURSOR_CARET) else: # Speak it like we would speak focus on any other document object. # This includes when entering the treeInterceptor for the first time: if not hadFirstGainFocus: speech.speakObject(self.rootNVDAObject, reason=controlTypes.REASON_FOCUS) else: # And when coming in from an outside object # #4069 But not when coming up from a non-rendered descendant. 
ancestors=api.getFocusAncestors() fdl=api.getFocusDifferenceLevel() try: tl=ancestors.index(self.rootNVDAObject) except ValueError: tl=len(ancestors) if fdl<=tl: speech.speakObject(self.rootNVDAObject, reason=controlTypes.REASON_FOCUS) info = self.selection if not info.isCollapsed: speech.speakSelectionMessage(_("selected %s"), info.text) else: info.expand(textInfos.UNIT_LINE) speech.speakTextInfo(info, reason=controlTypes.REASON_CARET, unit=textInfos.UNIT_LINE) reportPassThrough(self) braille.handler.handleGainFocus(self) def event_caret(self, obj, nextHandler): if self.passThrough: nextHandler() def _activateLongDesc(self,controlField): """ Activates (presents) the long description for a particular field (usually a graphic). @param controlField: the field who's long description should be activated. This field is guaranteed to have states containing HASLONGDESC state. @type controlField: dict """ raise NotImplementedError def _activatePosition(self, info=None): obj=None if info: obj=info.NVDAObjectAtStart if not obj: return super(BrowseModeDocumentTreeInterceptor,self)._activatePosition(obj) def _set_selection(self, info, reason=controlTypes.REASON_CARET): super(BrowseModeDocumentTreeInterceptor, self)._set_selection(info) if isScriptWaiting() or not info.isCollapsed: return # Save the last caret position for use in terminate(). # This must be done here because the buffer might be cleared just before terminate() is called, # causing the last caret position to be lost. caret = info.copy() caret.collapse() self._lastCaretPosition = caret.bookmark review.handleCaretMove(caret) if reason == controlTypes.REASON_FOCUS: self._lastCaretMoveWasFocus = True focusObj = api.getFocusObject() if focusObj==self.rootNVDAObject: return else: self._lastCaretMoveWasFocus = False focusObj=info.focusableNVDAObjectAtStart obj=info.NVDAObjectAtStart if not obj: log.debugWarning("Invalid NVDAObjectAtStart") return if obj==self.rootNVDAObject: return if focusObj and not eventHandler.isPendingEvents("gainFocus") and focusObj!=self.rootNVDAObject and focusObj != api.getFocusObject() and self._shouldSetFocusToObj(focusObj): focusObj.setFocus() obj.scrollIntoView() if self.programmaticScrollMayFireEvent: self._lastProgrammaticScrollTime = time.time() self.passThrough=self.shouldPassThrough(focusObj,reason=reason) # Queue the reporting of pass through mode so that it will be spoken after the actual content. queueHandler.queueFunction(queueHandler.eventQueue, reportPassThrough, self) def _shouldSetFocusToObj(self, obj): """Determine whether an object should receive focus. Subclasses may extend or override this method. @param obj: The object in question. @type obj: L{NVDAObjects.NVDAObject} """ return obj.role not in self.APPLICATION_ROLES and obj.isFocusable and obj.role!=controlTypes.ROLE_EMBEDDEDOBJECT def script_activateLongDesc(self,gesture): info=self.makeTextInfo(textInfos.POSITION_CARET) info.expand("character") for field in reversed(info.getTextWithFields()): if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart": states=field.field.get('states') if states and controlTypes.STATE_HASLONGDESC in states: self._activateLongDesc(field.field) break else: # Translators: the message presented when the activateLongDescription script cannot locate a long description to activate. ui.message(_("No long description")) # Translators: the description for the activateLongDescription script on browseMode documents. 
script_activateLongDesc.__doc__=_("Shows the long description at this position if one is found.") def event_caretMovementFailed(self, obj, nextHandler, gesture=None): if not self.passThrough or not gesture or not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]: return nextHandler() if gesture.mainKeyName in ("home", "end"): # Home, end, control+home and control+end should not disable pass through. return nextHandler() script = self.getScript(gesture) if not script: return nextHandler() # We've hit the edge of the focused control. # Therefore, move the virtual caret to the same edge of the field. info = self.makeTextInfo(textInfos.POSITION_CARET) info.expand(info.UNIT_CONTROLFIELD) if gesture.mainKeyName in ("leftArrow", "upArrow", "pageUp"): info.collapse() else: info.collapse(end=True) info.move(textInfos.UNIT_CHARACTER, -1) info.updateCaret() scriptHandler.queueScript(script, gesture) def script_collapseOrExpandControl(self, gesture): oldFocus = api.getFocusObject() oldFocusStates = oldFocus.states gesture.send() if controlTypes.STATE_COLLAPSED in oldFocusStates: self.passThrough = True elif not self.disableAutoPassThrough: self.passThrough = False reportPassThrough(self) script_collapseOrExpandControl.ignoreTreeInterceptorPassThrough = True def _tabOverride(self, direction): """Override the tab order if the virtual caret is not within the currently focused node. This is done because many nodes are not focusable and it is thus possible for the virtual caret to be unsynchronised with the focus. In this case, we want tab/shift+tab to move to the next/previous focusable node relative to the virtual caret. If the virtual caret is within the focused node, the tab/shift+tab key should be passed through to allow normal tab order navigation. Note that this method does not pass the key through itself if it is not overridden. This should be done by the calling script if C{False} is returned. @param direction: The direction in which to move. @type direction: str @return: C{True} if the tab order was overridden, C{False} if not. @rtype: bool """ if self._lastCaretMoveWasFocus: # #5227: If the caret was last moved due to a focus change, don't override tab. # This ensures that tabbing behaves as expected after tabbing hits an iframe document. return False focus = api.getFocusObject() try: focusInfo = self.makeTextInfo(focus) except: return False # We only want to override the tab order if the caret is not within the focused node. caretInfo=self.makeTextInfo(textInfos.POSITION_CARET) #Only check that the caret is within the focus for things that ar not documents #As for documents we should always override if focus.role!=controlTypes.ROLE_DOCUMENT or controlTypes.STATE_EDITABLE in focus.states: # Expand to one character, as isOverlapping() doesn't yield the desired results with collapsed ranges. caretInfo.expand(textInfos.UNIT_CHARACTER) if focusInfo.isOverlapping(caretInfo): return False # If we reach here, we do want to override tab/shift+tab if possible. # Find the next/previous focusable node. try: item = next(self._iterNodesByType("focusable", direction, caretInfo)) except StopIteration: return False obj=item.obj newInfo=item.textInfo if obj == api.getFocusObject(): # This node is already focused, so we need to move to and speak this node here. 
newCaret = newInfo.copy() newCaret.collapse() self._set_selection(newCaret,reason=controlTypes.REASON_FOCUS) if self.passThrough: obj.event_gainFocus() else: speech.speakTextInfo(newInfo,reason=controlTypes.REASON_FOCUS) else: # This node doesn't have the focus, so just set focus to it. The gainFocus event will handle the rest. obj.setFocus() return True def script_tab(self, gesture): if not self._tabOverride("next"): gesture.send() def script_shiftTab(self, gesture): if not self._tabOverride("previous"): gesture.send() def event_focusEntered(self,obj,nextHandler): if obj==self.rootNVDAObject: self._enteringFromOutside = True # Even if passThrough is enabled, we still completely drop focusEntered events here. # In order to get them back when passThrough is enabled, we replay them with the _replayFocusEnteredEvents method in event_gainFocus. # The reason for this is to ensure that focusEntered events are delayed until a focus event has had a chance to disable passthrough mode. # As in this case we would not want them. def _shouldIgnoreFocus(self, obj): """Determines whether focus on a given object should be ignored. @param obj: The object in question. @type obj: L{NVDAObjects.NVDAObject} @return: C{True} if focus on L{obj} should be ignored, C{False} otherwise. @rtype: bool """ return False def _postGainFocus(self, obj): """Executed after a gainFocus within the browseMode document. This will not be executed if L{event_gainFocus} determined that it should abort and call nextHandler. @param obj: The object that gained focus. @type obj: L{NVDAObjects.NVDAObject} """ def _replayFocusEnteredEvents(self): # We blocked the focusEntered events because we were in browse mode, # but now that we've switched to focus mode, we need to fire them. for parent in api.getFocusAncestors()[api.getFocusDifferenceLevel():]: try: parent.event_focusEntered() except: log.exception("Error executing focusEntered event: %s" % parent) def event_gainFocus(self, obj, nextHandler): enteringFromOutside=self._enteringFromOutside self._enteringFromOutside=False if not self.isReady: if self.passThrough: self._replayFocusEnteredEvents() nextHandler() return if enteringFromOutside and not self.passThrough and self._lastFocusObj==obj: # We're entering the document from outside (not returning from an inside object/application; #3145) # and this was the last non-root node with focus, so ignore this focus event. # Otherwise, if the user switches away and back to this document, the cursor will jump to this node. # This is not ideal if the user was positioned over a node which cannot receive focus. return if obj==self.rootNVDAObject: if self.passThrough: self._replayFocusEnteredEvents() return nextHandler() return if not self.passThrough and self._shouldIgnoreFocus(obj): return self._lastFocusObj=obj try: focusInfo = self.makeTextInfo(obj) except: # This object is not in the treeInterceptor, even though it resides beneath the document. # Automatic pass through should be enabled in certain circumstances where this occurs. if not self.passThrough and self.shouldPassThrough(obj,reason=controlTypes.REASON_FOCUS): self.passThrough=True reportPassThrough(self) self._replayFocusEnteredEvents() return nextHandler() #We only want to update the caret and speak the field if we're not in the same one as before caretInfo=self.makeTextInfo(textInfos.POSITION_CARET) # Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping. 
caretInfo.expand(textInfos.UNIT_CHARACTER) if not self._hadFirstGainFocus or not focusInfo.isOverlapping(caretInfo): # The virtual caret is not within the focus node. oldPassThrough=self.passThrough passThrough=self.shouldPassThrough(obj,reason=controlTypes.REASON_FOCUS) if not oldPassThrough and (passThrough or sayAllHandler.isRunning()): # If pass-through is disabled, cancel speech, as a focus change should cause page reading to stop. # This must be done before auto-pass-through occurs, as we want to stop page reading even if pass-through will be automatically enabled by this focus change. speech.cancelSpeech() self.passThrough=passThrough if not self.passThrough: # We read the info from the browseMode document instead of the control itself. speech.speakTextInfo(focusInfo,reason=controlTypes.REASON_FOCUS) # However, we still want to update the speech property cache so that property changes will be spoken properly. speech.speakObject(obj,controlTypes.REASON_ONLYCACHE) else: # Although we are going to speak the object rather than textInfo content, we still need to silently speak the textInfo content so that the textInfo speech cache is updated correctly. # Not doing this would cause later browseMode speaking to either not speak controlFields it had entered, or speak controlField exits after having already exited. # See #7435 for a discussion on this. speech.speakTextInfo(focusInfo,reason=controlTypes.REASON_ONLYCACHE) self._replayFocusEnteredEvents() nextHandler() focusInfo.collapse() self._set_selection(focusInfo,reason=controlTypes.REASON_FOCUS) else: # The virtual caret was already at the focused node. if not self.passThrough: # This focus change was caused by a virtual caret movement, so don't speak the focused node to avoid double speaking. # However, we still want to update the speech property cache so that property changes will be spoken properly. speech.speakObject(obj,controlTypes.REASON_ONLYCACHE) else: self._replayFocusEnteredEvents() return nextHandler() self._postGainFocus(obj) event_gainFocus.ignoreIsReady=True def _handleScrollTo(self, obj): """Handle scrolling the browseMode document to a given object in response to an event. Subclasses should call this from an event which indicates that the document has scrolled. @postcondition: The virtual caret is moved to L{obj} and the buffer content for L{obj} is reported. @param obj: The object to which the document should scroll. @type obj: L{NVDAObjects.NVDAObject} @return: C{True} if the document was scrolled, C{False} if not. @rtype: bool @note: If C{False} is returned, calling events should probably call their nextHandler. """ if self.programmaticScrollMayFireEvent and self._lastProgrammaticScrollTime and time.time() - self._lastProgrammaticScrollTime < 0.4: # This event was probably caused by this browseMode document's call to scrollIntoView(). # Therefore, ignore it. Otherwise, the cursor may bounce back to the scroll point. # However, pretend we handled it, as we don't want it to be passed on to the object either. return True try: scrollInfo = self.makeTextInfo(obj) except: return False #We only want to update the caret and speak the field if we're not in the same one as before caretInfo=self.makeTextInfo(textInfos.POSITION_CARET) # Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping. 
caretInfo.expand(textInfos.UNIT_CHARACTER) if not scrollInfo.isOverlapping(caretInfo): if scrollInfo.isCollapsed: scrollInfo.expand(textInfos.UNIT_LINE) speech.speakTextInfo(scrollInfo,reason=controlTypes.REASON_CARET) scrollInfo.collapse() self.selection = scrollInfo return True return False def _isNVDAObjectInApplication(self, obj): """Determine whether a given object is within an application. The object is considered to be within an application if it or one of its ancestors has an application role. This should only be called on objects beneath the treeInterceptor's root NVDAObject. @param obj: The object in question. @type obj: L{NVDAObjects.NVDAObject} @return: C{True} if L{obj} is within an application, C{False} otherwise. @rtype: bool """ # We cache the result for each object we walk. # There can be browse mode documents within other documents and the result might be different between these, # so the cache must be maintained on the TreeInterceptor rather than the object itself. try: cache = self._isInAppCache except AttributeError: # Create this lazily, as this method isn't used by all browse mode implementations. cache = self._isInAppCache = weakref.WeakKeyDictionary() objs = [] def doResult(result): # Cache this on descendants we've walked over. for obj in objs: cache[obj] = result return result while obj and obj != self.rootNVDAObject: inApp = cache.get(obj) if inApp is not None: # We found a cached result. return doResult(inApp) objs.append(obj) if obj.role in self.APPLICATION_ROLES: return doResult(True) # Cache container. container = obj.container obj.container = container obj = container return doResult(False) def _get_documentConstantIdentifier(self): """Get the constant identifier for this document. This identifier should uniquely identify all instances (not just one instance) of a document for at least the current session of the hosting application. Generally, the document URL should be used. @return: The constant identifier for this document, C{None} if there is none. """ return None def _get_shouldRememberCaretPositionAcrossLoads(self): """Specifies whether the position of the caret should be remembered when this document is loaded again. This is useful when the browser remembers the scroll position for the document, but does not communicate this information via APIs. The remembered caret position is associated with this document using L{documentConstantIdentifier}. @return: C{True} if the caret position should be remembered, C{False} if not. @rtype: bool """ docConstId = self.documentConstantIdentifier # Return True if the URL indicates that this is probably a web browser document. # We do this check because we don't want to remember caret positions for email messages, etc. return isinstance(docConstId, basestring) and docConstId.split("://", 1)[0] in ("http", "https", "ftp", "ftps", "file") def _getInitialCaretPos(self): """Retrieve the initial position of the caret after the buffer has been loaded. This position, if any, will be passed to L{makeTextInfo}. Subclasses should extend this method. @return: The initial position of the caret, C{None} if there isn't one. 
@rtype: TextInfo position """ if self.shouldRememberCaretPositionAcrossLoads: try: return self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] except KeyError: pass return None def getEnclosingContainerRange(self,range): range=range.copy() range.collapse() try: item = next(self._iterNodesByType("container", "up", range)) except (NotImplementedError,StopIteration): try: item = next(self._iterNodesByType("landmark", "up", range)) except (NotImplementedError,StopIteration): return return item.textInfo def script_moveToStartOfContainer(self,gesture): info=self.makeTextInfo(textInfos.POSITION_CARET) info.expand(textInfos.UNIT_CHARACTER) container=self.getEnclosingContainerRange(info) if not container: # Translators: Reported when the user attempts to move to the start or end of a container # (list, table, etc.) but there is no container. ui.message(_("Not in a container")) return container.collapse() self._set_selection(container, reason=REASON_QUICKNAV) if not willSayAllResume(gesture): container.expand(textInfos.UNIT_LINE) speech.speakTextInfo(container, reason=controlTypes.REASON_FOCUS) script_moveToStartOfContainer.resumeSayAllMode=sayAllHandler.CURSOR_CARET # Translators: Description for the Move to start of container command in browse mode. script_moveToStartOfContainer.__doc__=_("Moves to the start of the container element, such as a list or table") def script_movePastEndOfContainer(self,gesture): info=self.makeTextInfo(textInfos.POSITION_CARET) info.expand(textInfos.UNIT_CHARACTER) container=self.getEnclosingContainerRange(info) if not container: # Translators: Reported when the user attempts to move to the start or end of a container # (list, table, etc.) but there is no container. ui.message(_("Not in a container")) return container.collapse(end=True) docEnd=container.obj.makeTextInfo(textInfos.POSITION_LAST) if container.compareEndPoints(docEnd,"endToEnd")>=0: container=docEnd # Translators: a message reported when: # Review cursor is at the bottom line of the current navigator object. # Landing at the end of a browse mode document when trying to jump to the end of the current container. ui.message(_("Bottom")) self._set_selection(container, reason=REASON_QUICKNAV) if not willSayAllResume(gesture): container.expand(textInfos.UNIT_LINE) speech.speakTextInfo(container, reason=controlTypes.REASON_FOCUS) script_movePastEndOfContainer.resumeSayAllMode=sayAllHandler.CURSOR_CARET # Translators: Description for the Move past end of container command in browse mode. script_movePastEndOfContainer.__doc__=_("Moves past the end of the container element, such as a list or table") NOT_LINK_BLOCK_MIN_LEN = 30 def _isSuitableNotLinkBlock(self,range): return len(range.text)>=self.NOT_LINK_BLOCK_MIN_LEN def _iterNotLinkBlock(self, direction="next", pos=None): links = self._iterNodesByType("link", direction=direction, pos=pos) # We want to compare each link against the next link. item1 = next(links) while True: item2 = next(links) # If the distance between the links is small, this is probably just a piece of non-link text within a block of links; e.g. an inactive link of a nav bar. 
if direction=="previous": range=item1.textInfo.copy() range.collapse() range.setEndPoint(item2.textInfo,"startToEnd") else: range=item2.textInfo.copy() range.collapse() range.setEndPoint(item1.textInfo,"startToEnd") if self._isSuitableNotLinkBlock(range): yield TextInfoQuickNavItem("notLinkBlock",self,range) item1=item2 __gestures={ "kb:NVDA+d": "activateLongDesc", "kb:alt+upArrow": "collapseOrExpandControl", "kb:alt+downArrow": "collapseOrExpandControl", "kb:tab": "tab", "kb:shift+tab": "shiftTab", "kb:shift+,": "moveToStartOfContainer", "kb:,": "movePastEndOfContainer", }
1
22,046
Not super important, but this line could be split up; an illustrative split follows this row.
nvaccess-nvda
py
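The specific patch line the comment refers to isn't reproduced in this row; as a purely illustrative sketch of the suggestion, a long call such as the `wx.TreeCtrl` constructor in the old file above could be wrapped one argument per line without changing behavior:

# illustrative only: the same constructor call from the old file,
# wrapped for readability; semantics are unchanged
self.tree = wx.TreeCtrl(
    self,
    size=wx.Size(500, 600),
    style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT
        | wx.TR_SINGLE | wx.TR_EDIT_LABELS,
)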
@@ -150,6 +150,17 @@ void h2o_context_dispose(h2o_context_t *ctx) h2o_filecache_destroy(ctx->filecache); ctx->filecache = NULL; + /* clear storage */ + for (i = 0; i != ctx->storage.size; ++i) { + h2o_context_storage_item_t item = ctx->storage.entries[i]; + if (item.data != NULL) { + if (item.dispose != NULL) { + item.dispose(item.data); + } + } + } + free(ctx->storage.entries); + /* TODO assert that the all the getaddrinfo threads are idle */ h2o_multithread_unregister_receiver(ctx->queue, &ctx->receivers.hostinfo_getaddr); h2o_multithread_destroy_queue(ctx->queue);
1
/*
 * Copyright (c) 2014 DeNA Co., Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <stddef.h>
#include <stdlib.h>
#include <sys/time.h>
#include "h2o.h"
#include "h2o/memcached.h"

void h2o_context_init_pathconf_context(h2o_context_t *ctx, h2o_pathconf_t *pathconf)
{
    /* add pathconf to the inited list (or return if already inited) */
    size_t i;
    for (i = 0; i != ctx->_pathconfs_inited.size; ++i)
        if (ctx->_pathconfs_inited.entries[i] == pathconf)
            return;
    h2o_vector_reserve(NULL, &ctx->_pathconfs_inited, ctx->_pathconfs_inited.size + 1);
    ctx->_pathconfs_inited.entries[ctx->_pathconfs_inited.size++] = pathconf;

#define DOIT(type, list) \
    do { \
        size_t i; \
        for (i = 0; i != pathconf->list.size; ++i) { \
            type *o = pathconf->list.entries[i]; \
            if (o->on_context_init != NULL) \
                o->on_context_init(o, ctx); \
        } \
    } while (0)

    DOIT(h2o_handler_t, handlers);
    DOIT(h2o_filter_t, filters);
    DOIT(h2o_logger_t, loggers);

#undef DOIT
}

void h2o_context_dispose_pathconf_context(h2o_context_t *ctx, h2o_pathconf_t *pathconf)
{
    /* nullify pathconf in the inited list (or return if already disposed) */
    size_t i;
    for (i = 0; i != ctx->_pathconfs_inited.size; ++i)
        if (ctx->_pathconfs_inited.entries[i] == pathconf)
            break;
    if (i == ctx->_pathconfs_inited.size)
        return;
    ctx->_pathconfs_inited.entries[i] = NULL;

#define DOIT(type, list) \
    do { \
        size_t i; \
        for (i = 0; i != pathconf->list.size; ++i) { \
            type *o = pathconf->list.entries[i]; \
            if (o->on_context_dispose != NULL) \
                o->on_context_dispose(o, ctx); \
        } \
    } while (0)

    DOIT(h2o_handler_t, handlers);
    DOIT(h2o_filter_t, filters);
    DOIT(h2o_logger_t, loggers);

#undef DOIT
}

void h2o_context_init(h2o_context_t *ctx, h2o_loop_t *loop, h2o_globalconf_t *config)
{
    size_t i, j;

    assert(config->hosts[0] != NULL);

    memset(ctx, 0, sizeof(*ctx));
    ctx->loop = loop;
    ctx->globalconf = config;
    h2o_timeout_init(ctx->loop, &ctx->zero_timeout, 0);
    h2o_timeout_init(ctx->loop, &ctx->one_sec_timeout, 1000);
    h2o_timeout_init(ctx->loop, &ctx->hundred_ms_timeout, 100);
    ctx->queue = h2o_multithread_create_queue(loop);
    h2o_multithread_register_receiver(ctx->queue, &ctx->receivers.hostinfo_getaddr, h2o_hostinfo_getaddr_receiver);
    ctx->filecache = h2o_filecache_create(config->filecache.capacity);

    h2o_timeout_init(ctx->loop, &ctx->handshake_timeout, config->handshake_timeout);
    h2o_timeout_init(ctx->loop, &ctx->http1.req_timeout, config->http1.req_timeout);
    h2o_linklist_init_anchor(&ctx->http1._conns);
    h2o_timeout_init(ctx->loop, &ctx->http2.idle_timeout, config->http2.idle_timeout);
    h2o_linklist_init_anchor(&ctx->http2._conns);
    ctx->proxy.client_ctx.loop = loop;
    h2o_timeout_init(ctx->loop, &ctx->proxy.io_timeout, config->proxy.io_timeout);
    ctx->proxy.client_ctx.getaddr_receiver = &ctx->receivers.hostinfo_getaddr;
    ctx->proxy.client_ctx.io_timeout = &ctx->proxy.io_timeout;
    ctx->proxy.client_ctx.ssl_ctx = config->proxy.ssl_ctx;

    ctx->_module_configs = h2o_mem_alloc(sizeof(*ctx->_module_configs) * config->_num_config_slots);
    memset(ctx->_module_configs, 0, sizeof(*ctx->_module_configs) * config->_num_config_slots);

    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_lock(&mutex);

    for (i = 0; config->hosts[i] != NULL; ++i) {
        h2o_hostconf_t *hostconf = config->hosts[i];
        for (j = 0; j != hostconf->paths.size; ++j) {
            h2o_pathconf_t *pathconf = hostconf->paths.entries + j;
            h2o_context_init_pathconf_context(ctx, pathconf);
        }
        h2o_context_init_pathconf_context(ctx, &hostconf->fallback_path);
    }

    pthread_mutex_unlock(&mutex);
}

void h2o_context_dispose(h2o_context_t *ctx)
{
    h2o_globalconf_t *config = ctx->globalconf;
    size_t i, j;

    for (i = 0; config->hosts[i] != NULL; ++i) {
        h2o_hostconf_t *hostconf = config->hosts[i];
        for (j = 0; j != hostconf->paths.size; ++j) {
            h2o_pathconf_t *pathconf = hostconf->paths.entries + j;
            h2o_context_dispose_pathconf_context(ctx, pathconf);
        }
        h2o_context_dispose_pathconf_context(ctx, &hostconf->fallback_path);
    }
    free(ctx->_pathconfs_inited.entries);
    free(ctx->_module_configs);
    h2o_timeout_dispose(ctx->loop, &ctx->zero_timeout);
    h2o_timeout_dispose(ctx->loop, &ctx->one_sec_timeout);
    h2o_timeout_dispose(ctx->loop, &ctx->hundred_ms_timeout);
    h2o_timeout_dispose(ctx->loop, &ctx->handshake_timeout);
    h2o_timeout_dispose(ctx->loop, &ctx->http1.req_timeout);
    h2o_timeout_dispose(ctx->loop, &ctx->http2.idle_timeout);
    h2o_timeout_dispose(ctx->loop, &ctx->proxy.io_timeout);
    /* what should we do here? assert(!h2o_linklist_is_empty(&ctx->http2._conns); */
    h2o_filecache_destroy(ctx->filecache);
    ctx->filecache = NULL;

    /* TODO assert that the all the getaddrinfo threads are idle */
    h2o_multithread_unregister_receiver(ctx->queue, &ctx->receivers.hostinfo_getaddr);
    h2o_multithread_destroy_queue(ctx->queue);

#if H2O_USE_LIBUV
    /* make sure the handles released by h2o_timeout_dispose get freed */
    uv_run(ctx->loop, UV_RUN_NOWAIT);
#endif
}

void h2o_context_request_shutdown(h2o_context_t *ctx)
{
    ctx->shutdown_requested = 1;
    if (ctx->globalconf->http1.callbacks.request_shutdown != NULL)
        ctx->globalconf->http1.callbacks.request_shutdown(ctx);
    if (ctx->globalconf->http2.callbacks.request_shutdown != NULL)
        ctx->globalconf->http2.callbacks.request_shutdown(ctx);
}

void h2o_context_update_timestamp_cache(h2o_context_t *ctx)
{
    time_t prev_sec = ctx->_timestamp_cache.tv_at.tv_sec;
    ctx->_timestamp_cache.uv_now_at = h2o_now(ctx->loop);
    gettimeofday(&ctx->_timestamp_cache.tv_at, NULL);
    if (ctx->_timestamp_cache.tv_at.tv_sec != prev_sec) {
        struct tm gmt;
        /* update the string cache */
        if (ctx->_timestamp_cache.value != NULL)
            h2o_mem_release_shared(ctx->_timestamp_cache.value);
        ctx->_timestamp_cache.value = h2o_mem_alloc_shared(NULL, sizeof(h2o_timestamp_string_t), NULL);
        gmtime_r(&ctx->_timestamp_cache.tv_at.tv_sec, &gmt);
        h2o_time2str_rfc1123(ctx->_timestamp_cache.value->rfc1123, &gmt);
        h2o_time2str_log(ctx->_timestamp_cache.value->log, ctx->_timestamp_cache.tv_at.tv_sec);
    }
}
1
11,468
Maybe it would be better to call the dispose function without checking the value of `item.data`, since the dispose function is set up before `data` is. See the sketch after this row.
h2o-h2o
c
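A minimal sketch of the reviewer's suggestion, using the field names shown in the patch: drop the `item.data != NULL` guard and key only on `item.dispose`. This fragment assumes it drops into `h2o_context_dispose`, where `i` is the already-declared `size_t` loop counter.

/* sketch of the suggested cleanup loop: dispose is invoked whenever it is
 * set, regardless of whether item.data was ever populated */
for (i = 0; i != ctx->storage.size; ++i) {
    h2o_context_storage_item_t item = ctx->storage.entries[i];
    if (item.dispose != NULL)
        item.dispose(item.data);
}
free(ctx->storage.entries);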
@@ -33,6 +33,7 @@ inline void SyncedMemory::to_cpu() { CaffeMallocHost(&cpu_ptr_, size_); own_cpu_data_ = true; } + Caffe::set_mode(Caffe::GPU); caffe_memcpy(size_, gpu_ptr_, cpu_ptr_); head_ = SYNCED; break;
1
// Copyright 2014 BVLC and contributors.

#include <cuda_runtime.h>
#include <cstring>

#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

SyncedMemory::~SyncedMemory() {
  if (cpu_ptr_ && own_cpu_data_) {
    CaffeFreeHost(cpu_ptr_);
  }
  if (gpu_ptr_) {
    CUDA_CHECK(cudaFree(gpu_ptr_));
  }
}

inline void SyncedMemory::to_cpu() {
  switch (head_) {
  case UNINITIALIZED:
    CaffeMallocHost(&cpu_ptr_, size_);
    memset(cpu_ptr_, 0, size_);
    head_ = HEAD_AT_CPU;
    own_cpu_data_ = true;
    break;
  case HEAD_AT_GPU:
    if (cpu_ptr_ == NULL) {
      CaffeMallocHost(&cpu_ptr_, size_);
      own_cpu_data_ = true;
    }
    caffe_memcpy(size_, gpu_ptr_, cpu_ptr_);
    head_ = SYNCED;
    break;
  case HEAD_AT_CPU:
  case SYNCED:
    break;
  }
}

inline void SyncedMemory::to_gpu() {
  switch (head_) {
  case UNINITIALIZED:
    CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));
    CUDA_CHECK(cudaMemset(gpu_ptr_, 0, size_));
    head_ = HEAD_AT_GPU;
    break;
  case HEAD_AT_CPU:
    if (gpu_ptr_ == NULL) {
      CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));
    }
    caffe_memcpy(size_, cpu_ptr_, gpu_ptr_);
    head_ = SYNCED;
    break;
  case HEAD_AT_GPU:
  case SYNCED:
    break;
  }
}

const void* SyncedMemory::cpu_data() {
  to_cpu();
  return (const void*)cpu_ptr_;
}

void SyncedMemory::set_cpu_data(void* data) {
  CHECK(data);
  if (own_cpu_data_) {
    CaffeFreeHost(cpu_ptr_);
  }
  cpu_ptr_ = data;
  head_ = HEAD_AT_CPU;
  own_cpu_data_ = false;
}

const void* SyncedMemory::gpu_data() {
  to_gpu();
  return (const void*)gpu_ptr_;
}

void* SyncedMemory::mutable_cpu_data() {
  to_cpu();
  head_ = HEAD_AT_CPU;
  return cpu_ptr_;
}

void* SyncedMemory::mutable_gpu_data() {
  to_gpu();
  head_ = HEAD_AT_GPU;
  return gpu_ptr_;
}

}  // namespace caffe
1
29,361
Why set the mode to GPU here? Shouldn't it be CPU, given this is `to_cpu()`? (See the context sketch after this row.)
BVLC-caffe
cpp
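For context on the question, a restatement of the patched `HEAD_AT_GPU` branch of `to_cpu()`, assembled from the patch and the old file, with the questioned line marked:

case HEAD_AT_GPU:
  if (cpu_ptr_ == NULL) {
    CaffeMallocHost(&cpu_ptr_, size_);
    own_cpu_data_ = true;
  }
  Caffe::set_mode(Caffe::GPU);  // the line the review is questioning
  caffe_memcpy(size_, gpu_ptr_, cpu_ptr_);  // device-to-host copy: the source is gpu_ptr_
  head_ = SYNCED;
  break;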
@@ -1,10 +1,16 @@ import AppointmentIndexRoute from 'hospitalrun/appointments/index/route'; import moment from 'moment'; import { translationMacro as t } from 'ember-i18n'; +import Ember from 'ember'; + +const { computed } = Ember; + export default AppointmentIndexRoute.extend({ editReturn: 'appointments.today', modelName: 'appointment', - pageTitle: t('appointments.todayTitle'), + pageTitle: computed('i18n', () => { + return t('appointments.todayTitle'); + }), _modelQueryParams() { let endOfDay = moment().endOf('day').toDate().getTime();
1
import AppointmentIndexRoute from 'hospitalrun/appointments/index/route';
import moment from 'moment';
import { translationMacro as t } from 'ember-i18n';

export default AppointmentIndexRoute.extend({
  editReturn: 'appointments.today',
  modelName: 'appointment',
  pageTitle: t('appointments.todayTitle'),

  _modelQueryParams() {
    let endOfDay = moment().endOf('day').toDate().getTime();
    let maxValue = this.get('maxValue');
    let startOfDay = moment().startOf('day').toDate().getTime();
    return {
      options: {
        startkey: [startOfDay, null, 'appointment_'],
        endkey: [endOfDay, endOfDay, `appointment_${maxValue}`]
      },
      mapReduce: 'appointments_by_date'
    };
  }
});
1
13,693
This should be `computed('i18n.locale', ...)`; see the sketch after this row.
HospitalRun-hospitalrun-frontend
js
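A minimal sketch of the reviewer's fix, mirroring the patch body but assuming ember-i18n exposes the current locale at the `i18n.locale` key, so the computed property invalidates when the user switches languages:

// sketch: depend on the locale key, not the whole service, so the
// title recomputes on language change
pageTitle: computed('i18n.locale', () => {
  return t('appointments.todayTitle');
}),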
@@ -49,7 +49,7 @@ type Session struct { DataTransferred DataTransferred TokensEarned uint64 Last bool - done chan struct{} + Done chan struct{} } // newSession creates a blank new session with an ID.
1
/*
 * Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package session

import (
	"time"

	"github.com/gofrs/uuid"

	"github.com/mysteriumnetwork/node/identity"
)

// ID represents session id type.
type ID string

// PaymentEngine is responsible for interacting with the consumer in regard to payments.
type PaymentEngine interface {
	Start() error
	Stop()
}

// DataTransferred represents the data transferred on each session.
type DataTransferred struct {
	Up, Down uint64
}

// Session structure holds all required information about current session between service consumer and provider.
type Session struct {
	ID              ID
	ConsumerID      identity.Identity
	Config          ServiceConfiguration
	ServiceID       string
	ServiceType     string
	CreatedAt       time.Time
	DataTransferred DataTransferred
	TokensEarned    uint64
	Last            bool
	done            chan struct{}
}

// newSession creates a blank new session with an ID.
func newSession() (*Session, error) {
	uid, err := uuid.NewV4()
	if err != nil {
		return nil, err
	}
	return &Session{ID: ID(uid.String())}, nil
}

// ServiceConfiguration defines service configuration from underlying transport mechanism to be passed to remote party
// should be serializable to json format.
type ServiceConfiguration interface{}

// PaymentInfo represents the payment version information
type PaymentInfo struct {
	Supports string `json:"supported"`
}
1
15,998
Can we not expose the channel directly? Or at least expose it as a read-only channel? (Sketch after this row.)
mysteriumnetwork-node
go
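A minimal sketch of the read-only alternative the reviewer asks about: keep `done` unexported and add an accessor that returns a receive-only view of it. The method name `Done` here is an assumption for illustration, not taken from the source.

// Done exposes the session's completion signal without letting callers
// close or send on the underlying channel (receive-only view).
func (s *Session) Done() <-chan struct{} {
	return s.done
}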
@@ -88,6 +88,17 @@ AS_IF([test "$YACC" != "bison -y"], [AC_MSG_ERROR([bison not found but required] PKG_PROG_PKG_CONFIG +# PKG_CHECK_VAR added to pkg-config 0.28 +m4_define_default( + [PKG_CHECK_VAR], + [AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config]) + AS_IF([test -z "$$1"], [$1=`$PKG_CONFIG --variable="$3" "$2"`]) + AS_IF([test -n "$$1"], [$4], [$5])]) + +PKG_CHECK_VAR(bashcompdir, [bash-completion], [completionsdir], , + bashcompdir="${sysconfdir}/bash_completion.d") +AC_SUBST(bashcompdir) + AM_PATH_GLIB_2_0(,,AC_MSG_ERROR([GLib not found])) dnl When bumping the gio-unix-2.0 dependency (or glib-2.0 in general),
1
AC_PREREQ([2.63]) dnl If doing a final release, remember to follow the instructions to dnl update libostree-released.sym from libostree-devel.sym, and update the checksum dnl in test-symbols.sh, and also set is_release_build=yes below. Then make dnl another post-release commit to bump the version, and set is_release_build=no. m4_define([year_version], [2017]) m4_define([release_version], [10]) m4_define([package_version], [year_version.release_version]) AC_INIT([libostree], [package_version], [[email protected]]) is_release_build=no AC_CONFIG_HEADER([config.h]) AC_CONFIG_MACRO_DIR([buildutil]) AC_CONFIG_AUX_DIR([build-aux]) AM_INIT_AUTOMAKE([1.13 -Wno-portability foreign no-define tar-ustar no-dist-gzip dist-xz color-tests subdir-objects]) AM_MAINTAINER_MODE([enable]) AM_SILENT_RULES([yes]) AC_USE_SYSTEM_EXTENSIONS AC_SYS_LARGEFILE AC_PROG_CC AM_PROG_CC_C_O AC_PROG_YACC dnl Versioning information AC_SUBST([YEAR_VERSION], [year_version]) AC_SUBST([RELEASE_VERSION], [release_version]) AC_SUBST([PACKAGE_VERSION], [package_version]) AS_IF([echo "$CFLAGS" | grep -q -E -e '-Werror($| )'], [], [ CC_CHECK_FLAGS_APPEND([WARN_CFLAGS], [CFLAGS], [\ -pipe \ -Wall \ -Werror=empty-body \ -Werror=strict-prototypes \ -Werror=missing-prototypes \ -Werror=implicit-function-declaration \ "-Werror=format=2 -Werror=format-security -Werror=format-nonliteral" \ -Werror=pointer-arith -Werror=init-self \ -Werror=missing-declarations \ -Werror=return-type \ -Werror=overflow \ -Werror=int-conversion \ -Werror=parenthesis \ -Werror=incompatible-pointer-types \ -Werror=misleading-indentation \ -Werror=missing-include-dirs -Werror=aggregate-return \ -Werror=unused-result \ ])]) AC_SUBST(WARN_CFLAGS) AC_MSG_CHECKING([for -fsanitize=address in CFLAGS]) if echo $CFLAGS | grep -q -e -fsanitize=address; then AC_MSG_RESULT([yes]) using_asan=yes else AC_MSG_RESULT([no]) fi AM_CONDITIONAL(BUILDOPT_ASAN, [test x$using_asan = xyes]) AM_COND_IF([BUILDOPT_ASAN], [AC_DEFINE([BUILDOPT_ASAN], 1, [Define if we are building with -fsanitize=address])]) AC_MSG_CHECKING([for -fsanitize=thread in CFLAGS]) if echo $CFLAGS | grep -q -e -fsanitize=thread; then AC_MSG_RESULT([yes]) using_tsan=yes else AC_MSG_RESULT([no]) fi AM_CONDITIONAL(BUILDOPT_TSAN, [test x$using_tsan = xyes]) AM_COND_IF([BUILDOPT_TSAN], [AC_DEFINE([BUILDOPT_TSAN], 1, [Define if we are building with -fsanitize=thread])]) # Initialize libtool LT_PREREQ([2.2.4]) LT_INIT([disable-static]) OSTREE_FEATURES="" AC_SUBST([OSTREE_FEATURES]) GLIB_TESTS LIBGLNX_CONFIGURE AC_CHECK_HEADER([sys/xattr.h],,[AC_MSG_ERROR([You must have sys/xattr.h from glibc])]) AS_IF([test "$YACC" != "bison -y"], [AC_MSG_ERROR([bison not found but required])]) PKG_PROG_PKG_CONFIG AM_PATH_GLIB_2_0(,,AC_MSG_ERROR([GLib not found])) dnl When bumping the gio-unix-2.0 dependency (or glib-2.0 in general), dnl remember to bump GLIB_VERSION_MIN_REQUIRED and dnl GLIB_VERSION_MAX_ALLOWED in Makefile.am GIO_DEPENDENCY="gio-unix-2.0 >= 2.40.0" PKG_CHECK_MODULES(OT_DEP_GIO_UNIX, $GIO_DEPENDENCY) dnl 5.1.0 is an arbitrary version here PKG_CHECK_MODULES(OT_DEP_LZMA, liblzma >= 5.0.5) dnl Needed for rollsum PKG_CHECK_MODULES(OT_DEP_ZLIB, zlib) dnl We're not actually linking to this, just using the header PKG_CHECK_MODULES(OT_DEP_E2P, e2p) dnl Arbitrary version that's in CentOS7.2 now CURL_DEPENDENCY=7.29.0 AC_ARG_WITH(curl, AS_HELP_STRING([--with-curl], [Use libcurl @<:@default=no@:>@]), [], [with_curl=no]) AS_IF([test x$with_curl != xno ], [ PKG_CHECK_MODULES(OT_DEP_CURL, libcurl >= $CURL_DEPENDENCY) with_curl=yes 
AC_DEFINE([HAVE_LIBCURL], 1, [Define if we have libcurl.pc]) dnl Currently using libcurl requires soup for trivial-httpd for tests with_soup_default=yes ], [with_soup_default=check]) AM_CONDITIONAL(USE_CURL, test x$with_curl != xno) if test x$with_curl = xyes; then OSTREE_FEATURES="$OSTREE_FEATURES libcurl"; fi dnl When bumping the libsoup-2.4 dependency, remember to bump dnl SOUP_VERSION_MIN_REQUIRED and SOUP_VERSION_MAX_ALLOWED in dnl Makefile.am SOUP_DEPENDENCY="libsoup-2.4 >= 2.39.1" AC_ARG_WITH(soup, AS_HELP_STRING([--with-soup], [Use libsoup @<:@default=yes@:>@]), [], [with_soup=$with_soup_default]) AS_IF([test x$with_soup != xno], [ AC_ARG_ENABLE(libsoup_client_certs, AS_HELP_STRING([--enable-libsoup-client-certs], [Require availability of new enough libsoup TLS client cert API (default: auto)]),, [enable_libsoup_client_certs=auto]) AC_MSG_CHECKING([for $SOUP_DEPENDENCY]) PKG_CHECK_EXISTS($SOUP_DEPENDENCY, have_soup=yes, have_soup=no) AC_MSG_RESULT([$have_soup]) AS_IF([ test x$have_soup = xno && test x$with_soup != xcheck], [ AC_MSG_ERROR([libsoup is enabled but could not be found]) ]) AS_IF([test x$have_soup = xyes], [ PKG_CHECK_MODULES(OT_DEP_SOUP, $SOUP_DEPENDENCY) AC_DEFINE([HAVE_LIBSOUP], 1, [Define if we have libsoup.pc]) with_soup=yes save_CFLAGS=$CFLAGS CFLAGS=$OT_DEP_SOUP_CFLAGS have_libsoup_client_certs=no AC_CHECK_DECL([SOUP_SESSION_TLS_INTERACTION], [ AC_DEFINE([HAVE_LIBSOUP_CLIENT_CERTS], 1, [Define if we have libsoup client certs]) have_libsoup_client_certs=yes ], [], [#include <libsoup/soup.h>]) AS_IF([test x$enable_libsoup_client_certs = xyes && test x$have_libsoup_client_certs != xyes], [ AC_MSG_ERROR([libsoup client certs explicitly requested but not found]) ]) CFLAGS=$save_CFLAGS ], [ with_soup=no ]) ], [ with_soup=no ]) if test x$with_soup != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libsoup"; fi AM_CONDITIONAL(USE_LIBSOUP, test x$with_soup != xno) AM_CONDITIONAL(HAVE_LIBSOUP_CLIENT_CERTS, test x$have_libsoup_client_certs = xyes) AC_ARG_ENABLE(trivial-httpd-cmdline, [AS_HELP_STRING([--enable-trivial-httpd-cmdline], [Continue to support "ostree trivial-httpd" [default=no]])],, enable_trivial_httpd_cmdline=no) AM_CONDITIONAL(BUILDOPT_TRIVIAL_HTTPD, test x$enable_trivial_httpd_cmdline = xyes) AM_COND_IF(BUILDOPT_TRIVIAL_HTTPD, [AC_DEFINE([BUILDOPT_ENABLE_TRIVIAL_HTTPD_CMDLINE], 1, [Define if we are enabling ostree trivial-httpd entrypoint])] ) AS_IF([test x$with_curl = xyes && test x$with_soup = xno], [ AC_MSG_ERROR([Curl enabled, but libsoup is not; libsoup is needed for tests]) ]) AM_CONDITIONAL(USE_CURL_OR_SOUP, test x$with_curl != xno || test x$with_soup != xno) AS_IF([test x$with_curl != xno || test x$with_soup != xno], [AC_DEFINE([HAVE_LIBCURL_OR_LIBSOUP], 1, [Define if we have soup or curl])]) AS_IF([test x$with_curl = xyes], [fetcher_backend=curl], [test x$with_soup = xyes], [fetcher_backend=libsoup], [fetcher_backend=none]) m4_ifdef([GOBJECT_INTROSPECTION_CHECK], [ GOBJECT_INTROSPECTION_CHECK([1.34.0]) ]) AM_CONDITIONAL(BUILDOPT_INTROSPECTION, test "x$found_introspection" = xyes) LIBGPGME_DEPENDENCY="1.1.8" PKG_CHECK_MODULES(OT_DEP_GPGME, gpgme-pthread >= $LIBGPGME_DEPENDENCY, have_gpgme=yes, [ m4_ifdef([AM_PATH_GPGME_PTHREAD], [ AM_PATH_GPGME_PTHREAD($LIBGPGME_DEPENDENCY, have_gpgme=yes, have_gpgme=no) ],[ have_gpgme=no ]) ]) AS_IF([ test x$have_gpgme = xno ], [ AC_MSG_ERROR([Need GPGME_PTHREAD version $LIBGPGME_DEPENDENCY or later]) ]) OSTREE_FEATURES="$OSTREE_FEATURES gpgme" LIBARCHIVE_DEPENDENCY="libarchive >= 2.8.0" # What's in RHEL7.2. 
FUSE_DEPENDENCY="fuse >= 2.9.2" # check for gtk-doc m4_ifdef([GTK_DOC_CHECK], [ GTK_DOC_CHECK([1.15], [--flavour no-tmpl]) ],[ enable_gtk_doc=no AM_CONDITIONAL([ENABLE_GTK_DOC], false) ]) AC_ARG_ENABLE(man, [AS_HELP_STRING([--enable-man], [generate man pages [default=auto]])],, enable_man=maybe) AS_IF([test "$enable_man" != no], [ AC_PATH_PROG([XSLTPROC], [xsltproc]) AS_IF([test -z "$XSLTPROC"], [ AS_IF([test "$enable_man" = yes], [ AC_MSG_ERROR([xsltproc is required for --enable-man]) ]) enable_man=no ],[ enable_man=yes ]) ]) AM_CONDITIONAL(ENABLE_MAN, test "$enable_man" != no) AC_ARG_ENABLE(rust, [AS_HELP_STRING([--enable-rust], [Compile Rust code instead of C [default=no]])],, [enable_rust=no; rust_debug_release=no]) AS_IF([test "$enable_rust" = yes], [ AC_PATH_PROG([cargo], [cargo]) AS_IF([test -z "$cargo"], [AC_MSG_ERROR([cargo is required for --enable-rust])]) AC_PATH_PROG([rustc], [rustc]) AS_IF([test -z "$rustc"], [AC_MSG_ERROR([rustc is required for --enable-rust])]) dnl These bits based on gnome:librsvg/configure.ac dnl By default, we build in public release mode. AC_ARG_ENABLE(rust-debug, AC_HELP_STRING([--enable-rust-debug], [Build Rust code with debugging information [default=no]]), [rust_debug_release=$enableval], [rust_debug_release=release]) AC_MSG_CHECKING(whether to build Rust code with debugging information) if test "x$rust_debug_release" = "xyes" ; then rust_debug_release=debug AC_MSG_RESULT(yes) else AC_MSG_RESULT(no) fi RUST_TARGET_SUBDIR=${rust_debug_release} AC_SUBST([RUST_TARGET_SUBDIR]) ]) AM_CONDITIONAL(RUST_DEBUG, [test "x$rust_debug_release" = "xdebug"]) AM_CONDITIONAL(ENABLE_RUST, [test "$enable_rust" != no]) AC_ARG_WITH(libarchive, AS_HELP_STRING([--without-libarchive], [Do not use libarchive]), :, with_libarchive=maybe) AS_IF([ test x$with_libarchive != xno ], [ AC_MSG_CHECKING([for $LIBARCHIVE_DEPENDENCY]) PKG_CHECK_EXISTS($LIBARCHIVE_DEPENDENCY, have_libarchive=yes, have_libarchive=no) AC_MSG_RESULT([$have_libarchive]) AS_IF([ test x$have_libarchive = xno && test x$with_libarchive != xmaybe ], [ AC_MSG_ERROR([libarchive is enabled but could not be found]) ]) AS_IF([ test x$have_libarchive = xyes], [ AC_DEFINE([HAVE_LIBARCHIVE], 1, [Define if we have libarchive.pc]) PKG_CHECK_MODULES(OT_DEP_LIBARCHIVE, $LIBARCHIVE_DEPENDENCY) save_LIBS=$LIBS LIBS=$OT_DEP_LIBARCHIVE_LIBS AC_CHECK_FUNCS(archive_read_support_filter_all) LIBS=$save_LIBS with_libarchive=yes ], [ with_libarchive=no ]) ], [ with_libarchive=no ]) if test x$with_libarchive != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libarchive"; fi AM_CONDITIONAL(USE_LIBARCHIVE, test $with_libarchive != no) dnl This is what is in RHEL7 anyways SELINUX_DEPENDENCY="libselinux >= 2.1.13" AC_ARG_WITH(selinux, AS_HELP_STRING([--without-selinux], [Do not use SELinux]), :, with_selinux=maybe) AS_IF([ test x$with_selinux != xno ], [ AC_MSG_CHECKING([for $SELINUX_DEPENDENCY]) PKG_CHECK_EXISTS($SELINUX_DEPENDENCY, have_selinux=yes, have_selinux=no) AC_MSG_RESULT([$have_selinux]) AS_IF([ test x$have_selinux = xno && test x$with_selinux != xmaybe ], [ AC_MSG_ERROR([SELinux is enabled but could not be found]) ]) AS_IF([ test x$have_selinux = xyes], [ AC_DEFINE([HAVE_SELINUX], 1, [Define if we have libselinux.pc]) PKG_CHECK_MODULES(OT_DEP_SELINUX, $SELINUX_DEPENDENCY) with_selinux=yes ], [ with_selinux=no ]) ], [ with_selinux=no ]) if test x$with_selinux != xno; then OSTREE_FEATURES="$OSTREE_FEATURES selinux"; fi AM_CONDITIONAL(USE_SELINUX, test $with_selinux != no) AC_ARG_WITH(smack, AS_HELP_STRING([--with-smack], [Enable 
smack]), :, with_smack=no) AS_IF([ test x$with_smack = xyes], [ AC_DEFINE([WITH_SMACK], 1, [Define if we have smack.pc]) ]) AM_CONDITIONAL(USE_SMACK, test $with_smack != no) dnl begin openssl (really just libcrypto right now) OPENSSL_DEPENDENCY="libcrypto >= 1.0.1" AC_ARG_WITH(openssl, AS_HELP_STRING([--with-openssl], [Enable use of OpenSSL libcrypto (checksums)]), :, with_openssl=no) AS_IF([ test x$with_openssl != xno ], [ PKG_CHECK_MODULES(OT_DEP_OPENSSL, $OPENSSL_DEPENDENCY) AC_DEFINE([HAVE_OPENSSL], 1, [Define if we have openssl]) with_openssl=yes ], [ with_openssl=no ]) if test x$with_openssl != xno; then OSTREE_FEATURES="$OSTREE_FEATURES openssl"; fi AM_CONDITIONAL(USE_OPENSSL, test $with_openssl != no) dnl end openssl dnl Avahi dependency for finding repos AVAHI_DEPENDENCY="avahi-client >= 0.6.31 avahi-glib >= 0.6.31" AC_ARG_WITH(avahi, AS_HELP_STRING([--without-avahi], [Do not use Avahi]), :, with_avahi=maybe) AS_IF([ test x$with_avahi != xno ], [ AC_MSG_CHECKING([for $AVAHI_DEPENDENCY]) PKG_CHECK_EXISTS($AVAHI_DEPENDENCY, have_avahi=yes, have_avahi=no) AC_MSG_RESULT([$have_avahi]) AS_IF([ test x$have_avahi = xno && test x$with_avahi != xmaybe ], [ AC_MSG_ERROR([Avahi is enabled but could not be found]) ]) AS_IF([ test x$have_avahi = xyes], [ AC_DEFINE([HAVE_AVAHI], 1, [Define if we have avahi-client.pc and avahi-glib.pc]) PKG_CHECK_MODULES(OT_DEP_AVAHI, $AVAHI_DEPENDENCY) with_avahi=yes ], [ with_avahi=no ]) ], [ with_avahi=no ]) if test x$with_avahi != xno; then OSTREE_FEATURES="$OSTREE_FEATURES avahi"; fi AM_CONDITIONAL(USE_AVAHI, test $with_avahi != no) dnl This is what is in RHEL7.2 right now, picking it arbitrarily LIBMOUNT_DEPENDENCY="mount >= 2.23.0" AC_ARG_WITH(libmount, AS_HELP_STRING([--without-libmount], [Do not use libmount]), :, with_libmount=maybe) AS_IF([ test x$with_libmount != xno ], [ AC_MSG_CHECKING([for $LIBMOUNT_DEPENDENCY]) PKG_CHECK_EXISTS($LIBMOUNT_DEPENDENCY, have_libmount=yes, have_libmount=no) AC_MSG_RESULT([$have_libmount]) AS_IF([ test x$have_libmount = xno && test x$with_libmount != xmaybe ], [ AC_MSG_ERROR([libmount is enabled but could not be found]) ]) AS_IF([ test x$have_libmount = xyes], [ AC_DEFINE([HAVE_LIBMOUNT], 1, [Define if we have libmount.pc]) PKG_CHECK_MODULES(OT_DEP_LIBMOUNT, $LIBMOUNT_DEPENDENCY) with_libmount=yes save_LIBS=$LIBS LIBS=$OT_DEP_LIBMOUNT_LIBS AC_CHECK_FUNCS(mnt_unref_cache) LIBS=$save_LIBS ], [ with_libmount=no ]) ], [ with_libmount=no ]) if test x$with_libmount != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libmount"; fi AM_CONDITIONAL(USE_LIBMOUNT, test $with_libmount != no) # Enabled by default because I think people should use it. 
AC_ARG_ENABLE(rofiles-fuse, [AS_HELP_STRING([--enable-rofiles-fuse], [generate rofiles-fuse helper [default=yes]])],, enable_rofiles_fuse=yes) AS_IF([ test x$enable_rofiles_fuse != xno ], [ PKG_CHECK_MODULES(BUILDOPT_FUSE, $FUSE_DEPENDENCY) ], [enable_rofiles_fuse=no]) AM_CONDITIONAL(BUILDOPT_FUSE, test x$enable_rofiles_fuse = xyes) AC_ARG_WITH(dracut, AS_HELP_STRING([--with-dracut], [Install dracut module (default: no)]),, [with_dracut=no]) case x$with_dracut in xno) ;; xyes) ;; xyesbutnoconf) ;; *) AC_MSG_ERROR([Unknown --with-dracut value $with_dracut]) esac AM_CONDITIONAL(BUILDOPT_DRACUT, test x$with_dracut = xyes || test x$with_dracut = xyesbutnoconf) AM_CONDITIONAL(BUILDOPT_DRACUT_CONF, test x$with_dracut = xyes) AC_ARG_WITH(mkinitcpio, AS_HELP_STRING([--with-mkinitcpio], [Install mkinitcpio module (default: no)]),, [with_mkinitcpio=no]) AM_CONDITIONAL(BUILDOPT_MKINITCPIO, test x$with_mkinitcpio = xyes) dnl We have separate checks for libsystemd and the unit dir for historical reasons PKG_CHECK_MODULES([LIBSYSTEMD], [libsystemd], [have_libsystemd=yes], [have_libsystemd=no]) AM_CONDITIONAL(BUILDOPT_LIBSYSTEMD, test x$have_libsystemd = xyes) AM_COND_IF(BUILDOPT_LIBSYSTEMD, AC_DEFINE([HAVE_LIBSYSTEMD], 1, [Define if we have libsystemd])) AS_IF([test "x$have_libsystemd" = "xyes"], [ with_systemd=yes AC_ARG_WITH([systemdsystemunitdir], AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [Directory for systemd service files]), [], [with_systemdsystemunitdir=$($PKG_CONFIG --variable=systemdsystemunitdir systemd)]) AS_IF([test "x$with_systemdsystemunitdir" != "xno"], [ AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir]) ]) AC_ARG_WITH([systemdsystemgeneratordir], AS_HELP_STRING([--with-systemdsystemgeneratordir=DIR], [Directory for systemd generators]), [], [with_systemdsystemgeneratordir=$($PKG_CONFIG --variable=systemdsystemgeneratordir systemd)]) AS_IF([test "x$with_systemdsystemgeneratordir" != "xno"], [ AC_SUBST([systemdsystemgeneratordir], [$with_systemdsystemgeneratordir]) ]) ]) AM_CONDITIONAL(BUILDOPT_SYSTEMD, test x$with_systemd = xyes) dnl If we have both, we use the "new /var" model with ostree-system-generator AM_CONDITIONAL(BUILDOPT_SYSTEMD_AND_LIBMOUNT,[test x$with_systemd = xyes && test x$with_libmount = xyes]) AM_COND_IF(BUILDOPT_SYSTEMD_AND_LIBMOUNT, AC_DEFINE([BUILDOPT_LIBSYSTEMD_AND_LIBMOUNT], 1, [Define if systemd and libmount])) AC_ARG_WITH(builtin-grub2-mkconfig, AS_HELP_STRING([--with-builtin-grub2-mkconfig], [Use a builtin minimal grub2-mkconfig to generate a GRUB2 configuration file (default: no)]),, [with_builtin_grub2_mkconfig=no]) AM_CONDITIONAL(BUILDOPT_BUILTIN_GRUB2_MKCONFIG, test x$with_builtin_grub2_mkconfig = xyes) AM_COND_IF(BUILDOPT_BUILTIN_GRUB2_MKCONFIG, AC_DEFINE([USE_BUILTIN_GRUB2_MKCONFIG], 1, [Define if using internal ostree-grub-generator])) AC_ARG_WITH(grub2-mkconfig-path, AS_HELP_STRING([--with-grub2-mkconfig-path], [Path to grub2-mkconfig])) AS_IF([test x$with_grub2_mkconfig_path = x], [ dnl Otherwise, look for the path to the system generator. On some dnl distributions GRUB2 *-mkconfig executable has 'grub2' prefix and dnl on some 'grub'. We default to grub2-mkconfig. 
AC_CHECK_PROGS(GRUB2_MKCONFIG, [grub2-mkconfig grub-mkconfig], [grub2-mkconfig]) ],[GRUB2_MKCONFIG=$with_grub2_mkconfig_path]) AC_DEFINE_UNQUOTED([GRUB2_MKCONFIG_PATH], ["$GRUB2_MKCONFIG"], [The system grub2-mkconfig executible name]) AC_ARG_WITH(static-compiler, AS_HELP_STRING([--with-static-compiler], [Use the given compiler to build ostree-prepare-root statically linked (default: no)]),, [with_static_compiler=no]) AM_CONDITIONAL(BUILDOPT_USE_STATIC_COMPILER, test x$with_static_compiler != xno) AC_SUBST(STATIC_COMPILER, $with_static_compiler) dnl for tests (but we can't use asan with gjs or any introspection, dnl see https://github.com/google/sanitizers/wiki/AddressSanitizerAsDso for more info) AS_IF([test "x$found_introspection" = xyes && test x$using_asan != xyes], [ AC_PATH_PROG(GJS, [gjs]) if test -n "$GJS"; then have_gjs=yes else have_gjs=no fi ], [have_gjs=no]) AM_CONDITIONAL(BUILDOPT_GJS, test x$have_gjs = xyes) # Do we enable building experimental (non-stable) API? # The OSTREE_ENABLE_EXPERIMENTAL_API #define is used internally and in public # headers, so any consumer of libostree who wants to use experimental API must # #define OSTREE_ENABLE_EXPERIMENTAL_API 1 # before including libostree headers. This means the name in the AC_DEFINE below # is public API. AC_ARG_ENABLE([experimental-api], [AS_HELP_STRING([--enable-experimental-api], [Enable unstable experimental API in libostree [default=no]])],, [enable_experimental_api=no]) AS_IF([test x$enable_experimental_api = xyes], [AC_DEFINE([OSTREE_ENABLE_EXPERIMENTAL_API],[1],[Define if experimental API should be enabled]) OSTREE_FEATURES="$OSTREE_FEATURES experimental"] ) AM_CONDITIONAL([ENABLE_EXPERIMENTAL_API],[test x$enable_experimental_api = xyes]) AM_CONDITIONAL([BUILDOPT_IS_DEVEL_BUILD],[test x$is_release_build != xyes]) AM_COND_IF([BUILDOPT_IS_DEVEL_BUILD], AC_DEFINE([BUILDOPT_IS_DEVEL_BUILD], [1], [Define if doing a development build]) release_build_type=devel, release_build_type=release) OSTREE_FEATURES="$OSTREE_FEATURES $release_build_type" AC_CONFIG_FILES([ Makefile apidoc/Makefile src/libostree/ostree-1.pc src/libostree/ostree-version.h ]) AC_OUTPUT echo " libOSTree $VERSION ($release_build_type) =============== introspection: $found_introspection Rust (internal oxidation): $rust_debug_release rofiles-fuse: $enable_rofiles_fuse HTTP backend: $fetcher_backend \"ostree trivial-httpd\": $enable_trivial_httpd_cmdline SELinux: $with_selinux OpenSSL libcrypto (checksums): $with_openssl systemd: $have_libsystemd libmount: $with_libmount libarchive (parse tar files directly): $with_libarchive static deltas: yes (always enabled now) O_TMPFILE: $enable_otmpfile wrpseudo-compat: $enable_wrpseudo_compat man pages (xsltproc): $enable_man api docs (gtk-doc): $enable_gtk_doc installed tests: $enable_installed_tests gjs-based tests: $have_gjs dracut: $with_dracut mkinitcpio: $with_mkinitcpio Static compiler for ostree-prepare-root: $with_static_compiler Experimental API $enable_experimental_api" AS_IF([test x$with_builtin_grub2_mkconfig = xyes], [ echo " builtin grub2-mkconfig (instead of system): $with_builtin_grub2_mkconfig" ], [ echo " grub2-mkconfig path: $GRUB2_MKCONFIG" ]) echo ""
1
11,784
Can we add a `--with-bashcompdir` here to override this? I'd like to be able to do unprivileged builds.
ostreedev-ostree
c
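The record above asks for a user-facing override of `bashcompdir`. A minimal sketch of how that could look in `configure.ac`, layered on the `PKG_CHECK_VAR` fallback the patch already defines; the option name follows the reviewer's request, and the help text is an assumption:

```m4
dnl Hypothetical --with-bashcompdir override; falls back to pkg-config
dnl (and then to ${sysconfdir}/bash_completion.d) only when unset.
AC_ARG_WITH([bashcompdir],
  AS_HELP_STRING([--with-bashcompdir=DIR],
    [directory for bash completion scripts]),
  [bashcompdir=$withval],
  [PKG_CHECK_VAR(bashcompdir, [bash-completion], [completionsdir], ,
     bashcompdir="${sysconfdir}/bash_completion.d")])
AC_SUBST(bashcompdir)
```

With this, an unprivileged build can pass something like `--with-bashcompdir=$HOME/.local/share/bash-completion/completions` and never touch the pkg-config-reported system directory.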
@@ -0,0 +1,13 @@ +package cstor + +import ( + "github.com/openebs/maya/cmd/cstor-volume-grpc/app/command" +) + +func Snapshot(volName, snapName, targetIP string) (interface{}, error) { + return command.CreateSnapshot(volName, snapName, targetIP) +} + +func SnapshotDelete(volName, snapName, targetIP string) (interface{}, error) { + return command.DestroySnapshot(volName, snapName, targetIP) +}
1
1
9,685
We will be removing this maya/volume package completely. Do not use this.
openebs-maya
go
@@ -67,7 +67,7 @@ static train_result call_daal_kernel(const context_gpu& ctx, daal::algorithms::classifier::ModelPtr model_ptr(new daal_knn::Model(column_count)); if (!model_ptr) { - throw bad_alloc(); + throw host_bad_alloc(); } auto knn_model = static_cast<daal_knn::Model*>(model_ptr.get());
1
/******************************************************************************* * Copyright 2020 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #define DAAL_SYCL_INTERFACE #define DAAL_SYCL_INTERFACE_USM #define DAAL_SYCL_INTERFACE_REVERSED_RANGE #include <src/algorithms/k_nearest_neighbors/oneapi/bf_knn_classification_model_ucapi_impl.h> #include <src/algorithms/k_nearest_neighbors/oneapi/bf_knn_classification_train_kernel_ucapi.h> #include "oneapi/dal/algo/knn/backend/gpu/train_kernel.hpp" #include "oneapi/dal/algo/knn/backend/model_interop.hpp" #include "oneapi/dal/backend/interop/common_dpc.hpp" #include "oneapi/dal/backend/interop/error_converter.hpp" #include "oneapi/dal/backend/interop/table_conversion.hpp" #include "oneapi/dal/table/row_accessor.hpp" namespace oneapi::dal::knn::backend { using dal::backend::context_gpu; namespace daal_knn = daal::algorithms::bf_knn_classification; namespace interop = dal::backend::interop; template <typename Float> using daal_knn_brute_force_kernel_t = daal_knn::training::internal::KNNClassificationTrainKernelUCAPI<Float>; using daal_interop_model_t = detail::model_impl::interop_model; template <typename Float> static train_result call_daal_kernel(const context_gpu& ctx, const descriptor_base& desc, const table& data, const table& labels) { auto& queue = ctx.get_queue(); interop::execution_context_guard guard(queue); const std::int64_t row_count = data.get_row_count(); const std::int64_t column_count = data.get_column_count(); auto arr_data = row_accessor<const Float>{ data }.pull(queue); auto arr_labels = row_accessor<const Float>{ labels }.pull(queue); const auto daal_data = interop::convert_to_daal_sycl_homogen_table(queue, arr_data, row_count, column_count); const auto daal_labels = interop::convert_to_daal_sycl_homogen_table(queue, arr_labels, row_count, 1); daal_knn::Parameter daal_parameter( desc.get_class_count(), desc.get_neighbor_count(), desc.get_data_use_in_model() ? 
daal_knn::doUse : daal_knn::doNotUse); daal::algorithms::classifier::ModelPtr model_ptr(new daal_knn::Model(column_count)); if (!model_ptr) { throw bad_alloc(); } auto knn_model = static_cast<daal_knn::Model*>(model_ptr.get()); knn_model->impl()->setData<Float>(daal_data, desc.get_data_use_in_model()); knn_model->impl()->setLabels<Float>(daal_labels, desc.get_data_use_in_model()); interop::status_to_exception( daal_knn_brute_force_kernel_t<Float>().compute(daal_data.get(), daal_labels.get(), knn_model, daal_parameter, *daal_parameter.engine.get())); auto interop = new daal_interop_model_t(model_ptr); const auto model_impl = std::make_shared<detail::model_impl>(interop); return train_result().set_model(dal::detail::pimpl_accessor::make<model>(model_impl)); } template <typename Float> static train_result train(const context_gpu& ctx, const descriptor_base& desc, const train_input& input) { return call_daal_kernel<Float>(ctx, desc, input.get_data(), input.get_labels()); } template <typename Float> struct train_kernel_gpu<Float, method::brute_force> { train_result operator()(const context_gpu& ctx, const descriptor_base& desc, const train_input& input) const { return train<Float>(ctx, desc, input); } }; template struct train_kernel_gpu<float, method::brute_force>; template struct train_kernel_gpu<double, method::brute_force>; } // namespace oneapi::dal::knn::backend
1
24,355
Why do you think `model_ptr` will be `nullptr` when memory allocation fails? I think `new daal_knn::Model(column_count)` would throw `std::bad_alloc` if `operator new` is not overloaded. Is it overloaded by DAAL in such a way that it simply returns `nullptr` on a failed allocation?
oneapi-src-oneDAL
cpp
@@ -42,14 +42,15 @@ type MinerCreateResult struct { var minerCreateCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ - Tagline: "Create a new file miner with <pledge> sectors and <collateral> FIL", + Tagline: "Create a new file miner with <collateral> FIL", ShortDescription: `Issues a new message to the network to create the miner, then waits for the message to be mined as this is required to return the address of the new miner. -Collateral must be greater than 0.001 FIL per pledged sector.`, +Collateral will be committed at the rate of 0.001FIL per sector. When the +miner's collateral drops below 0.001FIL, the miner will not be able to commit +additional sectors.`, }, Arguments: []cmdkit.Argument{ - cmdkit.StringArg("pledge", true, false, "The size of the pledge (in sectors) for the miner"), - cmdkit.StringArg("collateral", true, false, "The amount of collateral in FIL to be sent (minimum 0.001 FIL per sector)"), + cmdkit.StringArg("collateral", true, false, "The amount of collateral, in FIL"), }, Options: []cmdkit.Option{ cmdkit.StringOption("from", "Address to send from"),
1
package commands import ( "encoding/json" "fmt" "io" "math/big" "strconv" "github.com/ipfs/go-cid" "github.com/ipfs/go-ipfs-cmdkit" "github.com/ipfs/go-ipfs-cmds" "github.com/libp2p/go-libp2p-peer" "github.com/pkg/errors" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/porcelain" "github.com/filecoin-project/go-filecoin/protocol/storage/storagedeal" "github.com/filecoin-project/go-filecoin/types" ) var minerCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Manage a single miner actor", }, Subcommands: map[string]*cmds.Command{ "create": minerCreateCmd, "owner": minerOwnerCmd, "power": minerPowerCmd, "set-price": minerSetPriceCmd, "update-peerid": minerUpdatePeerIDCmd, "list-deals": minerListDealsCmd, }, } // MinerCreateResult is the type returned when creating a miner. type MinerCreateResult struct { Address address.Address GasUsed types.GasUnits Preview bool } var minerCreateCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Create a new file miner with <pledge> sectors and <collateral> FIL", ShortDescription: `Issues a new message to the network to create the miner, then waits for the message to be mined as this is required to return the address of the new miner. Collateral must be greater than 0.001 FIL per pledged sector.`, }, Arguments: []cmdkit.Argument{ cmdkit.StringArg("pledge", true, false, "The size of the pledge (in sectors) for the miner"), cmdkit.StringArg("collateral", true, false, "The amount of collateral in FIL to be sent (minimum 0.001 FIL per sector)"), }, Options: []cmdkit.Option{ cmdkit.StringOption("from", "Address to send from"), cmdkit.StringOption("peerid", "Base58-encoded libp2p peer ID that the miner will operate"), priceOption, limitOption, previewOption, }, Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { var err error fromAddr, err := optionalAddr(req.Options["from"]) if err != nil { return err } var pid peer.ID peerid := req.Options["peerid"] if peerid != nil { pid, err = peer.IDB58Decode(peerid.(string)) if err != nil { return errors.Wrap(err, "invalid peer id") } } if pid == "" { pid = GetPorcelainAPI(env).NetworkGetPeerID() } pledge, err := strconv.ParseUint(req.Arguments[0], 10, 64) if err != nil { return ErrInvalidPledge } collateral, ok := types.NewAttoFILFromFILString(req.Arguments[1]) if !ok { return ErrInvalidCollateral } gasPrice, gasLimit, preview, err := parseGasOptions(req) if err != nil { return err } if preview { usedGas, err := GetPorcelainAPI(env).MinerPreviewCreate( req.Context, fromAddr, pledge, pid, collateral, ) if err != nil { return err } return re.Emit(&MinerCreateResult{ Address: address.Undef, GasUsed: usedGas, Preview: true, }) } addr, err := GetPorcelainAPI(env).MinerCreate( req.Context, fromAddr, gasPrice, gasLimit, pledge, pid, collateral, ) if err != nil { return errors.Wrap(err, "Could not create miner. 
Please consult the documentation to setup your wallet and genesis block correctly") } return re.Emit(&MinerCreateResult{ Address: *addr, GasUsed: types.NewGasUnits(0), Preview: false, }) }, Type: &MinerCreateResult{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, res *MinerCreateResult) error { if res.Preview { output := strconv.FormatUint(uint64(res.GasUsed), 10) _, err := w.Write([]byte(output)) return err } return PrintString(w, res.Address) }), }, } // MinerSetPriceResult is the return type for miner set-price command type MinerSetPriceResult struct { GasUsed types.GasUnits MinerSetPriceResponse porcelain.MinerSetPriceResponse Preview bool } var minerSetPriceCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Set the minimum price for storage", ShortDescription: `Sets the mining.minimumPrice in config and creates a new ask for the given price. This command waits for the ask to be mined.`, }, Arguments: []cmdkit.Argument{ cmdkit.StringArg("storageprice", true, false, "The new price of storage in FIL per byte per block"), cmdkit.StringArg("expiry", true, false, "How long this ask is valid for in blocks"), }, Options: []cmdkit.Option{ cmdkit.StringOption("from", "Address to send from"), cmdkit.StringOption("miner", "The address of the miner owning the ask"), priceOption, limitOption, previewOption, }, Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { price, ok := types.NewAttoFILFromFILString(req.Arguments[0]) if !ok { return ErrInvalidPrice } fromAddr, err := optionalAddr(req.Options["from"]) if err != nil { return err } var minerAddr address.Address if req.Options["miner"] != nil { minerAddr, err = address.NewFromString(req.Options["miner"].(string)) if err != nil { return errors.Wrap(err, "miner must be an address") } } expiry, ok := big.NewInt(0).SetString(req.Arguments[1], 10) if !ok { return fmt.Errorf("expiry must be a valid integer") } gasPrice, gasLimit, preview, err := parseGasOptions(req) if err != nil { return err } if preview { usedGas, err := GetPorcelainAPI(env).MinerPreviewSetPrice( req.Context, fromAddr, minerAddr, price, expiry) if err != nil { return err } return re.Emit(&MinerSetPriceResult{ GasUsed: usedGas, Preview: true, MinerSetPriceResponse: porcelain.MinerSetPriceResponse{}, }) } res, err := GetPorcelainAPI(env).MinerSetPrice( req.Context, fromAddr, minerAddr, gasPrice, gasLimit, price, expiry) if err != nil { return err } return re.Emit(&MinerSetPriceResult{ GasUsed: types.NewGasUnits(0), Preview: false, MinerSetPriceResponse: res, }) }, Type: &MinerSetPriceResult{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, res *MinerSetPriceResult) error { if res.Preview { output := strconv.FormatUint(uint64(res.GasUsed), 10) _, err := w.Write([]byte(output)) return err } _, err := fmt.Fprintf(w, `Set price for miner %s to %s. Published ask, cid: %s. Ask confirmed on chain in block: %s. 
`, res.MinerSetPriceResponse.MinerAddr.String(), res.MinerSetPriceResponse.Price.String(), res.MinerSetPriceResponse.AddAskCid.String(), res.MinerSetPriceResponse.BlockCid.String(), ) return err }), }, } // MinerUpdatePeerIDResult is the return type for miner update-peerid command type MinerUpdatePeerIDResult struct { Cid cid.Cid GasUsed types.GasUnits Preview bool } var minerUpdatePeerIDCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Change the libp2p identity that a miner is operating", ShortDescription: `Issues a new message to the network to update the miner's libp2p identity.`, }, Arguments: []cmdkit.Argument{ cmdkit.StringArg("address", true, false, "Miner address to update peer ID for"), cmdkit.StringArg("peerid", true, false, "Base58-encoded libp2p peer ID that the miner will operate"), }, Options: []cmdkit.Option{ cmdkit.StringOption("from", "Address to send from"), priceOption, limitOption, previewOption, }, Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { minerAddr, err := address.NewFromString(req.Arguments[0]) if err != nil { return err } fromAddr, err := optionalAddr(req.Options["from"]) if err != nil { return err } newPid, err := peer.IDB58Decode(req.Arguments[1]) if err != nil { return err } gasPrice, gasLimit, preview, err := parseGasOptions(req) if err != nil { return err } if preview { usedGas, err := GetPorcelainAPI(env).MessagePreview( req.Context, fromAddr, minerAddr, "updatePeerID", newPid, ) if err != nil { return err } return re.Emit(&MinerUpdatePeerIDResult{ Cid: cid.Cid{}, GasUsed: usedGas, Preview: true, }) } c, err := GetPorcelainAPI(env).MessageSendWithDefaultAddress( req.Context, fromAddr, minerAddr, nil, gasPrice, gasLimit, "updatePeerID", newPid, ) if err != nil { return err } return re.Emit(&MinerUpdatePeerIDResult{ Cid: c, GasUsed: types.NewGasUnits(0), Preview: false, }) }, Type: &MinerUpdatePeerIDResult{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, res *MinerUpdatePeerIDResult) error { if res.Preview { output := strconv.FormatUint(uint64(res.GasUsed), 10) _, err := w.Write([]byte(output)) return err } return PrintString(w, res.Cid) }), }, } var minerOwnerCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Show the actor address of <miner>", ShortDescription: `Given <miner> miner address, output the address of the actor that owns the miner.`, }, Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { minerAddr, err := optionalAddr(req.Arguments[0]) if err != nil { return err } bytes, err := GetPorcelainAPI(env).MessageQuery( req.Context, address.Undef, minerAddr, "getOwner", ) if err != nil { return err } ownerAddr, err := address.NewFromBytes(bytes[0]) if err != nil { return err } return re.Emit(&ownerAddr) }, Arguments: []cmdkit.Argument{ cmdkit.StringArg("miner", true, false, "The address of the miner"), }, Type: address.Address{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, a *address.Address) error { return PrintString(w, a) }), }, } var minerPowerCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Get the power of a miner versus the total storage market power", ShortDescription: `Check the current power of a given miner and total power of the storage market. 
Values will be output as a ratio where the first number is the miner power and second is the total market power.`, }, Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { minerAddr, err := optionalAddr(req.Arguments[0]) if err != nil { return err } bytes, err := GetPorcelainAPI(env).MessageQuery( req.Context, address.Undef, minerAddr, "getPower", ) if err != nil { return err } power := types.NewBytesAmountFromBytes(bytes[0]) bytes, err = GetPorcelainAPI(env).MessageQuery( req.Context, address.Undef, address.StorageMarketAddress, "getTotalStorage", ) if err != nil { return err } total := types.NewBytesAmountFromBytes(bytes[0]) str := fmt.Sprintf("%s / %s", power, total) // nolint: govet return re.Emit(str) }, Arguments: []cmdkit.Argument{ cmdkit.StringArg("miner", true, false, "The address of the miner"), }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, a string) error { _, err := fmt.Fprintln(w, a) return err }), }, } type minerListDealResult struct { Miner address.Address `json:"minerAddress"` PieceCid cid.Cid `json:"pieceCid"` ProposalCid cid.Cid `json:"proposalCid"` State storagedeal.State `json:"state"` } var minerListDealsCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "List all deals received by the miner", ShortDescription: ` Lists all recorded deals received by the miner from clients on the network. This may include pending deals, active deals, finished deals and rejected deals. `, }, Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { dealsCh, err := GetPorcelainAPI(env).DealMinerLs(req.Context) if err != nil { return err } for deal := range dealsCh { if deal.Err != nil { return deal.Err } out := &minerListDealResult{ Miner: deal.Deal.Miner, PieceCid: deal.Deal.Proposal.PieceRef, ProposalCid: deal.Deal.Response.ProposalCid, State: deal.Deal.Response.State, } if err = re.Emit(out); err != nil { return err } } return nil }, Type: minerListDealResult{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, res *minerListDealResult) error { encoder := json.NewEncoder(w) encoder.SetIndent("", "\t") return encoder.Encode(res) }), }, }
1
19,503
Where does this 0.001 come from? Is this from the network collateral cost function, which happens to be hardcoded right now? I think it's going to rot very quickly: it depends on sector size and (probably) block height.
filecoin-project-venus
go
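For concreteness, the arithmetic behind the questioned help text: at the hardcoded rate, collateral in attoFIL is just the sector count times 10^15. This only illustrates the figure the reviewer expects to rot; the sector count here is an assumption, and the rate is not the network's actual cost function:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 1 FIL = 10^18 attoFIL, so the hardcoded 0.001 FIL/sector = 10^15 attoFIL.
	perSector := new(big.Int).Exp(big.NewInt(10), big.NewInt(15), nil)
	sectors := big.NewInt(250) // hypothetical pledge size
	minCollateral := new(big.Int).Mul(perSector, sectors)
	fmt.Printf("minimum collateral for %v sectors: %v attoFIL\n", sectors, minCollateral)
}
```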
@@ -0,0 +1,5 @@ +# ----------------------------------------------------------------------------- +# The CodeChecker Infrastructure +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# -----------------------------------------------------------------------------
1
1
11,668
Please update the license header in each source file. For more information see: #2697
Ericsson-codechecker
c
@@ -144,8 +144,9 @@ class HttpClient { * @param {function(!Error)} onError The function to call if the request fails. * @param {?string=} opt_data The data to send with the request. * @param {?RequestOptions=} opt_proxy The proxy server to use for the request. + * @param {!number=} retries The current number of retries. */ -function sendRequest(options, onOk, onError, opt_data, opt_proxy) { +function sendRequest(options, onOk, onError, opt_data, opt_proxy, retries) { var hostname = options.hostname; var port = options.port;
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. /** * @fileoverview Defines an {@linkplain cmd.Executor command executor} that * communicates with a remote end using HTTP + JSON. */ 'use strict'; const http = require('http'); const https = require('https'); const url = require('url'); const httpLib = require('../lib/http'); /** * @typedef {{protocol: (?string|undefined), * auth: (?string|undefined), * hostname: (?string|undefined), * host: (?string|undefined), * port: (?string|undefined), * path: (?string|undefined), * pathname: (?string|undefined)}} */ var RequestOptions; /** * @param {string} aUrl The request URL to parse. * @return {RequestOptions} The request options. * @throws {Error} if the URL does not include a hostname. */ function getRequestOptions(aUrl) { let options = url.parse(aUrl); if (!options.hostname) { throw new Error('Invalid URL: ' + aUrl); } // Delete the search and has portions as they are not used. options.search = null; options.hash = null; options.path = options.pathname; return options; } /** * A basic HTTP client used to send messages to a remote end. * * @implements {httpLib.Client} */ class HttpClient { /** * @param {string} serverUrl URL for the WebDriver server to send commands to. * @param {http.Agent=} opt_agent The agent to use for each request. * Defaults to `http.globalAgent`. * @param {?string=} opt_proxy The proxy to use for the connection to the * server. Default is to use no proxy. */ constructor(serverUrl, opt_agent, opt_proxy) { /** @private {http.Agent} */ this.agent_ = opt_agent || null; /** * Base options for each request. * @private {RequestOptions} */ this.options_ = getRequestOptions(serverUrl); /** * @private {?RequestOptions} */ this.proxyOptions_ = opt_proxy ? 
getRequestOptions(opt_proxy) : null; } /** @override */ send(httpRequest) { let data; let headers = {}; httpRequest.headers.forEach(function(value, name) { headers[name] = value; }); headers['Content-Length'] = 0; if (httpRequest.method == 'POST' || httpRequest.method == 'PUT') { data = JSON.stringify(httpRequest.data); headers['Content-Length'] = Buffer.byteLength(data, 'utf8'); headers['Content-Type'] = 'application/json;charset=UTF-8'; } let path = this.options_.path; if (path.endsWith('/') && httpRequest.path.startsWith('/')) { path += httpRequest.path.substring(1); } else { path += httpRequest.path; } let parsedPath = url.parse(path); let options = { agent: this.agent_ || null, method: httpRequest.method, auth: this.options_.auth, hostname: this.options_.hostname, port: this.options_.port, protocol: this.options_.protocol, path: parsedPath.path, pathname: parsedPath.pathname, search: parsedPath.search, hash: parsedPath.hash, headers, }; return new Promise((fulfill, reject) => { sendRequest(options, fulfill, reject, data, this.proxyOptions_); }); } } /** * Sends a single HTTP request. * @param {!Object} options The request options. * @param {function(!httpLib.Response)} onOk The function to call if the * request succeeds. * @param {function(!Error)} onError The function to call if the request fails. * @param {?string=} opt_data The data to send with the request. * @param {?RequestOptions=} opt_proxy The proxy server to use for the request. */ function sendRequest(options, onOk, onError, opt_data, opt_proxy) { var hostname = options.hostname; var port = options.port; if (opt_proxy) { let proxy = /** @type {RequestOptions} */(opt_proxy); // RFC 2616, section 5.1.2: // The absoluteURI form is REQUIRED when the request is being made to a // proxy. let absoluteUri = url.format(options); // RFC 2616, section 14.23: // An HTTP/1.1 proxy MUST ensure that any request message it forwards does // contain an appropriate Host header field that identifies the service // being requested by the proxy. let targetHost = options.hostname if (options.port) { targetHost += ':' + options.port; } // Update the request options with our proxy info. options.headers['Host'] = targetHost; options.path = absoluteUri; options.host = proxy.host; options.hostname = proxy.hostname; options.port = proxy.port; if (proxy.auth) { options.headers['Proxy-Authorization'] = 'Basic ' + new Buffer(proxy.auth).toString('base64'); } } let requestFn = options.protocol === 'https:' ? 
https.request : http.request; var request = requestFn(options, function onResponse(response) { if (response.statusCode == 302 || response.statusCode == 303) { try { var location = url.parse(response.headers['location']); } catch (ex) { onError(Error( 'Failed to parse "Location" header for server redirect: ' + ex.message + '\nResponse was: \n' + new httpLib.Response(response.statusCode, response.headers, ''))); return; } if (!location.hostname) { location.hostname = hostname; location.port = port; } request.abort(); sendRequest({ method: 'GET', protocol: location.protocol || options.protocol, hostname: location.hostname, port: location.port, path: location.path, pathname: location.pathname, search: location.search, hash: location.hash, headers: { 'Accept': 'application/json; charset=utf-8' } }, onOk, onError, undefined, opt_proxy); return; } var body = []; response.on('data', body.push.bind(body)); response.on('end', function() { var resp = new httpLib.Response( /** @type {number} */(response.statusCode), /** @type {!Object<string>} */(response.headers), body.join('').replace(/\0/g, '')); onOk(resp); }); }); request.on('error', function(e) { if (e.code === 'ECONNRESET') { setTimeout(function() { sendRequest(options, onOk, onError, opt_data, opt_proxy); }, 15); } else { var message = e.message; if (e.code) { message = e.code + ' ' + message; } onError(new Error(message)); } }); if (opt_data) { request.write(opt_data); } request.end(); } // PUBLIC API exports.Executor = httpLib.Executor; exports.HttpClient = HttpClient; exports.Request = httpLib.Request; exports.Response = httpLib.Response;
1
14,969
`!number` -> `number`
SeleniumHQ-selenium
rb
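In Closure-style JSDoc, primitives such as `number` are non-nullable by default, so the `!` adds nothing; for an optional parameter the conventional annotation is `number=`. The corrected signature would read:

```js
/**
 * @param {number=} retries The current number of retries.
 */
function sendRequest(options, onOk, onError, opt_data, opt_proxy, retries) {
  // ...
}
```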
@@ -64,6 +64,6 @@ namespace AutoRest.Swagger.Validation.Core /// <summary> /// The collection of rules that apply to all properties regardless of other rules. /// </summary> - public static IEnumerable<Rule> UniversalRules = new[] { new NoControlCharacters() }; + public static IEnumerable<Rule> UniversalRules = new List<Rule>(); } }
1
using System; using System.Collections.Generic; using System.Linq; using System.Reflection; using AutoRest.Core.Utilities.Collections; using AutoRest.Core.Utilities; namespace AutoRest.Swagger.Validation.Core { internal static class RulesExtensions { /// <summary> /// Gets an enumerable of properties for <paramref name="entity" /> that can be validated /// </summary> /// <param name="entity">The object to get properties for</param> /// <returns></returns> internal static IEnumerable<PropertyInfo> GetValidatableProperties(this object entity) => entity.GetType().GetProperties(BindingFlags.FlattenHierarchy | BindingFlags.Public | BindingFlags.Instance) ?? Enumerable.Empty<PropertyInfo>(); /// <summary> /// Properties of type object can cause infinite iteration if recursively traversed /// </summary> /// <param name="prop"></param> /// <returns></returns> internal static bool IsTraversableProperty(this PropertyInfo prop) => prop.PropertyType != typeof(object); /// <summary> /// Determines if a dictionary's elements should be recursively traversed /// Dictionaries where there isn't type information for the value type should not be /// traversed, since there isn't enough information to prevent infinite traversal /// </summary> /// <param name="entity">The object to check</param> /// <returns></returns> internal static bool IsTraversableDictionary(this object entity) { if (entity == null) { return false; } // Dictionaries of type <string, object> cannot be traversed, because the object could be infinitely deep. // We only want to validate objects that have strong typing for the value type var dictType = entity.GetType(); return dictType.IsGenericType() && dictType.GenericTypeArguments.Count() >= 2 && dictType.GenericTypeArguments[1] != typeof(object); } public static IEnumerable<Rule> GetValidationRules(this PropertyInfo property) { var propertyRules = property.GetCustomAttributes<RuleAttribute>(true).Select(each => each.Rule).ReEnumerable(); return propertyRules.Concat(UniversalRules).ReEnumerable(); } public static IEnumerable<Rule> GetValidationCollectionRules(this PropertyInfo property) { var collectionRules = property.GetCustomAttributes<CollectionRuleAttribute>(true).Select(each => each.Rule).ReEnumerable(); return collectionRules.Concat(UniversalRules).ReEnumerable(); } public static IEnumerable<Rule> GetValidationRules(this Type type) => type.GetCustomAttributes<RuleAttribute>(true).Select(each => each.Rule).ReEnumerable(); /// <summary> /// The collection of rules that apply to all properties regardless of other rules. /// </summary> public static IEnumerable<Rule> UniversalRules = new[] { new NoControlCharacters() }; } }
1
24,948
So we don't have universal rules afterwards? In that case I'd remove this entirely (and maybe some of the places where it's used get cleaner), since we shouldn't ever add a universal rule. Not only because it's way simpler in TS; I'm also just super skeptical about "universal". In the case of this rule, even control characters may have their place in Swaggers, e.g. in `examples` sections, where you can have raw response bodies in which control characters are perfectly valid.
Azure-autorest
java
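A sketch of the removal the reviewer suggests: with `UniversalRules` gone, the two property extensions reduce to their attribute-derived rules alone (bodies mirror the existing methods minus the `Concat`):

```csharp
public static IEnumerable<Rule> GetValidationRules(this PropertyInfo property) =>
    property.GetCustomAttributes<RuleAttribute>(true)
            .Select(each => each.Rule)
            .ReEnumerable();

public static IEnumerable<Rule> GetValidationCollectionRules(this PropertyInfo property) =>
    property.GetCustomAttributes<CollectionRuleAttribute>(true)
            .Select(each => each.Rule)
            .ReEnumerable();
```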
@@ -115,7 +115,8 @@ module.exports = function exec(command, opt_options) { var proc = childProcess.spawn(command, options.args || [], { env: options.env || process.env, - stdio: options.stdio || 'ignore' + stdio: options.stdio || 'ignore', + detached: true }); // This process should not wait on the spawned child, however, we do
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 'use strict'; const childProcess = require('child_process'); /** * A hash with configuration options for an executed command. * * - `args` - Command line arguments. * - `env` - Command environment; will inherit from the current process if * missing. * - `stdio` - IO configuration for the spawned server process. For more * information, refer to the documentation of `child_process.spawn`. * * @typedef {{ * args: (!Array<string>|undefined), * env: (!Object<string, string>|undefined), * stdio: (string|!Array<string|number|!stream.Stream|null|undefined>| * undefined) * }} */ var Options; /** * Describes a command's termination conditions. */ class Result { /** * @param {?number} code The exit code, or {@code null} if the command did not * exit normally. * @param {?string} signal The signal used to kill the command, or * {@code null}. */ constructor(code, signal) { /** @type {?number} */ this.code = code; /** @type {?string} */ this.signal = signal; } /** @override */ toString() { return `Result(code=${this.code}, signal=${this.signal})`; } } const COMMAND_RESULT = /** !WeakMap<!Command, !Promise<!Result>> */new WeakMap; const KILL_HOOK = /** !WeakMap<!Command, function(string)> */new WeakMap; /** * Represents a command running in a sub-process. */ class Command { /** * @param {!Promise<!Result>} result The command result. * @param {function(string)} onKill The function to call when {@link #kill()} * is called. */ constructor(result, onKill) { COMMAND_RESULT.set(this, result); KILL_HOOK.set(this, onKill); } /** * @return {!Promise<!Result>} A promise for the result of this * command. */ result() { return /** @type {!Promise<!Result>} */(COMMAND_RESULT.get(this)); } /** * Sends a signal to the underlying process. * @param {string=} opt_signal The signal to send; defaults to `SIGTERM`. */ kill(opt_signal) { KILL_HOOK.get(this)(opt_signal || 'SIGTERM'); } } // PUBLIC API /** * Spawns a child process. The returned {@link Command} may be used to wait * for the process result or to send signals to the process. * * @param {string} command The executable to spawn. * @param {Options=} opt_options The command options. * @return {!Command} The launched command. */ module.exports = function exec(command, opt_options) { var options = opt_options || {}; var proc = childProcess.spawn(command, options.args || [], { env: options.env || process.env, stdio: options.stdio || 'ignore' }); // This process should not wait on the spawned child, however, we do // want to ensure the child is killed when this process exits. 
proc.unref(); process.once('exit', onProcessExit); let result = new Promise(resolve => { proc.once('exit', (code, signal) => { proc = null; process.removeListener('exit', onProcessExit); resolve(new Result(code, signal)); }); }); return new Command(result, killCommand); function onProcessExit() { killCommand('SIGTERM'); } function killCommand(signal) { process.removeListener('exit', onProcessExit); if (proc) { proc.kill(signal); proc = null; } } }; // Exported to improve generated API documentation. module.exports.Command = Command; /** @typedef {!Options} */ module.exports.Options = Options; module.exports.Result = Result;
1
13,332
You're welcome to surface this option through the API, but I'm not going to make this the default behavior.
SeleniumHQ-selenium
py
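One way to surface the behavior through the API rather than changing the default, as the reviewer allows: add an opt-in flag to the `Options` hash (the `detached` field here is a hypothetical addition, not existing API):

```js
var proc = childProcess.spawn(command, options.args || [], {
  env: options.env || process.env,
  stdio: options.stdio || 'ignore',
  detached: !!options.detached  // opt-in; the default behavior is unchanged
});
```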
@@ -76,10 +76,10 @@ func buildBinary(source, target string) error { if !ok { targetArch = runtime.GOARCH } - return buildBinaryFor(source, target, targetOS, targetArch) + return buildBinaryFor(source, target, targetOS, targetArch, false) } -func buildBinaryFor(source, target, targetOS, targetArch string) error { +func buildBinaryFor(source, target, targetOS, targetArch string, buildStatic bool) error { log.Info().Msgf("Building %s -> %s %s/%s", source, target, targetOS, targetArch) buildDir, err := filepath.Abs(path.Join("build", target))
1
/* * Copyright (C) 2020 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package packages import ( "fmt" "os" "path" "path/filepath" "runtime" "strings" "github.com/magefile/mage/sh" "github.com/mysteriumnetwork/go-ci/env" "github.com/mysteriumnetwork/node/logconfig" "github.com/mysteriumnetwork/node/utils/fileutil" "github.com/rs/zerolog/log" ) // Build builds the project. Like go tool, it supports cross-platform build with env vars: GOOS, GOARCH. func Build() error { logconfig.Bootstrap() if err := buildBinary(path.Join("cmd", "mysterium_node", "mysterium_node.go"), "myst"); err != nil { return err } if err := copyConfig("myst"); err != nil { return err } if err := buildBinary(path.Join("cmd", "supervisor", "supervisor.go"), "myst_supervisor"); err != nil { return err } return nil } func linkerFlags() (flags []string) { if env.Str(env.BuildBranch) != "" { flags = append(flags, "-X", fmt.Sprintf("'github.com/mysteriumnetwork/node/metadata.BuildBranch=%s'", env.Str(env.BuildBranch))) } if env.Str("BUILD_COMMIT") != "" { flags = append(flags, "-X", fmt.Sprintf("'github.com/mysteriumnetwork/node/metadata.BuildCommit=%s'", env.Str("BUILD_COMMIT"))) } if env.Str(env.BuildNumber) != "" { flags = append(flags, "-X", fmt.Sprintf("'github.com/mysteriumnetwork/node/metadata.BuildNumber=%s'", env.Str(env.BuildNumber))) } if env.Str(env.BuildVersion) != "" { flags = append(flags, "-X", fmt.Sprintf("'github.com/mysteriumnetwork/node/metadata.Version=%s'", env.Str(env.BuildVersion))) } return flags } func buildCrossBinary(os, arch string) error { return sh.Run("bin/build_xgo", os+"/"+arch) } func buildBinary(source, target string) error { targetOS, ok := os.LookupEnv("GOOS") if !ok { targetOS = runtime.GOOS } targetArch, ok := os.LookupEnv("GOARCH") if !ok { targetArch = runtime.GOARCH } return buildBinaryFor(source, target, targetOS, targetArch) } func buildBinaryFor(source, target, targetOS, targetArch string) error { log.Info().Msgf("Building %s -> %s %s/%s", source, target, targetOS, targetArch) buildDir, err := filepath.Abs(path.Join("build", target)) if err != nil { return err } var flags = []string{"build"} if env.Bool("FLAG_RACE") { flags = append(flags, "-race") } ldFlags := linkerFlags() flags = append(flags, fmt.Sprintf(`-ldflags=-w -s %s`, strings.Join(ldFlags, " "))) if targetOS == "windows" { target += ".exe" } flags = append(flags, "-o", path.Join(buildDir, target), source) envi := map[string]string{ "GOOS": targetOS, "GOARCH": targetArch, } return sh.RunWith(envi, "go", flags...) 
} func copyConfig(target string) error { dest, err := filepath.Abs(path.Join("build", target, "config")) if err != nil { return err } common, err := filepath.Abs(path.Join("bin", "package", "config", "common")) if err != nil { return err } if err := fileutil.CopyDirs(common, dest); err != nil { return err } targetOS, ok := os.LookupEnv("GOOS") if !ok { targetOS = runtime.GOOS } osSpecific, err := filepath.Abs(path.Join("bin", "package", "config", targetOS)) if err := fileutil.CopyDirs(osSpecific, dest); err != nil { return err } return nil }
1
17,071
Just a thought: any further extensions of the `buildBinaryFor` signature could be done as `buildBinaryFor(..., opts ...BuildOption)` for better readability.
mysteriumnetwork-node
go
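A hedged sketch of the functional-options signature the reviewer has in mind; `BuildOption`, `Static`, and the config struct are assumptions for illustration, not existing code in the repo:

```go
package packages

type buildConfig struct {
	static bool
}

// BuildOption mutates the build configuration (hypothetical type).
type BuildOption func(*buildConfig)

// Static requests a statically linked binary (hypothetical helper).
func Static() BuildOption {
	return func(c *buildConfig) { c.static = true }
}

func buildBinaryFor(source, target, targetOS, targetArch string, opts ...BuildOption) error {
	var cfg buildConfig
	for _, opt := range opts {
		opt(&cfg)
	}
	// ... consult cfg.static where the boolean parameter was used before.
	return nil
}
```

Call sites then stay readable as knobs accumulate: `buildBinaryFor(src, dst, "linux", "amd64")` versus `buildBinaryFor(src, dst, "linux", "amd64", Static())`.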
@@ -44,8 +44,11 @@ namespace OpenTelemetry.Metrics var options = new InMemoryExporterOptions(); configure?.Invoke(options); - var exporter = new InMemoryMetricExporter(exportedItems, options); - return builder.AddMetricReader(new PeriodicExportingMetricReader(exporter, options.MetricExportIntervalMilliseconds)); + + // var exporter = new InMemoryMetricExporter(exportedItems, options); + // return builder.AddMetricReader(new PeriodicExportingMetricReader(exporter, options.MetricExportIntervalMilliseconds)); + + return builder; } } }
1
// <copyright file="InMemoryExporterMetricHelperExtensions.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using OpenTelemetry.Exporter; namespace OpenTelemetry.Metrics { public static class InMemoryExporterMetricHelperExtensions { /// <summary> /// Adds InMemory exporter to the TracerProvider. /// </summary> /// <param name="builder"><see cref="MeterProviderBuilder"/> builder to use.</param> /// <param name="exportedItems">Collection which will be populated with the exported MetricItem.</param> /// <param name="configure">Exporter configuration options.</param> /// <returns>The instance of <see cref="MeterProviderBuilder"/> to chain the calls.</returns> [System.Diagnostics.CodeAnalysis.SuppressMessage("Reliability", "CA2000:Dispose objects before losing scope", Justification = "The objects should not be disposed.")] public static MeterProviderBuilder AddInMemoryExporter(this MeterProviderBuilder builder, ICollection<Metric> exportedItems, Action<InMemoryExporterOptions> configure = null) { if (builder == null) { throw new ArgumentNullException(nameof(builder)); } if (exportedItems == null) { throw new ArgumentNullException(nameof(exportedItems)); } var options = new InMemoryExporterOptions(); configure?.Invoke(options); var exporter = new InMemoryMetricExporter(exportedItems, options); return builder.AddMetricReader(new PeriodicExportingMetricReader(exporter, options.MetricExportIntervalMilliseconds)); } } }
1
21,285
I'll fix the InMemoryExporter right after this PR lands
open-telemetry-opentelemetry-dotnet
.cs
@@ -11098,7 +11098,7 @@ bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreat auto physical_device_state = GetPhysicalDeviceState(); if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) { - if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, + if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physical_device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery, "%s: surface capabilities not retrieved for this physical device", func_name)) return true;
1
/* Copyright (c) 2015-2019 The Khronos Group Inc. * Copyright (c) 2015-2019 Valve Corporation * Copyright (c) 2015-2019 LunarG, Inc. * Copyright (C) 2015-2019 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Cody Northrop <[email protected]> * Author: Michael Lentine <[email protected]> * Author: Tobin Ehlis <[email protected]> * Author: Chia-I Wu <[email protected]> * Author: Chris Forbes <[email protected]> * Author: Mark Lobodzinski <[email protected]> * Author: Ian Elliott <[email protected]> * Author: Dave Houlton <[email protected]> * Author: Dustin Graves <[email protected]> * Author: Jeremy Hayes <[email protected]> * Author: Jon Ashburn <[email protected]> * Author: Karl Schultz <[email protected]> * Author: Mark Young <[email protected]> * Author: Mike Schuchardt <[email protected]> * Author: Mike Weiblen <[email protected]> * Author: Tony Barbour <[email protected]> * Author: John Zulauf <[email protected]> * Author: Shannon McPherson <[email protected]> */ // Allow use of STL min and max functions in Windows #define NOMINMAX #include <algorithm> #include <array> #include <assert.h> #include <cmath> #include <iostream> #include <list> #include <map> #include <memory> #include <mutex> #include <set> #include <sstream> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <string> #include <valarray> #include "vk_loader_platform.h" #include "vk_dispatch_table_helper.h" #include "vk_enum_string_helper.h" #include "chassis.h" #include "convert_to_renderpass2.h" #include "core_validation.h" #include "buffer_validation.h" #include "shader_validation.h" #include "vk_layer_utils.h" // These functions are defined *outside* the core_validation namespace as their type // is also defined outside that namespace size_t PipelineLayoutCompatDef::hash() const { hash_util::HashCombiner hc; // The set number is integral to the CompatDef's distinctiveness hc << set << push_constant_ranges.get(); const auto &descriptor_set_layouts = *set_layouts_id.get(); for (uint32_t i = 0; i <= set; i++) { hc << descriptor_set_layouts[i].get(); } return hc.Value(); } bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const { if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) { return false; } if (set_layouts_id == other.set_layouts_id) { // if it's the same set_layouts_id, then *any* subset will match return true; } // They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match const auto &descriptor_set_layouts = *set_layouts_id.get(); assert(set < descriptor_set_layouts.size()); const auto &other_ds_layouts = *other.set_layouts_id.get(); assert(set < other_ds_layouts.size()); for (uint32_t i = 0; i <= set; i++) { if (descriptor_set_layouts[i] != other_ds_layouts[i]) { return false; } } return true; } using std::max; using std::string; using std::stringstream; using std::unique_ptr; using std::unordered_map; using std::unordered_set; using std::vector; // WSI Image Objects bypass usual Image 
Object creation methods. A special Memory // Object value will be used to identify them internally. static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1); // 2nd special memory handle used to flag object as unbound from memory static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1); // Get the global map of pending releases GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap( const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) { return qfo_release_image_barrier_map; } GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap( const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) { return qfo_release_buffer_barrier_map; } // Get the image viewstate for a given framebuffer attachment IMAGE_VIEW_STATE *ValidationStateTracker::GetAttachmentImageViewState(FRAMEBUFFER_STATE *framebuffer, uint32_t index) { assert(framebuffer && (index < framebuffer->createInfo.attachmentCount)); const VkImageView &image_view = framebuffer->createInfo.pAttachments[index]; return GetImageViewState(image_view); } EVENT_STATE *ValidationStateTracker::GetEventState(VkEvent event) { auto it = eventMap.find(event); if (it == eventMap.end()) { return nullptr; } return &it->second; } QUEUE_STATE *ValidationStateTracker::GetQueueState(VkQueue queue) { auto it = queueMap.find(queue); if (it == queueMap.end()) { return nullptr; } return &it->second; } PHYSICAL_DEVICE_STATE *ValidationStateTracker::GetPhysicalDeviceState(VkPhysicalDevice phys) { auto *phys_dev_map = ((physical_device_map.size() > 0) ? &physical_device_map : &instance_state->physical_device_map); auto it = phys_dev_map->find(phys); if (it == phys_dev_map->end()) { return nullptr; } return &it->second; } PHYSICAL_DEVICE_STATE *ValidationStateTracker::GetPhysicalDeviceState() { return physical_device_state; } // Return ptr to memory binding for given handle of specified type BINDABLE *ValidationStateTracker::GetObjectMemBinding(const VulkanTypedHandle &typed_handle) { switch (typed_handle.type) { case kVulkanObjectTypeImage: return GetImageState(VkImage(typed_handle.handle)); case kVulkanObjectTypeBuffer: return GetBufferState(VkBuffer(typed_handle.handle)); default: break; } return nullptr; } ImageSubresourceLayoutMap::InitialLayoutState::InitialLayoutState(const CMD_BUFFER_STATE &cb_state, const IMAGE_VIEW_STATE *view_state) : image_view(VK_NULL_HANDLE), aspect_mask(0), label(cb_state.debug_label) { if (view_state) { image_view = view_state->image_view; aspect_mask = view_state->create_info.subresourceRange.aspectMask; } } std::string FormatDebugLabel(const char *prefix, const LoggingLabel &label) { if (label.Empty()) return std::string(); std::string out; string_sprintf(&out, "%sVkDebugUtilsLabel(name='%s' color=[%g, %g %g, %g])", prefix, label.name.c_str(), label.color[0], label.color[1], label.color[2], label.color[3]); return out; } // the ImageLayoutMap implementation bakes in the number of valid aspects -- we have to choose the correct one at construction time template <uint32_t kThreshold> static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactoryByAspect(const IMAGE_STATE &image_state) { ImageSubresourceLayoutMap *map = nullptr; switch (image_state.full_range.aspectMask) { case VK_IMAGE_ASPECT_COLOR_BIT: map = new ImageSubresourceLayoutMapImpl<ColorAspectTraits, kThreshold>(image_state); break; case VK_IMAGE_ASPECT_DEPTH_BIT: map = new ImageSubresourceLayoutMapImpl<DepthAspectTraits, 
kThreshold>(image_state); break; case VK_IMAGE_ASPECT_STENCIL_BIT: map = new ImageSubresourceLayoutMapImpl<StencilAspectTraits, kThreshold>(image_state); break; case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT: map = new ImageSubresourceLayoutMapImpl<DepthStencilAspectTraits, kThreshold>(image_state); break; case VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT: map = new ImageSubresourceLayoutMapImpl<Multiplane2AspectTraits, kThreshold>(image_state); break; case VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT: map = new ImageSubresourceLayoutMapImpl<Multiplane3AspectTraits, kThreshold>(image_state); break; } assert(map); // We shouldn't be able to get here null unless the traits cases are incomplete return std::unique_ptr<ImageSubresourceLayoutMap>(map); } static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactory(const IMAGE_STATE &image_state) { std::unique_ptr<ImageSubresourceLayoutMap> map; const uint32_t kAlwaysDenseLimit = 16; // About a cacheline on deskop architectures if (image_state.full_range.layerCount <= kAlwaysDenseLimit) { // Create a dense row map map = LayoutMapFactoryByAspect<0>(image_state); } else { // Create an initially sparse row map map = LayoutMapFactoryByAspect<kAlwaysDenseLimit>(image_state); } return map; } // The const variant only need the image as it is the key for the map const ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(const CMD_BUFFER_STATE *cb_state, VkImage image) { auto it = cb_state->image_layout_map.find(image); if (it == cb_state->image_layout_map.cend()) { return nullptr; } return it->second.get(); } // The non-const variant only needs the image state, as the factory requires it to construct a new entry ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE &image_state) { auto it = cb_state->image_layout_map.find(image_state.image); if (it == cb_state->image_layout_map.end()) { // Empty slot... fill it in. 
auto insert_pair = cb_state->image_layout_map.insert(std::make_pair(image_state.image, LayoutMapFactory(image_state))); assert(insert_pair.second); ImageSubresourceLayoutMap *new_map = insert_pair.first->second.get(); assert(new_map); return new_map; } return it->second.get(); } void CoreChecks::AddMemObjInfo(void *object, const VkDeviceMemory mem, const VkMemoryAllocateInfo *pAllocateInfo) { assert(object != NULL); auto *mem_info = new DEVICE_MEMORY_STATE(object, mem, pAllocateInfo); memObjMap[mem] = unique_ptr<DEVICE_MEMORY_STATE>(mem_info); auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext); if (dedicated) { mem_info->is_dedicated = true; mem_info->dedicated_buffer = dedicated->buffer; mem_info->dedicated_image = dedicated->image; } auto export_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(pAllocateInfo->pNext); if (export_info) { mem_info->is_export = true; mem_info->export_handle_type_flags = export_info->handleTypes; } } // Create binding link between given sampler and command buffer node void CoreChecks::AddCommandBufferBindingSampler(CMD_BUFFER_STATE *cb_node, SAMPLER_STATE *sampler_state) { auto inserted = cb_node->object_bindings.emplace(sampler_state->sampler, kVulkanObjectTypeSampler); if (inserted.second) { // Only need to complete the cross-reference if this is a new item sampler_state->cb_bindings.insert(cb_node); } } // Create binding link between given image node and command buffer node void CoreChecks::AddCommandBufferBindingImage(CMD_BUFFER_STATE *cb_node, IMAGE_STATE *image_state) { // Skip validation if this image was created through WSI if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) { // First update cb binding for image auto image_inserted = cb_node->object_bindings.emplace(image_state->image, kVulkanObjectTypeImage); if (image_inserted.second) { // Only need to continue if this is a new item (the rest of the work would have be done previous) image_state->cb_bindings.insert(cb_node); // Now update CB binding in MemObj mini CB list for (auto mem_binding : image_state->GetBoundMemory()) { DEVICE_MEMORY_STATE *pMemInfo = GetDevMemState(mem_binding); if (pMemInfo) { // Now update CBInfo's Mem reference list auto mem_inserted = cb_node->memObjs.insert(mem_binding); if (mem_inserted.second) { // Only need to complete the cross-reference if this is a new item pMemInfo->cb_bindings.insert(cb_node); } } } } } } // Create binding link between given image view node and its image with command buffer node void CoreChecks::AddCommandBufferBindingImageView(CMD_BUFFER_STATE *cb_node, IMAGE_VIEW_STATE *view_state) { // First add bindings for imageView auto inserted = cb_node->object_bindings.emplace(view_state->image_view, kVulkanObjectTypeImageView); if (inserted.second) { // Only need to continue if this is a new item view_state->cb_bindings.insert(cb_node); auto image_state = GetImageState(view_state->create_info.image); // Add bindings for image within imageView if (image_state) { AddCommandBufferBindingImage(cb_node, image_state); } } } // Create binding link between given buffer node and command buffer node void CoreChecks::AddCommandBufferBindingBuffer(CMD_BUFFER_STATE *cb_node, BUFFER_STATE *buffer_state) { // First update cb binding for buffer auto buffer_inserted = cb_node->object_bindings.emplace(buffer_state->buffer, kVulkanObjectTypeBuffer); if (buffer_inserted.second) { // Only need to continue if this is a new item buffer_state->cb_bindings.insert(cb_node); // Now update CB binding in MemObj mini CB list 
for (auto mem_binding : buffer_state->GetBoundMemory()) { DEVICE_MEMORY_STATE *pMemInfo = GetDevMemState(mem_binding); if (pMemInfo) { // Now update CBInfo's Mem reference list auto inserted = cb_node->memObjs.insert(mem_binding); if (inserted.second) { // Only need to complete the cross-reference if this is a new item pMemInfo->cb_bindings.insert(cb_node); } } } } } // Create binding link between given buffer view node and its buffer with command buffer node void CoreChecks::AddCommandBufferBindingBufferView(CMD_BUFFER_STATE *cb_node, BUFFER_VIEW_STATE *view_state) { // First add bindings for bufferView auto inserted = cb_node->object_bindings.emplace(view_state->buffer_view, kVulkanObjectTypeBufferView); if (inserted.second) { // Only need to complete the cross-reference if this is a new item view_state->cb_bindings.insert(cb_node); auto buffer_state = GetBufferState(view_state->create_info.buffer); // Add bindings for buffer within bufferView if (buffer_state) { AddCommandBufferBindingBuffer(cb_node, buffer_state); } } } // For every mem obj bound to particular CB, free bindings related to that CB void CoreChecks::ClearCmdBufAndMemReferences(CMD_BUFFER_STATE *cb_node) { if (cb_node) { if (cb_node->memObjs.size() > 0) { for (auto mem : cb_node->memObjs) { DEVICE_MEMORY_STATE *pInfo = GetDevMemState(mem); if (pInfo) { pInfo->cb_bindings.erase(cb_node); } } cb_node->memObjs.clear(); } } } // Clear a single object binding from given memory object void CoreChecks::ClearMemoryObjectBinding(const VulkanTypedHandle &typed_handle, VkDeviceMemory mem) { DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem); // This obj is bound to a memory object. Remove the reference to this object in that memory object's list if (mem_info) { mem_info->obj_bindings.erase(typed_handle); } } // ClearMemoryObjectBindings clears the binding of objects to memory // For the given object it pulls the memory bindings and makes sure that the bindings // no longer refer to the object being cleared. This occurs when objects are destroyed. void CoreChecks::ClearMemoryObjectBindings(const VulkanTypedHandle &typed_handle) { BINDABLE *mem_binding = GetObjectMemBinding(typed_handle); if (mem_binding) { if (!mem_binding->sparse) { ClearMemoryObjectBinding(typed_handle, mem_binding->binding.mem); } else { // Sparse, clear all bindings for (auto &sparse_mem_binding : mem_binding->sparse_bindings) { ClearMemoryObjectBinding(typed_handle, sparse_mem_binding.mem); } } } } // For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value. bool CoreChecks::VerifyBoundMemoryIsValid(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *api_name, const char *error_code) { bool result = false; auto type_name = object_string[typed_handle.type]; if (VK_NULL_HANDLE == mem) { result = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, typed_handle.handle, error_code, "%s: %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().", api_name, report_data->FormatHandle(typed_handle).c_str(), type_name + 2); } else if (MEMORY_UNBOUND == mem) { result = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, typed_handle.handle, error_code, "%s: %s used with no memory bound and previously bound memory was freed. 
Memory must not be freed " "prior to this operation.", api_name, report_data->FormatHandle(typed_handle).c_str()); } return result; } // Check to see if memory was ever bound to this image bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) { bool result = false; if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) { result = VerifyBoundMemoryIsValid(image_state->binding.mem, VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage), api_name, error_code); } return result; } // Check to see if memory was bound to this buffer bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name, const char *error_code) { bool result = false; if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) { result = VerifyBoundMemoryIsValid(buffer_state->binding.mem, VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer), api_name, error_code); } return result; } // SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object. // Corresponding valid usage checks are in ValidateSetMemBinding(). void CoreChecks::SetMemBinding(VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset, const VulkanTypedHandle &typed_handle) { assert(mem_binding); mem_binding->binding.mem = mem; mem_binding->UpdateBoundMemorySet(); // force recreation of cached set mem_binding->binding.offset = memory_offset; mem_binding->binding.size = mem_binding->requirements.size; if (mem != VK_NULL_HANDLE) { DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem); if (mem_info) { mem_info->obj_bindings.insert(typed_handle); // For image objects, make sure default memory state is correctly set // TODO : What's the best/correct way to handle this? if (kVulkanObjectTypeImage == typed_handle.type) { auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding); if (image_state) { VkImageCreateInfo ici = image_state->createInfo; if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { // TODO:: More memory state transition stuff. } } } } } } // Valid usage checks for a call to SetMemBinding(). // For NULL mem case, output warning // Make sure given object is in global object map // IF a previous binding existed, output validation error // Otherwise, add reference from objectInfo to memoryInfo // Add reference off of objInfo // TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions. 
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *apiName) { bool skip = false; // It's an error to bind an object to NULL memory if (mem != VK_NULL_HANDLE) { BINDABLE *mem_binding = GetObjectMemBinding(typed_handle); assert(mem_binding); if (mem_binding->sparse) { const char *error_code = "VUID-vkBindImageMemory-image-01045"; const char *handle_type = "IMAGE"; if (typed_handle.type == kVulkanObjectTypeBuffer) { error_code = "VUID-vkBindBufferMemory-buffer-01030"; handle_type = "BUFFER"; } else { assert(typed_handle.type == kVulkanObjectTypeImage); } skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), error_code, "In %s, attempting to bind %s to %s which was created with sparse memory flags " "(VK_%s_CREATE_SPARSE_*_BIT).", apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(), handle_type); } DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem); if (mem_info) { DEVICE_MEMORY_STATE *prev_binding = GetDevMemState(mem_binding->binding.mem); if (prev_binding) { const char *error_code = "VUID-vkBindImageMemory-image-01044"; if (typed_handle.type == kVulkanObjectTypeBuffer) { error_code = "VUID-vkBindBufferMemory-buffer-01029"; } else { assert(typed_handle.type == kVulkanObjectTypeImage); } skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), error_code, "In %s, attempting to bind %s to %s which has already been bound to %s.", apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(), report_data->FormatHandle(prev_binding->mem).c_str()); } else if (mem_binding->binding.mem == MEMORY_UNBOUND) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), kVUID_Core_MemTrack_RebindObject, "In %s, attempting to bind %s to %s which was previous bound to memory that has " "since been freed. Memory bindings are immutable in " "Vulkan so this attempt to bind to new memory is not allowed.", apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str()); } } } return skip; } // For NULL mem case, clear any previous binding Else... 
// Make sure given object is in its object map // IF a previous binding existed, update binding // Add reference from objectInfo to memoryInfo // Add reference off of object's binding info // Return VK_TRUE if addition is successful, VK_FALSE otherwise bool CoreChecks::SetSparseMemBinding(MEM_BINDING binding, const VulkanTypedHandle &typed_handle) { bool skip = VK_FALSE; // Handle NULL case separately, just clear previous binding & decrement reference if (binding.mem == VK_NULL_HANDLE) { // TODO : This should cause the range of the resource to be unbound according to spec } else { BINDABLE *mem_binding = GetObjectMemBinding(typed_handle); assert(mem_binding); if (mem_binding) { // Invalid handles are reported by object tracker, but Get returns NULL for them, so avoid SEGV here assert(mem_binding->sparse); DEVICE_MEMORY_STATE *mem_info = GetDevMemState(binding.mem); if (mem_info) { mem_info->obj_bindings.insert(typed_handle); // Need to set mem binding for this object mem_binding->sparse_bindings.insert(binding); mem_binding->UpdateBoundMemorySet(); } } } return skip; } bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name, const char *error_code, bool optional = false) { bool skip = false; if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), error_code, "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.", cmd_name, parameter_name); } else if (queue_family_index_map.find(queue_family) == queue_family_index_map.end()) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), error_code, "%s: %s (= %" PRIu32 ") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.", cmd_name, parameter_name, queue_family); } return skip; } bool CoreChecks::ValidateQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families, const char *cmd_name, const char *array_parameter_name, const char *unique_error_code, const char *valid_error_code, bool optional = false) { bool skip = false; if (queue_families) { std::unordered_set<uint32_t> set; for (uint32_t i = 0; i < queue_family_count; ++i) { std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]"; if (set.count(queue_families[i])) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), unique_error_code, "%s: %s (=%" PRIu32 ") is not unique within %s array.", cmd_name, parameter_name.c_str(), queue_families[i], array_parameter_name); } else { set.insert(queue_families[i]); skip |= ValidateDeviceQueueFamily(queue_families[i], cmd_name, parameter_name.c_str(), valid_error_code, optional); } } } return skip; } // Check object status for selected flag state bool CoreChecks::ValidateStatus(CMD_BUFFER_STATE *pNode, CBStatusFlags status_mask, VkFlags msg_flags, const char *fail_msg, const char *msg_code) { if (!(pNode->status & status_mask)) { return log_msg(report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pNode->commandBuffer), msg_code, "%s: %s..", report_data->FormatHandle(pNode->commandBuffer).c_str(), fail_msg); } return false; } RENDER_PASS_STATE *ValidationStateTracker::GetRenderPassState(VkRenderPass renderpass) { auto it = 
renderPassMap.find(renderpass); if (it == renderPassMap.end()) { return nullptr; } return it->second.get(); } std::shared_ptr<RENDER_PASS_STATE> ValidationStateTracker::GetRenderPassStateSharedPtr(VkRenderPass renderpass) { auto it = renderPassMap.find(renderpass); if (it == renderPassMap.end()) { return nullptr; } return it->second; } std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(CoreChecks const *dev_data, VkDescriptorSetLayout dsLayout) { auto it = dev_data->descriptorSetLayoutMap.find(dsLayout); if (it == dev_data->descriptorSetLayoutMap.end()) { return nullptr; } return it->second; } // Return true if for a given PSO, the given state enum is dynamic, else return false static bool IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) { if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) { for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) { if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true; } } return false; } // Validate state stored as flags at time of draw call bool CoreChecks::ValidateDrawStateFlags(CMD_BUFFER_STATE *pCB, const PIPELINE_STATE *pPipe, bool indexed, const char *msg_code) { bool result = false; if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) { result |= ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic line width state not set for this command buffer", msg_code); } if (pPipe->graphicsPipelineCI.pRasterizationState && (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic depth bias state not set for this command buffer", msg_code); } if (pPipe->blendConstantsEnabled) { result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic blend constants state not set for this command buffer", msg_code); } if (pPipe->graphicsPipelineCI.pDepthStencilState && (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic depth bounds state not set for this command buffer", msg_code); } if (pPipe->graphicsPipelineCI.pDepthStencilState && (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil read mask state not set for this command buffer", msg_code); result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil write mask state not set for this command buffer", msg_code); result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil reference state not set for this command buffer", msg_code); } if (indexed) { result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT, "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code); } return result; } bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach, const char *msg, const char *caller, const char *error_code) { return 
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, HandleToUint64(rp1_state->renderPass), error_code, "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s Attachment %u is not " "compatible with %u: %s.", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(), primary_attach, secondary_attach, msg); } bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach, const char *caller, const char *error_code) { bool skip = false; const auto &primaryPassCI = rp1_state->createInfo; const auto &secondaryPassCI = rp2_state->createInfo; if (primaryPassCI.attachmentCount <= primary_attach) { primary_attach = VK_ATTACHMENT_UNUSED; } if (secondaryPassCI.attachmentCount <= secondary_attach) { secondary_attach = VK_ATTACHMENT_UNUSED; } if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) { return skip; } if (primary_attach == VK_ATTACHMENT_UNUSED) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "The first is unused while the second is not.", caller, error_code); return skip; } if (secondary_attach == VK_ATTACHMENT_UNUSED) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "The second is unused while the first is not.", caller, error_code); return skip; } if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different formats.", caller, error_code); } if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different samples.", caller, error_code); } if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different flags.", caller, error_code); } return skip; } bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass, const char *caller, const char *error_code) { bool skip = false; const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass]; const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass]; uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount); for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) { uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.inputAttachmentCount) { primary_input_attach = primary_desc.pInputAttachments[i].attachment; } if (i < secondary_desc.inputAttachmentCount) { secondary_input_attach = secondary_desc.pInputAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach, 
secondary_input_attach, caller, error_code); } uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount); for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) { uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.colorAttachmentCount) { primary_color_attach = primary_desc.pColorAttachments[i].attachment; } if (i < secondary_desc.colorAttachmentCount) { secondary_color_attach = secondary_desc.pColorAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach, secondary_color_attach, caller, error_code); if (rp1_state->createInfo.subpassCount > 1) { uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) { primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment; } if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) { secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach, secondary_resolve_attach, caller, error_code); } } uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED; if (primary_desc.pDepthStencilAttachment) { primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment; } if (secondary_desc.pDepthStencilAttachment) { secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach, secondary_depthstencil_attach, caller, error_code); return skip; } // Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible. 
// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and // will then feed into this function bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller, const char *error_code) { bool skip = false; if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, HandleToUint64(rp1_state->renderPass), error_code, "%s: RenderPasses incompatible between %s w/ %s with a subpassCount of %u and %s w/ " "%s with a subpassCount of %u.", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), rp1_state->createInfo.subpassCount, type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(), rp2_state->createInfo.subpassCount); } else { for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) { skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code); } } return skip; } // For given pipeline, return number of MSAA samples, or one if MSAA disabled static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) { if (pipe->graphicsPipelineCI.pMultisampleState != NULL && VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) { return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples; } return VK_SAMPLE_COUNT_1_BIT; } static void ListBits(std::ostream &s, uint32_t bits) { for (int i = 0; i < 32 && bits; i++) { if (bits & (1 << i)) { s << i; bits &= ~(1 << i); if (bits) { s << ","; } } } } // Validate draw-time state related to the PSO bool CoreChecks::ValidatePipelineDrawtimeState(LAST_BOUND_STATE const &state, const CMD_BUFFER_STATE *pCB, CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) { bool skip = false; // Verify vertex binding if (pPipeline->vertex_binding_descriptions_.size() > 0) { for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) { const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding; if ((pCB->current_draw_data.vertex_buffer_bindings.size() < (vertex_binding + 1)) || (pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer == VK_NULL_HANDLE)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds, "%s expects that this Command Buffer's vertex binding Index %u should be set via " "vkCmdBindVertexBuffers. 
This is because VkVertexInputBindingDescription struct at " "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.", report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), vertex_binding, i, vertex_binding); } } // Verify vertex attribute address alignment for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) { const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i]; const auto vertex_binding = attribute_description.binding; const auto attribute_offset = attribute_description.offset; const auto attribute_format = attribute_description.format; const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding); if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) && (vertex_binding < pCB->current_draw_data.vertex_buffer_bindings.size()) && (pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer != VK_NULL_HANDLE)) { const auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride; const auto vertex_buffer_offset = pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].offset; const auto buffer_state = GetBufferState(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer); // Use only memory binding offset as base memory should be properly aligned by the driver const auto buffer_binding_address = buffer_state->binding.offset + vertex_buffer_offset; // Use 1 as vertex/instance index to use buffer stride as well const auto attrib_address = buffer_binding_address + vertex_buffer_stride + attribute_offset; uint32_t vtx_attrib_req_alignment = FormatElementSize(attribute_format); if (FormatElementIsTexel(attribute_format)) { vtx_attrib_req_alignment /= FormatChannelCount(attribute_format); } if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer), kVUID_Core_DrawState_InvalidVtxAttributeAlignment, "Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER " from %s and vertex %s.", i, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), report_data->FormatHandle(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer).c_str()); } } } } else { if ((!pCB->current_draw_data.vertex_buffer_bindings.empty()) && (!pCB->vertex_buffer_used)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds, "Vertex buffers are bound to %s but no vertex buffers are attached to %s.", report_data->FormatHandle(pCB->commandBuffer).c_str(), report_data->FormatHandle(state.pipeline_state->pipeline).c_str()); } } // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count. // Skip check if rasterization is disabled or there is no viewport. 
if ((!pPipeline->graphicsPipelineCI.pRasterizationState || (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) && pPipeline->graphicsPipelineCI.pViewportState) { bool dynViewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT); bool dynScissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR); if (dynViewport) { const auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1; const auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask; if (missingViewportMask) { std::stringstream ss; ss << "Dynamic viewport(s) "; ListBits(ss, missingViewportMask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport()."; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str()); } } if (dynScissor) { const auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1; const auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask; if (missingScissorMask) { std::stringstream ss; ss << "Dynamic scissor(s) "; ListBits(ss, missingScissorMask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor()."; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str()); } } } // Verify that any MSAA request in PSO matches sample# in bound FB // Skip the check if rasterization is disabled. if (!pPipeline->graphicsPipelineCI.pRasterizationState || (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) { VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline); if (pCB->activeRenderPass) { const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr(); const VkSubpassDescription2KHR *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass]; uint32_t i; unsigned subpass_num_samples = 0; for (i = 0; i < subpass_desc->colorAttachmentCount; i++) { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment != VK_ATTACHMENT_UNUSED) subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples; } if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples; } if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples) && ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NumSamplesMismatch, "Num samples mismatch! 
At draw-time in %s with %u samples while current %s w/ " "%u samples!", report_data->FormatHandle(pPipeline->pipeline).c_str(), pso_num_samples, report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), subpass_num_samples); } } else { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NoActiveRenderpass, "No active render pass found at draw-time in %s!", report_data->FormatHandle(pPipeline->pipeline).c_str()); } } // Verify that PSO creation renderPass is compatible with active renderPass if (pCB->activeRenderPass) { // TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted // Error codes for renderpass and subpass mismatches auto rp_error = "VUID-vkCmdDraw-renderPass-02684", sp_error = "VUID-vkCmdDraw-subpass-02685"; switch (cmd_type) { case CMD_DRAWINDEXED: rp_error = "VUID-vkCmdDrawIndexed-renderPass-02684"; sp_error = "VUID-vkCmdDrawIndexed-subpass-02685"; break; case CMD_DRAWINDIRECT: rp_error = "VUID-vkCmdDrawIndirect-renderPass-02684"; sp_error = "VUID-vkCmdDrawIndirect-subpass-02685"; break; case CMD_DRAWINDIRECTCOUNTKHR: rp_error = "VUID-vkCmdDrawIndirectCountKHR-renderPass-02684"; sp_error = "VUID-vkCmdDrawIndirectCountKHR-subpass-02685"; break; case CMD_DRAWINDEXEDINDIRECT: rp_error = "VUID-vkCmdDrawIndexedIndirect-renderPass-02684"; sp_error = "VUID-vkCmdDrawIndexedIndirect-subpass-02685"; break; case CMD_DRAWINDEXEDINDIRECTCOUNTKHR: rp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-renderPass-02684"; sp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-subpass-02685"; break; case CMD_DRAWMESHTASKSNV: rp_error = "VUID-vkCmdDrawMeshTasksNV-renderPass-02684"; sp_error = "VUID-vkCmdDrawMeshTasksNV-subpass-02685"; break; case CMD_DRAWMESHTASKSINDIRECTNV: rp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-renderPass-02684"; sp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-subpass-02685"; break; case CMD_DRAWMESHTASKSINDIRECTCOUNTNV: rp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-renderPass-02684"; sp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-subpass-02685"; break; default: assert(CMD_DRAW == cmd_type); break; } if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) { // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass, "pipeline state object", pPipeline->rp_state.get(), caller, rp_error); } if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.", pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass); } } return skip; } // For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to // pipelineLayout[layoutIndex] static bool VerifySetLayoutCompatibility(const cvdescriptorset::DescriptorSet *descriptor_set, PIPELINE_LAYOUT_STATE const *pipeline_layout, const uint32_t layoutIndex, string &errorMsg) { auto num_sets = pipeline_layout->set_layouts.size(); if (layoutIndex >= num_sets) { stringstream errorStr; errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets 
<< " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index " << layoutIndex; errorMsg = errorStr.str(); return false; } if (descriptor_set->IsPushDescriptor()) return true; auto layout_node = pipeline_layout->set_layouts[layoutIndex]; return cvdescriptorset::VerifySetLayoutCompatibility(layout_node.get(), descriptor_set->GetLayout().get(), &errorMsg); } // Validate overall state at the time of a draw call bool CoreChecks::ValidateCmdBufDrawState(CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const bool indexed, const VkPipelineBindPoint bind_point, const char *function, const char *pipe_err_code, const char *state_err_code) { bool result = false; auto const &state = cb_node->lastBound[bind_point]; PIPELINE_STATE *pPipe = state.pipeline_state; if (nullptr == pPipe) { return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), pipe_err_code, "Must not call %s on this command buffer while there is no %s pipeline bound.", function, bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute"); } // First check flag states if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result = ValidateDrawStateFlags(cb_node, pPipe, indexed, state_err_code); // Now complete other state checks string errorString; auto const &pipeline_layout = pPipe->pipeline_layout; for (const auto &set_binding_pair : pPipe->active_slots) { uint32_t setIndex = set_binding_pair.first; // If valid set is not bound throw an error if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) { result |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_DescriptorSetNotBound, "%s uses set #%u but that set is not bound.", report_data->FormatHandle(pPipe->pipeline).c_str(), setIndex); } else if (!VerifySetLayoutCompatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex, errorString)) { // Set is bound but not compatible w/ overlapping pipeline_layout from PSO VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet(); result |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(setHandle), kVUID_Core_DrawState_PipelineLayoutsIncompatible, "%s bound as set #%u is not compatible with overlapping %s due to: %s", report_data->FormatHandle(setHandle).c_str(), setIndex, report_data->FormatHandle(pipeline_layout.layout).c_str(), errorString.c_str()); } else { // Valid set is bound and layout compatible, validate that it's updated // Pull the set node cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex]; // Validate the draw-time state for this descriptor set std::string err_str; if (!descriptor_set->IsPushDescriptor()) { // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks. // Here, the currently bound pipeline determines whether an image validation check is redundant... // for images are the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline. 
const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_node, pPipe); const auto &binding_req_map = reduced_map.Map(); if (!ValidateDrawState(descriptor_set, binding_req_map, state.dynamicOffsets[setIndex], cb_node, function, &err_str)) { auto set = descriptor_set->GetSet(); result |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(set), kVUID_Core_DrawState_DescriptorSetNotUpdated, "%s bound as set #%u encountered the following validation error at %s time: %s", report_data->FormatHandle(set).c_str(), setIndex, function, err_str.c_str()); } } } } // Check general pipeline state that needs to be validated at drawtime if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pPipe, function); return result; } void CoreChecks::UpdateDrawState(CMD_BUFFER_STATE *cb_state, const VkPipelineBindPoint bind_point) { auto const &state = cb_state->lastBound[bind_point]; PIPELINE_STATE *pPipe = state.pipeline_state; if (VK_NULL_HANDLE != state.pipeline_layout) { for (const auto &set_binding_pair : pPipe->active_slots) { uint32_t setIndex = set_binding_pair.first; // Pull the set node cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex]; if (!descriptor_set->IsPushDescriptor()) { // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_state); const auto &binding_req_map = reduced_map.Map(); // Bind this set and its active descriptor resources to the command buffer descriptor_set->UpdateDrawState(this, cb_state, binding_req_map); // For given active slots record updated images & buffers descriptor_set->GetStorageUpdates(binding_req_map, &cb_state->updateBuffers, &cb_state->updateImages); } } } if (!pPipe->vertex_binding_descriptions_.empty()) { cb_state->vertex_buffer_used = true; } } bool CoreChecks::ValidatePipelineLocked(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) { bool skip = false; PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get(); // If create derivative bit is set, check that we've specified a base // pipeline correctly, and that the base pipeline was created to allow // derivatives. 
if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { PIPELINE_STATE *pBasePipeline = nullptr; if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^ (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) { // This check is a superset of "VUID-VkGraphicsPipelineCreateInfo-flags-00724" and // "VUID-VkGraphicsPipelineCreateInfo-flags-00725" skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState, "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified"); } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) { if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-vkCreateGraphicsPipelines-flags-00720", "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline."); } else { pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get(); } } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) { pBasePipeline = GetPipelineState(pPipeline->graphicsPipelineCI.basePipelineHandle); } if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState, "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives."); } } return skip; } // UNLOCKED pipeline validation. DO NOT lookup objects in the CoreChecks->* maps in this function. bool CoreChecks::ValidatePipelineUnlocked(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) { bool skip = false; PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get(); // Ensure the subpass index is valid. If not, then ValidateAndCapturePipelineShaderState // produces nonsense errors that confuse users. Other layers should already // emit errors for renderpass being invalid. 
auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass]; if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-00759", "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).", pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1); subpass_desc = nullptr; } if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) { const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState; if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746", "vkCreateGraphicsPipelines(): %s subpass %u has colorAttachmentCount of %u which doesn't " "match the pColorBlendState->attachmentCount of %u.", report_data->FormatHandle(pPipeline->rp_state->renderPass).c_str(), pPipeline->graphicsPipelineCI.subpass, subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount); } if (!enabled_features.core.independentBlend) { if (pPipeline->attachments.size() > 1) { VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0]; for (size_t i = 1; i < pPipeline->attachments.size(); i++) { // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains // only attachment state, so memcmp is best suited for the comparison if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]), sizeof(pAttachments[0]))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605", "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of " "pAttachments must be identical."); break; } } } } if (!enabled_features.core.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606", "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE."); } for (size_t i = 0; i < pPipeline->attachments.size(); i++) { if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].srcColorBlendFactor uses a dual-source blend factor (%d), but this 
device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor); } } if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor); } } if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor); } } if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor); } } } } if (ValidateAndCapturePipelineShaderState(pPipeline)) { skip = true; } // Each shader's stage must be unique if (pPipeline->duplicate_shaders) { for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) { if (pPipeline->duplicate_shaders & stage) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_InvalidPipelineCreateState, "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s", string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage))); } } } if (device_extensions.vk_nv_mesh_shader) { // VS or mesh is required if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 
VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-stage-02096", "Invalid Pipeline CreateInfo State: Vertex Shader or Mesh Shader required."); } // Can't mix mesh and VTG if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) && (pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-02095", "Invalid Pipeline CreateInfo State: Geometric shader stages must either be all mesh (mesh | task) " "or all VTG (vertex, tess control, tess eval, geom)."); } } else { // VS is required if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-stage-00727", "Invalid Pipeline CreateInfo State: Vertex Shader required."); } } if (!enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineShaderStageCreateInfo-stage-02091", "Invalid Pipeline CreateInfo State: Mesh Shader not supported."); } if (!enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineShaderStageCreateInfo-stage-02092", "Invalid Pipeline CreateInfo State: Task Shader not supported."); } // Either both or neither TC/TE shaders should be defined bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0; bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0; if (has_control && !has_eval) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-00729", "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair."); } if (!has_control && has_eval) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-00730", "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair."); } // Compute shaders should be specified independent of Gfx shaders if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-stage-00728", "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline."); } if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-02098", "Invalid Pipeline CreateInfo State: Missing pInputAssemblyState."); } // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid 
for tessellation pipelines. // Mismatching primitive topology and tessellation fails graphics pipeline creation. if (has_control && has_eval && (!pPipeline->graphicsPipelineCI.pInputAssemblyState || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-00736", "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for " "tessellation pipelines."); } if (pPipeline->graphicsPipelineCI.pInputAssemblyState) { if (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) { if (!has_control || !has_eval) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-topology-00737", "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid " "for tessellation pipelines."); } } if ((pPipeline->graphicsPipelineCI.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428", "topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.", string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } if ((enabled_features.core.geometryShader == VK_FALSE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429", "topology is %s and geometry shaders feature is not enabled. It is invalid.", string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } if ((enabled_features.core.tessellationShader == VK_FALSE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430", "topology is %s and tessellation shaders feature is not enabled. 
It is invalid.", string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } } // If a rasterization state is provided... if (pPipeline->graphicsPipelineCI.pRasterizationState) { if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) && (!enabled_features.core.depthClamp)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782", "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member " "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE."); } if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) && (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_InvalidFeature, "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member " "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the " "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled"); } // If rasterization is enabled... if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) { if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) && (!enabled_features.core.alphaToOne)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785", "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable " "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE."); } // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure if (subpass_desc && subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { if (!pPipeline->graphicsPipelineCI.pDepthStencilState) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752", "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled " "and subpass uses a depth/stencil attachment."); } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) && (!enabled_features.core.depthBounds)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598", "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the " "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be " "set to VK_FALSE."); } } // If subpass uses color attachments, pColorBlendState must be valid pointer if (subpass_desc) { uint32_t color_attachment_count = 0; for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) { if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) { ++color_attachment_count; } } if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) { skip |= log_msg(report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                                    "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
                                    "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
                                    "subpass uses color attachments.");
                }
            }
        }
    }

    if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-pStages-02097",
                        "Invalid Pipeline CreateInfo State: Missing pVertexInputState.");
    }

    auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
    if (vi != NULL) {
        for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
            VkFormat format = vi->pVertexAttributeDescriptions[j].format;
            // Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
            VkFormatProperties properties;
            DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties);
            if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(device), "VUID-VkVertexInputAttributeDescription-format-00623",
                                "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
                                "(%s) is not a supported vertex buffer format.",
                                pipelineIndex, j, string_VkFormat(format));
            }
        }
    }

    auto accumColorSamples = [subpass_desc, pPipeline](uint32_t &samples) {
        // subpass_desc is null when the subpass index was out of range; leave the accumulator untouched.
        if (!subpass_desc) return;
        for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) {
            const auto attachment = subpass_desc->pColorAttachments[i].attachment;
            if (attachment != VK_ATTACHMENT_UNUSED) {
                samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
            }
        }
    };

    if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples)) {
        uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
        uint32_t subpass_num_samples = 0;

        accumColorSamples(subpass_num_samples);

        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
            subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
        }

        // subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED.
        // Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED.
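        // Note (illustrative, with hypothetical values): VkSampleCountFlagBits values are single bits, so OR-ing each
        // used attachment's sample count into subpass_num_samples lets the one IsPowerOfTwo() test below detect mixed
        // counts. E.g. attachments with VK_SAMPLE_COUNT_4_BIT and VK_SAMPLE_COUNT_1_BIT accumulate to 0x5, which is
        // not a power of two, so the mismatch against rasterizationSamples is reported.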
        if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-00757",
                            "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
                            "does not match the number of samples of the RenderPass color and/or depth attachment.",
                            pipelineIndex, raster_samples);
        }
    }

    // Skip the mixed-samples checks entirely when the subpass index was invalid (subpass_desc is null).
    if (device_extensions.vk_amd_mixed_attachment_samples && subpass_desc) {
        VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
        for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
            if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
                max_sample_count =
                    std::max(max_sample_count,
                             pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
            }
        }
        if (subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            max_sample_count =
                std::max(max_sample_count,
                         pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
        }
        if (pPipeline->graphicsPipelineCI.pMultisampleState &&
            (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) &&
            (pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
                            "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
                            "attachment samples (%s) used in subpass %u.",
                            pipelineIndex,
                            string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
                            string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
        }
    }

    if (device_extensions.vk_nv_framebuffer_mixed_samples && subpass_desc) {
        uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
        uint32_t subpass_color_samples = 0;

        accumColorSamples(subpass_color_samples);

        if (subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
            const uint32_t subpass_depth_samples =
                static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);

            if (pPipeline->graphicsPipelineCI.pDepthStencilState) {
                const bool ds_test_enabled = (pPipeline->graphicsPipelineCI.pDepthStencilState->depthTestEnable == VK_TRUE) ||
                                             (pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) ||
                                             (pPipeline->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE);

                if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01411",
                                    "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
                                    "does not match the number of samples of the RenderPass depth attachment (%u).",
                                    pipelineIndex, raster_samples, subpass_depth_samples);
                }
            }
        }

        if (IsPowerOfTwo(subpass_color_samples)) {
            if (raster_samples < subpass_color_samples) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(device), "VUID-VkGraphicsPipelineCreateInfo-subpass-01412",
                                "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
                                "is not greater than or equal to the number of samples of the RenderPass color attachment (%u).",
                                pipelineIndex, raster_samples, subpass_color_samples);
            }

            if (pPipeline->graphicsPipelineCI.pMultisampleState) {
                if ((raster_samples > subpass_color_samples) &&
                    (pPipeline->graphicsPipelineCI.pMultisampleState->sampleShadingEnable == VK_TRUE)) {
                    skip |= log_msg(
                        report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                        "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415",
                        "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->sampleShadingEnable must be VK_FALSE when "
                        "pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) is greater than the number of samples of the "
                        "subpass color attachment (%u).",
                        pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples);
                }

                const auto *coverage_modulation_state = lvl_find_in_chain<VkPipelineCoverageModulationStateCreateInfoNV>(
                    pPipeline->graphicsPipelineCI.pMultisampleState->pNext);

                if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) {
                    if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                        HandleToUint64(device),
                                        "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405",
                                        "vkCreateGraphicsPipelines: pCreateInfos[%d] VkPipelineCoverageModulationStateCreateInfoNV "
                                        "coverageModulationTableCount of %u is invalid.",
                                        pipelineIndex, coverage_modulation_state->coverageModulationTableCount);
                    }
                }
            }
        }
    }

    // subpass_desc is dereferenced below, so skip this check when the subpass index was invalid.
    if (device_extensions.vk_nv_fragment_coverage_to_color && subpass_desc) {
        const auto coverage_to_color_state =
            lvl_find_in_chain<VkPipelineCoverageToColorStateCreateInfoNV>(pPipeline->graphicsPipelineCI.pMultisampleState);

        if (coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
            bool attachment_is_valid = false;
            std::string error_detail;

            if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
                const auto color_attachment_ref = subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
                if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
                    const auto color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];

                    switch (color_attachment.format) {
                        case VK_FORMAT_R8_UINT:
                        case VK_FORMAT_R8_SINT:
                        case VK_FORMAT_R16_UINT:
                        case VK_FORMAT_R16_SINT:
                        case VK_FORMAT_R32_UINT:
                        case VK_FORMAT_R32_SINT:
                            attachment_is_valid = true;
                            break;
                        default:
                            string_sprintf(&error_detail, "references an attachment with an invalid format (%s).",
                                           string_VkFormat(color_attachment.format));
                            break;
                    }
                } else {
                    string_sprintf(&error_detail,
                                   "references an invalid attachment. The subpass pColorAttachments[%" PRIu32
                                   "].attachment has the value "
                                   "VK_ATTACHMENT_UNUSED.",
                                   coverage_to_color_state->coverageToColorLocation);
                }
            } else {
                string_sprintf(&error_detail,
                               "references a non-existent attachment since the subpass colorAttachmentCount is %" PRIu32 ".",
                               subpass_desc->colorAttachmentCount);
            }

            if (!attachment_is_valid) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(device), "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
                                "vkCreateGraphicsPipelines: pCreateInfos[%" PRId32
                                "].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV "
                                "coverageToColorLocation = %" PRIu32 " %s",
                                pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
            }
        }
    }

    return skip;
}

// Block of code at start here specifically for managing/tracking DSs

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) {
    if (disabled.idle_descriptor_set) return false;
    bool skip = false;
    auto set_node = setMap.find(set);
    if (set_node == setMap.end()) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(set), kVUID_Core_DrawState_DoubleDestroy,
                        "Cannot call %s() on %s that has not been allocated.", func_str, report_data->FormatHandle(set).c_str());
    } else {
        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
        if (set_node->second->in_use.load()) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(set), "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
                            "Cannot call %s() on %s that is in use by a command buffer.", func_str,
                            report_data->FormatHandle(set).c_str());
        }
    }
    return skip;
}

// Remove set from setMap and delete the set
void CoreChecks::FreeDescriptorSet(cvdescriptorset::DescriptorSet *descriptor_set) { setMap.erase(descriptor_set->GetSet()); }

// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
void CoreChecks::DeletePools() {
    for (auto ii = descriptorPoolMap.begin(); ii != descriptorPoolMap.end();) {
        // Remove this pools' sets from setMap and delete them
        for (auto ds : ii->second->sets) {
            FreeDescriptorSet(ds);
        }
        ii->second->sets.clear();
        ii = descriptorPoolMap.erase(ii);
    }
}

// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool CoreChecks::ValidateCmdSubpassState(const CMD_BUFFER_STATE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass) return false;
    bool skip = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS &&
         cmd_type != CMD_NEXTSUBPASS2KHR && cmd_type != CMD_ENDRENDERPASS2KHR)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents ==
VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip;
}

bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const char *caller_name, VkQueueFlags required_flags,
                                       const char *error_code) {
    auto pool = GetCommandPoolState(cb_node->createInfo.commandPool);
    if (pool) {
        VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].queueFlags;
        if (!(required_flags & queue_flags)) {
            string required_flags_string;
            for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
                if (flag & required_flags) {
                    if (required_flags_string.size()) {
                        required_flags_string += " or ";
                    }
                    required_flags_string += string_VkQueueFlagBits(flag);
                }
            }
            return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                           HandleToUint64(cb_node->commandBuffer), error_code,
                           "Cannot call %s on a command buffer allocated from a pool without %s capabilities.", caller_name,
                           required_flags_string.c_str());
        }
    }
    return false;
}

static char const *GetCauseStr(VulkanTypedHandle obj) {
    if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
    if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
    return "destroyed";
}

bool CoreChecks::ReportInvalidCommandBuffer(const CMD_BUFFER_STATE *cb_state, const char *call_source) {
    bool skip = false;
    for (auto obj : cb_state->broken_bindings) {
        const char *cause_str = GetCauseStr(obj);
        string VUID;
        string_sprintf(&VUID, "%s-%s", kVUID_Core_DrawState_InvalidCommandBuffer, object_string[obj.type]);
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), VUID.c_str(),
                        "You are adding %s to %s that is invalid because bound %s was %s.", call_source,
                        report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(obj).c_str(),
                        cause_str);
    }
    return skip;
}

// 'commandBuffer must be in the recording state' valid usage error code for each command
// Autogenerated as part of the vk_validation_error_message.h codegen
static const std::array<const char *, CMD_RANGE_SIZE> must_be_recording_list = {{VUID_MUST_BE_RECORDING_LIST}};

// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
bool CoreChecks::ValidateCmd(const CMD_BUFFER_STATE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
    switch (cb_state->state) {
        case CB_RECORDING:
            return ValidateCmdSubpassState(cb_state, cmd);

        case CB_INVALID_COMPLETE:
        case CB_INVALID_INCOMPLETE:
            return ReportInvalidCommandBuffer(cb_state, caller_name);

        default:
            assert(cmd != CMD_NONE);
            const auto error = must_be_recording_list[cmd];
            return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                           HandleToUint64(cb_state->commandBuffer), error,
                           "You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
    }
}

bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, VkDebugReportObjectTypeEXT VUID_handle_type,
                                                         uint64_t VUID_handle, const char *VUID) {
    bool skip = false;
    uint32_t count = 1 <<
physical_device_count;
    if (count <= deviceMask) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
                        "deviceMask(0x%" PRIx32 ") is invalid. Physical device count is %" PRIu32 ".", deviceMask,
                        physical_device_count);
    }
    return skip;
}

bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
                                          const char *VUID) {
    bool skip = false;
    if (deviceMask == 0) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
                        "deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask);
    }
    return skip;
}

bool CoreChecks::ValidateDeviceMaskToCommandBuffer(CMD_BUFFER_STATE *pCB, uint32_t deviceMask,
                                                   VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
                                                   const char *VUID) {
    bool skip = false;
    if ((deviceMask & pCB->initial_device_mask) != deviceMask) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
                        "deviceMask(0x%" PRIx32 ") is not a subset of %s initial device mask(0x%" PRIx32 ").", deviceMask,
                        report_data->FormatHandle(pCB->commandBuffer).c_str(), pCB->initial_device_mask);
    }
    return skip;
}

bool CoreChecks::ValidateDeviceMaskToRenderPass(CMD_BUFFER_STATE *pCB, uint32_t deviceMask,
                                                VkDebugReportObjectTypeEXT VUID_handle_type, uint64_t VUID_handle,
                                                const char *VUID) {
    bool skip = false;
    if ((deviceMask & pCB->active_render_pass_device_mask) != deviceMask) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VUID_handle_type, VUID_handle, VUID,
                        "deviceMask(0x%" PRIx32 ") is not a subset of %s device mask(0x%" PRIx32 ").", deviceMask,
                        report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(),
                        pCB->active_render_pass_device_mask);
    }
    return skip;
}

// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *CoreChecks::GetStateStructPtrFromObject(const VulkanTypedHandle &object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
        case kVulkanObjectTypeDescriptorSet: {
            base_ptr = GetSetNode(object_struct.Cast<VkDescriptorSet>());
            break;
        }
        case kVulkanObjectTypeSampler: {
            base_ptr = GetSamplerState(object_struct.Cast<VkSampler>());
            break;
        }
        case kVulkanObjectTypeQueryPool: {
            base_ptr = GetQueryPoolState(object_struct.Cast<VkQueryPool>());
            break;
        }
        case kVulkanObjectTypePipeline: {
            base_ptr = GetPipelineState(object_struct.Cast<VkPipeline>());
            break;
        }
        case kVulkanObjectTypeBuffer: {
            base_ptr = GetBufferState(object_struct.Cast<VkBuffer>());
            break;
        }
        case kVulkanObjectTypeBufferView: {
            base_ptr = GetBufferViewState(object_struct.Cast<VkBufferView>());
            break;
        }
        case kVulkanObjectTypeImage: {
            base_ptr = GetImageState(object_struct.Cast<VkImage>());
            break;
        }
        case kVulkanObjectTypeImageView: {
            base_ptr = GetImageViewState(object_struct.Cast<VkImageView>());
            break;
        }
        case kVulkanObjectTypeEvent: {
            base_ptr = GetEventState(object_struct.Cast<VkEvent>());
            break;
        }
        case kVulkanObjectTypeDescriptorPool: {
            base_ptr = GetDescriptorPoolState(object_struct.Cast<VkDescriptorPool>());
            break;
        }
        case kVulkanObjectTypeCommandPool: {
            base_ptr = GetCommandPoolState(object_struct.Cast<VkCommandPool>());
            break;
        }
        case kVulkanObjectTypeFramebuffer: {
            base_ptr = GetFramebufferState(object_struct.Cast<VkFramebuffer>());
            break;
        }
        case kVulkanObjectTypeRenderPass: {
            base_ptr = GetRenderPassState(object_struct.Cast<VkRenderPass>());
            break;
        }
        case kVulkanObjectTypeDeviceMemory: {
            base_ptr = GetDevMemState(object_struct.Cast<VkDeviceMemory>());
            break;
        }
        default: //
TODO : Any other objects to be handled here? assert(0); break; } return base_ptr; } // Tie the VulkanTypedHandle to the cmd buffer which includes: // Add object_binding to cmd buffer // Add cb_binding to object static void AddCommandBufferBinding(std::unordered_set<CMD_BUFFER_STATE *> *cb_bindings, const VulkanTypedHandle &obj, CMD_BUFFER_STATE *cb_node) { cb_bindings->insert(cb_node); cb_node->object_bindings.insert(obj); } // For a given object, if cb_node is in that objects cb_bindings, remove cb_node void CoreChecks::RemoveCommandBufferBinding(VulkanTypedHandle const &object, CMD_BUFFER_STATE *cb_node) { BASE_NODE *base_obj = GetStateStructPtrFromObject(object); if (base_obj) base_obj->cb_bindings.erase(cb_node); } // Reset the command buffer state // Maintain the createInfo and set state to CB_NEW, but clear all other state void CoreChecks::ResetCommandBufferState(const VkCommandBuffer cb) { CMD_BUFFER_STATE *pCB = GetCBState(cb); if (pCB) { pCB->in_use.store(0); // Reset CB state (note that createInfo is not cleared) pCB->commandBuffer = cb; memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo)); memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo)); pCB->hasDrawCmd = false; pCB->state = CB_NEW; pCB->submitCount = 0; pCB->image_layout_change_count = 1; // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty pCB->status = 0; pCB->static_status = 0; pCB->viewportMask = 0; pCB->scissorMask = 0; for (auto &item : pCB->lastBound) { item.second.reset(); } memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo)); pCB->activeRenderPass = nullptr; pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE; pCB->activeSubpass = 0; pCB->broken_bindings.clear(); pCB->waitedEvents.clear(); pCB->events.clear(); pCB->writeEventsBeforeWait.clear(); pCB->waitedEventsBeforeQueryReset.clear(); pCB->queryToStateMap.clear(); pCB->activeQueries.clear(); pCB->startedQueries.clear(); pCB->image_layout_map.clear(); pCB->eventToStageMap.clear(); pCB->draw_data.clear(); pCB->current_draw_data.vertex_buffer_bindings.clear(); pCB->vertex_buffer_used = false; pCB->primaryCommandBuffer = VK_NULL_HANDLE; // If secondary, invalidate any primary command buffer that may call us. if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) { InvalidateCommandBuffers(pCB->linkedCommandBuffers, VulkanTypedHandle(cb, kVulkanObjectTypeCommandBuffer)); } // Remove reverse command buffer links. 
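        // Note (inference from the code below, not stated in the original source): linkedCommandBuffers appears to be
        // maintained in both directions (primary -> executed secondaries, secondary -> referencing primaries), so a
        // reset must erase this CB from each linked CB's set before clearing its own list; otherwise the peers would
        // keep dangling pointers to the reset buffer.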
for (auto pSubCB : pCB->linkedCommandBuffers) { pSubCB->linkedCommandBuffers.erase(pCB); } pCB->linkedCommandBuffers.clear(); pCB->updateImages.clear(); pCB->updateBuffers.clear(); ClearCmdBufAndMemReferences(pCB); pCB->queue_submit_functions.clear(); pCB->cmd_execute_commands_functions.clear(); pCB->eventUpdates.clear(); pCB->queryUpdates.clear(); // Remove object bindings for (const auto &obj : pCB->object_bindings) { RemoveCommandBufferBinding(obj, pCB); } pCB->object_bindings.clear(); // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list for (auto framebuffer : pCB->framebuffers) { auto fb_state = GetFramebufferState(framebuffer); if (fb_state) fb_state->cb_bindings.erase(pCB); } pCB->framebuffers.clear(); pCB->activeFramebuffer = VK_NULL_HANDLE; memset(&pCB->index_buffer_binding, 0, sizeof(pCB->index_buffer_binding)); pCB->qfo_transfer_image_barriers.Reset(); pCB->qfo_transfer_buffer_barriers.Reset(); // Clean up the label data ResetCmdDebugUtilsLabel(report_data, pCB->commandBuffer); pCB->debug_label.Reset(); } if (enabled.gpu_validation) { GpuResetCommandBuffer(cb); } } CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) { // initially assume everything is static state CBStatusFlags flags = CBSTATUS_ALL_STATE_SET; if (ds) { for (uint32_t i = 0; i < ds->dynamicStateCount; i++) { switch (ds->pDynamicStates[i]) { case VK_DYNAMIC_STATE_LINE_WIDTH: flags &= ~CBSTATUS_LINE_WIDTH_SET; break; case VK_DYNAMIC_STATE_DEPTH_BIAS: flags &= ~CBSTATUS_DEPTH_BIAS_SET; break; case VK_DYNAMIC_STATE_BLEND_CONSTANTS: flags &= ~CBSTATUS_BLEND_CONSTANTS_SET; break; case VK_DYNAMIC_STATE_DEPTH_BOUNDS: flags &= ~CBSTATUS_DEPTH_BOUNDS_SET; break; case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK: flags &= ~CBSTATUS_STENCIL_READ_MASK_SET; break; case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK: flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET; break; case VK_DYNAMIC_STATE_STENCIL_REFERENCE: flags &= ~CBSTATUS_STENCIL_REFERENCE_SET; break; case VK_DYNAMIC_STATE_SCISSOR: flags &= ~CBSTATUS_SCISSOR_SET; break; case VK_DYNAMIC_STATE_VIEWPORT: flags &= ~CBSTATUS_VIEWPORT_SET; break; case VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV: flags &= ~CBSTATUS_EXCLUSIVE_SCISSOR_SET; break; case VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV: flags &= ~CBSTATUS_SHADING_RATE_PALETTE_SET; break; default: break; } } } return flags; } // Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a // render pass. bool CoreChecks::InsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) { bool inside = false; if (pCB->activeRenderPass) { inside = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), msgCode, "%s: It is invalid to issue this call inside an active %s.", apiName, report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str()); } return inside; } // Flags validation error if the associated call is made outside a render pass. The apiName // routine should ONLY be called inside a render pass. 
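// For example (illustrative, not an exhaustive list): commands that are only legal inside a render pass, such as
// draws, are checked with this helper; note that a secondary command buffer begun with
// VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT is treated as being inside its inherited render pass.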
bool CoreChecks::OutsideRenderPass(CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) { bool outside = false; if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) || ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) && !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) { outside = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), msgCode, "%s: This call must be issued inside an active render pass.", apiName); } return outside; } void CoreChecks::InitGpuValidation() { // Process the layer settings file. enum CoreValidationGpuFlagBits { CORE_VALIDATION_GPU_VALIDATION_ALL_BIT = 0x00000001, CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT = 0x00000002, }; typedef VkFlags CoreGPUFlags; static const std::unordered_map<std::string, VkFlags> gpu_flags_option_definitions = { {std::string("all"), CORE_VALIDATION_GPU_VALIDATION_ALL_BIT}, {std::string("reserve_binding_slot"), CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT}, }; std::string gpu_flags_key = "lunarg_core_validation.gpu_validation"; CoreGPUFlags gpu_flags = GetLayerOptionFlags(gpu_flags_key, gpu_flags_option_definitions, 0); gpu_flags_key = "khronos_validation.gpu_validation"; gpu_flags |= GetLayerOptionFlags(gpu_flags_key, gpu_flags_option_definitions, 0); if (gpu_flags & CORE_VALIDATION_GPU_VALIDATION_ALL_BIT) { instance_state->enabled.gpu_validation = true; } if (gpu_flags & CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT) { instance_state->enabled.gpu_validation_reserve_binding_slot = true; } } void CoreChecks::PostCallRecordCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance, VkResult result) { if (VK_SUCCESS != result) return; InitGpuValidation(); } bool CoreChecks::ValidateQueueFamilyIndex(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family, const char *err_code, const char *cmd_name, const char *queue_family_var_name) { bool skip = false; if (requested_queue_family >= pd_state->queue_family_known_count) { const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : ""; const std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) ? "the pQueueFamilyPropertyCount was never obtained" : "i.e. 
is not less than " + std::to_string(pd_state->queue_family_known_count); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(pd_state->phys_device), err_code, "%s: %s (= %" PRIu32 ") is not less than any previously obtained pQueueFamilyPropertyCount from " "vkGetPhysicalDeviceQueueFamilyProperties%s (%s).", cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str()); } return skip; } // Verify VkDeviceQueueCreateInfos bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count, const VkDeviceQueueCreateInfo *infos) { bool skip = false; std::unordered_set<uint32_t> queue_family_set; for (uint32_t i = 0; i < info_count; ++i) { const auto requested_queue_family = infos[i].queueFamilyIndex; std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex"; skip |= ValidateQueueFamilyIndex(pd_state, requested_queue_family, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381", "vkCreateDevice", queue_family_var_name.c_str()); if (queue_family_set.insert(requested_queue_family).second == false) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(pd_state->phys_device), "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372", "CreateDevice(): %s (=%" PRIu32 ") is not unique within pQueueCreateInfos.", queue_family_var_name.c_str(), requested_queue_family); } // Verify that requested queue count of queue family is known to be valid at this point in time if (requested_queue_family < pd_state->queue_family_known_count) { const auto requested_queue_count = infos[i].queueCount; const bool queue_family_has_props = requested_queue_family < pd_state->queue_family_properties.size(); // spec guarantees at least one queue for each queue family const uint32_t available_queue_count = queue_family_has_props ? pd_state->queue_family_properties[requested_queue_family].queueCount : 1; const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : ""; if (requested_queue_count > available_queue_count) { const std::string count_note = queue_family_has_props ? "i.e. 
is not less than or equal to " +
                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount)
                    : "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained";

                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(pd_state->phys_device), "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
                    "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
                    ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
                    "].queueFamilyIndex (=%" PRIu32
                    ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                    i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
            }
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    bool skip = false;
    auto pd_state = GetPhysicalDeviceState(gpu);

    // TODO: object_tracker should perhaps do this instead
    //       and it does not seem to currently work anyway -- the loader just crashes before this point
    if (!pd_state) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                        kVUID_Core_DevLimit_MustQueryCount,
                        "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    }

    skip |= ValidateDeviceQueueCreateInfos(pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);

    return skip;
}

void CoreChecks::PreCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
                                           std::unique_ptr<safe_VkDeviceCreateInfo> &modified_create_info) {
    // GPU Validation can possibly turn on device features, so give it a chance to change the create info.
    if (enabled.gpu_validation) {
        VkPhysicalDeviceFeatures supported_features;
        DispatchGetPhysicalDeviceFeatures(gpu, &supported_features);
        GpuPreCallRecordCreateDevice(gpu, modified_create_info, &supported_features);
    }
}

void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
    if (VK_SUCCESS != result) return;

    const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
    if (nullptr == enabled_features_found) {
        const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
        if (features2) {
            enabled_features_found = &(features2->features);
        }
    }

    ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
    CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);

    if (nullptr == enabled_features_found) {
        core_checks->enabled_features.core = {};
    } else {
        core_checks->enabled_features.core = *enabled_features_found;
    }

    // Make sure that queue_family_properties are obtained for this device's physical_device, even if the app has not
    // previously set them through an explicit API call.
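    // Note: the block below follows the standard Vulkan two-call enumeration idiom: the first
    // vkGetPhysicalDeviceQueueFamilyProperties call (null pProperties) returns only the count, the vector is then
    // grown (never shrunk, preserving any previously cached entries), and the second call fills the array in.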
uint32_t count; auto pd_state = GetPhysicalDeviceState(gpu); DispatchGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr); pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count)); DispatchGetPhysicalDeviceQueueFamilyProperties(gpu, &count, &pd_state->queue_family_properties[0]); // Save local link to this device's physical device state core_checks->physical_device_state = pd_state; const auto *device_group_ci = lvl_find_in_chain<VkDeviceGroupDeviceCreateInfo>(pCreateInfo->pNext); core_checks->physical_device_count = device_group_ci && device_group_ci->physicalDeviceCount > 0 ? device_group_ci->physicalDeviceCount : 1; const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext); if (descriptor_indexing_features) { core_checks->enabled_features.descriptor_indexing = *descriptor_indexing_features; } const auto *eight_bit_storage_features = lvl_find_in_chain<VkPhysicalDevice8BitStorageFeaturesKHR>(pCreateInfo->pNext); if (eight_bit_storage_features) { core_checks->enabled_features.eight_bit_storage = *eight_bit_storage_features; } const auto *exclusive_scissor_features = lvl_find_in_chain<VkPhysicalDeviceExclusiveScissorFeaturesNV>(pCreateInfo->pNext); if (exclusive_scissor_features) { core_checks->enabled_features.exclusive_scissor = *exclusive_scissor_features; } const auto *shading_rate_image_features = lvl_find_in_chain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext); if (shading_rate_image_features) { core_checks->enabled_features.shading_rate_image = *shading_rate_image_features; } const auto *mesh_shader_features = lvl_find_in_chain<VkPhysicalDeviceMeshShaderFeaturesNV>(pCreateInfo->pNext); if (mesh_shader_features) { core_checks->enabled_features.mesh_shader = *mesh_shader_features; } const auto *inline_uniform_block_features = lvl_find_in_chain<VkPhysicalDeviceInlineUniformBlockFeaturesEXT>(pCreateInfo->pNext); if (inline_uniform_block_features) { core_checks->enabled_features.inline_uniform_block = *inline_uniform_block_features; } const auto *transform_feedback_features = lvl_find_in_chain<VkPhysicalDeviceTransformFeedbackFeaturesEXT>(pCreateInfo->pNext); if (transform_feedback_features) { core_checks->enabled_features.transform_feedback_features = *transform_feedback_features; } const auto *float16_int8_features = lvl_find_in_chain<VkPhysicalDeviceFloat16Int8FeaturesKHR>(pCreateInfo->pNext); if (float16_int8_features) { core_checks->enabled_features.float16_int8 = *float16_int8_features; } const auto *vtx_attrib_div_features = lvl_find_in_chain<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT>(pCreateInfo->pNext); if (vtx_attrib_div_features) { core_checks->enabled_features.vtx_attrib_divisor_features = *vtx_attrib_div_features; } const auto *scalar_block_layout_features = lvl_find_in_chain<VkPhysicalDeviceScalarBlockLayoutFeaturesEXT>(pCreateInfo->pNext); if (scalar_block_layout_features) { core_checks->enabled_features.scalar_block_layout_features = *scalar_block_layout_features; } const auto *buffer_address = lvl_find_in_chain<VkPhysicalDeviceBufferAddressFeaturesEXT>(pCreateInfo->pNext); if (buffer_address) { core_checks->enabled_features.buffer_address = *buffer_address; } const auto *cooperative_matrix_features = lvl_find_in_chain<VkPhysicalDeviceCooperativeMatrixFeaturesNV>(pCreateInfo->pNext); if (cooperative_matrix_features) { core_checks->enabled_features.cooperative_matrix_features = *cooperative_matrix_features; 
} const auto *float_controls_features = lvl_find_in_chain<VkPhysicalDeviceFloatControlsPropertiesKHR>(pCreateInfo->pNext); if (float_controls_features) { core_checks->enabled_features.float_controls = *float_controls_features; } const auto *host_query_reset_features = lvl_find_in_chain<VkPhysicalDeviceHostQueryResetFeaturesEXT>(pCreateInfo->pNext); if (host_query_reset_features) { core_checks->enabled_features.host_query_reset_features = *host_query_reset_features; } const auto *compute_shader_derivatives_features = lvl_find_in_chain<VkPhysicalDeviceComputeShaderDerivativesFeaturesNV>(pCreateInfo->pNext); if (compute_shader_derivatives_features) { core_checks->enabled_features.compute_shader_derivatives_features = *compute_shader_derivatives_features; } const auto *fragment_shader_barycentric_features = lvl_find_in_chain<VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV>(pCreateInfo->pNext); if (fragment_shader_barycentric_features) { core_checks->enabled_features.fragment_shader_barycentric_features = *fragment_shader_barycentric_features; } const auto *shader_image_footprint_features = lvl_find_in_chain<VkPhysicalDeviceShaderImageFootprintFeaturesNV>(pCreateInfo->pNext); if (shader_image_footprint_features) { core_checks->enabled_features.shader_image_footprint_features = *shader_image_footprint_features; } // Store physical device properties and physical device mem limits into CoreChecks structs DispatchGetPhysicalDeviceMemoryProperties(gpu, &core_checks->phys_dev_mem_props); DispatchGetPhysicalDeviceProperties(gpu, &core_checks->phys_dev_props); const auto &dev_ext = core_checks->device_extensions; auto *phys_dev_props = &core_checks->phys_dev_ext_props; if (dev_ext.vk_khr_push_descriptor) { // Get the needed push_descriptor limits VkPhysicalDevicePushDescriptorPropertiesKHR push_descriptor_prop; GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_khr_push_descriptor, &push_descriptor_prop); phys_dev_props->max_push_descriptors = push_descriptor_prop.maxPushDescriptors; } GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_descriptor_indexing, &phys_dev_props->descriptor_indexing_props); GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_nv_shading_rate_image, &phys_dev_props->shading_rate_image_props); GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_nv_mesh_shader, &phys_dev_props->mesh_shader_props); GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_inline_uniform_block, &phys_dev_props->inline_uniform_block_props); GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_vertex_attribute_divisor, &phys_dev_props->vtx_attrib_divisor_props); GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_khr_depth_stencil_resolve, &phys_dev_props->depth_stencil_resolve_props); GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_transform_feedback, &phys_dev_props->transform_feedback_props); if (enabled.gpu_validation) { core_checks->GpuPostCallRecordCreateDevice(&enabled, pCreateInfo); } if (core_checks->device_extensions.vk_nv_cooperative_matrix) { // Get the needed cooperative_matrix properties auto cooperative_matrix_props = lvl_init_struct<VkPhysicalDeviceCooperativeMatrixPropertiesNV>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&cooperative_matrix_props); instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2); core_checks->phys_dev_ext_props.cooperative_matrix_props = cooperative_matrix_props; uint32_t numCooperativeMatrixProperties = 0; instance_dispatch_table.GetPhysicalDeviceCooperativeMatrixPropertiesNV(gpu, &numCooperativeMatrixProperties, NULL); 
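        // Two-call enumeration again: the call above retrieved only the count; resize with lvl_init_struct so each
        // element carries the correct sType before the second call fills the array in.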
core_checks->cooperative_matrix_properties.resize(numCooperativeMatrixProperties, lvl_init_struct<VkCooperativeMatrixPropertiesNV>()); instance_dispatch_table.GetPhysicalDeviceCooperativeMatrixPropertiesNV(gpu, &numCooperativeMatrixProperties, core_checks->cooperative_matrix_properties.data()); } if (core_checks->phys_dev_props.apiVersion >= VK_API_VERSION_1_1) { // Get the needed subgroup limits auto subgroup_prop = lvl_init_struct<VkPhysicalDeviceSubgroupProperties>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&subgroup_prop); instance_dispatch_table.GetPhysicalDeviceProperties2(gpu, &prop2); core_checks->phys_dev_ext_props.subgroup_props = subgroup_prop; } // Store queue family data if ((pCreateInfo != nullptr) && (pCreateInfo->pQueueCreateInfos != nullptr)) { for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) { core_checks->queue_family_index_map.insert( std::make_pair(pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, pCreateInfo->pQueueCreateInfos[i].queueCount)); } } } void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { if (!device) return; if (enabled.gpu_validation) { GpuPreCallRecordDestroyDevice(); } pipelineMap.clear(); renderPassMap.clear(); commandBufferMap.clear(); // This will also delete all sets in the pool & remove them from setMap DeletePools(); // All sets should be removed assert(setMap.empty()); descriptorSetLayoutMap.clear(); imageViewMap.clear(); imageMap.clear(); imageSubresourceMap.clear(); imageLayoutMap.clear(); bufferViewMap.clear(); bufferMap.clear(); // Queues persist until device is destroyed queueMap.clear(); layer_debug_utils_destroy_device(device); } // For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id // and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id. // Similarly for mesh and task shaders. 
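// For example (illustrative): a stageMask passed to vkCmdSetEvent() containing VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
// on a device created without the geometryShader feature would be reported with geo_error_id.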
bool CoreChecks::ValidateStageMaskGsTsEnables(VkPipelineStageFlags stageMask, const char *caller, const char *geo_error_id, const char *tess_error_id, const char *mesh_error_id, const char *task_error_id) { bool skip = false; if (!enabled_features.core.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, geo_error_id, "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have " "geometryShader feature enabled.", caller); } if (!enabled_features.core.tessellationShader && (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, tess_error_id, "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or " "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have " "tessellationShader feature enabled.", caller); } if (!enabled_features.mesh_shader.meshShader && (stageMask & VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, mesh_error_id, "%s call includes a stageMask with VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV bit set when device does not have " "VkPhysicalDeviceMeshShaderFeaturesNV::meshShader feature enabled.", caller); } if (!enabled_features.mesh_shader.taskShader && (stageMask & VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, task_error_id, "%s call includes a stageMask with VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV bit set when device does not have " "VkPhysicalDeviceMeshShaderFeaturesNV::taskShader feature enabled.", caller); } return skip; } // Loop through bound objects and increment their in_use counts. void CoreChecks::IncrementBoundObjects(CMD_BUFFER_STATE const *cb_node) { for (auto obj : cb_node->object_bindings) { auto base_obj = GetStateStructPtrFromObject(obj); if (base_obj) { base_obj->in_use.fetch_add(1); } } } // Track which resources are in-flight by atomically incrementing their "in_use" count void CoreChecks::IncrementResources(CMD_BUFFER_STATE *cb_node) { cb_node->submitCount++; cb_node->in_use.fetch_add(1); // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below IncrementBoundObjects(cb_node); // TODO : We should be able to remove the NULL look-up checks from the code below as long as // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state // should then be flagged prior to calling this function for (auto draw_data_element : cb_node->draw_data) { for (auto &vertex_buffer : draw_data_element.vertex_buffer_bindings) { auto buffer_state = GetBufferState(vertex_buffer.buffer); if (buffer_state) { buffer_state->in_use.fetch_add(1); } } } for (auto event : cb_node->writeEventsBeforeWait) { auto event_state = GetEventState(event); if (event_state) event_state->write_in_use++; } } // Note: This function assumes that the global lock is held by the calling thread. // For the given queue, verify the queue state up to the given seq number. // Currently the only check is to make sure that if there are events to be waited on prior to // a QueryReset, make sure that all such events have been signalled. 
bool CoreChecks::VerifyQueueStateToSeq(QUEUE_STATE *initial_queue, uint64_t initial_seq) { bool skip = false; // sequence number we want to validate up to, per queue std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}}; // sequence number we've completed validation for, per queue std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs; std::vector<QUEUE_STATE *> worklist{initial_queue}; while (worklist.size()) { auto queue = worklist.back(); worklist.pop_back(); auto target_seq = target_seqs[queue]; auto seq = std::max(done_seqs[queue], queue->seq); auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq for (; seq < target_seq; ++sub_it, ++seq) { for (auto &wait : sub_it->waitSemaphores) { auto other_queue = GetQueueState(wait.queue); if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here. auto other_target_seq = std::max(target_seqs[other_queue], wait.seq); auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq); // if this wait is for another queue, and covers new sequence // numbers beyond what we've already validated, mark the new // target seq and (possibly-re)add the queue to the worklist. if (other_done_seq < other_target_seq) { target_seqs[other_queue] = other_target_seq; worklist.push_back(other_queue); } } } // finally mark the point we've now validated this queue to. done_seqs[queue] = seq; } return skip; } // When the given fence is retired, verify outstanding queue operations through the point of the fence bool CoreChecks::VerifyQueueStateToFence(VkFence fence) { auto fence_state = GetFenceState(fence); if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) { return VerifyQueueStateToSeq(GetQueueState(fence_state->signaler.first), fence_state->signaler.second); } return false; } // Decrement in-use count for objects bound to command buffer void CoreChecks::DecrementBoundResources(CMD_BUFFER_STATE const *cb_node) { BASE_NODE *base_obj = nullptr; for (auto obj : cb_node->object_bindings) { base_obj = GetStateStructPtrFromObject(obj); if (base_obj) { base_obj->in_use.fetch_sub(1); } } } void CoreChecks::RetireWorkOnQueue(QUEUE_STATE *pQueue, uint64_t seq) { std::unordered_map<VkQueue, uint64_t> otherQueueSeqs; // Roll this queue forward, one submission at a time. 
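// Retiring a submission releases the in-use references taken at submit time (semaphores,
// command buffers and their bound objects), promotes the command buffers' query/event state
// into the global maps, and marks an internally-scoped fence FENCE_RETIRED. Cross-queue
// semaphore waits are gathered in otherQueueSeqs and retired recursively at the end.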
while (pQueue->seq < seq) { auto &submission = pQueue->submissions.front(); for (auto &wait : submission.waitSemaphores) { auto pSemaphore = GetSemaphoreState(wait.semaphore); if (pSemaphore) { pSemaphore->in_use.fetch_sub(1); } auto &lastSeq = otherQueueSeqs[wait.queue]; lastSeq = std::max(lastSeq, wait.seq); } for (auto &semaphore : submission.signalSemaphores) { auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore) { pSemaphore->in_use.fetch_sub(1); } } for (auto &semaphore : submission.externalSemaphores) { auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore) { pSemaphore->in_use.fetch_sub(1); } } for (auto cb : submission.cbs) { auto cb_node = GetCBState(cb); if (!cb_node) { continue; } // First perform decrement on general case bound objects DecrementBoundResources(cb_node); for (auto draw_data_element : cb_node->draw_data) { for (auto &vertex_buffer_binding : draw_data_element.vertex_buffer_bindings) { auto buffer_state = GetBufferState(vertex_buffer_binding.buffer); if (buffer_state) { buffer_state->in_use.fetch_sub(1); } } } for (auto event : cb_node->writeEventsBeforeWait) { auto eventNode = eventMap.find(event); if (eventNode != eventMap.end()) { eventNode->second.write_in_use--; } } for (auto queryStatePair : cb_node->queryToStateMap) { queryToStateMap[queryStatePair.first] = queryStatePair.second; } for (auto eventStagePair : cb_node->eventToStageMap) { eventMap[eventStagePair.first].stageMask = eventStagePair.second; } cb_node->in_use.fetch_sub(1); } auto pFence = GetFenceState(submission.fence); if (pFence && pFence->scope == kSyncScopeInternal) { pFence->state = FENCE_RETIRED; } pQueue->submissions.pop_front(); pQueue->seq++; } // Roll other queues forward to the highest seq we saw a wait for for (auto qs : otherQueueSeqs) { RetireWorkOnQueue(GetQueueState(qs.first), qs.second); } } // Submit a fence to a queue, delimiting previous fences and previous untracked // work by it. 
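// The signaler sequence recorded below is the queue's retired seq plus all pending
// submissions plus those about to be enqueued, so retiring work up to the fence retires
// everything submitted ahead of it.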
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_STATE *pFence, uint64_t submitCount) { pFence->state = FENCE_INFLIGHT; pFence->signaler.first = pQueue->queue; pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount; } bool CoreChecks::ValidateCommandBufferSimultaneousUse(CMD_BUFFER_STATE *pCB, int current_submit_count) { bool skip = false; if ((pCB->in_use.load() || current_submit_count > 1) && !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, "VUID-vkQueueSubmit-pCommandBuffers-00071", "%s is already in use and is not marked for simultaneous use.", report_data->FormatHandle(pCB->commandBuffer).c_str()); } return skip; } bool CoreChecks::ValidateCommandBufferState(CMD_BUFFER_STATE *cb_state, const char *call_source, int current_submit_count, const char *vu_id) { bool skip = false; if (disabled.command_buffer_state) return skip; // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (cb_state->submitCount + current_submit_count > 1)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, kVUID_Core_DrawState_CommandBufferSingleSubmitViolation, "%s was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64 " times.", report_data->FormatHandle(cb_state->commandBuffer).c_str(), cb_state->submitCount + current_submit_count); } // Validate that cmd buffers have been updated switch (cb_state->state) { case CB_INVALID_INCOMPLETE: case CB_INVALID_COMPLETE: skip |= ReportInvalidCommandBuffer(cb_state, call_source); break; case CB_NEW: skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(cb_state->commandBuffer), vu_id, "%s used in the call to %s is unrecorded and contains no commands.", report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source); break; case CB_RECORDING: skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_NoEndCommandBuffer, "You must call vkEndCommandBuffer() on %s before this call to %s!", report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source); break; default: /* recorded */ break; } return skip; } bool CoreChecks::ValidateResources(CMD_BUFFER_STATE *cb_node) { bool skip = false; // TODO : We should be able to remove the NULL look-up checks from the code below as long as // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state // should then be flagged prior to calling this function for (const auto &draw_data_element : cb_node->draw_data) { for (const auto &vertex_buffer_binding : draw_data_element.vertex_buffer_bindings) { auto buffer_state = GetBufferState(vertex_buffer_binding.buffer); if ((vertex_buffer_binding.buffer != VK_NULL_HANDLE) && (!buffer_state)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(vertex_buffer_binding.buffer), kVUID_Core_DrawState_InvalidBuffer, "Cannot submit cmd buffer using deleted %s.", report_data->FormatHandle(vertex_buffer_binding.buffer).c_str()); } } } return skip; } // Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices bool
CoreChecks::ValidImageBufferQueue(CMD_BUFFER_STATE *cb_node, const VulkanTypedHandle &object, VkQueue queue, uint32_t count, const uint32_t *indices) { bool found = false; bool skip = false; auto queue_state = GetQueueState(queue); if (queue_state) { for (uint32_t i = 0; i < count; i++) { if (indices[i] == queue_state->queueFamilyIndex) { found = true; break; } } if (!found) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object.type], object.handle, kVUID_Core_DrawState_InvalidQueueFamily, "vkQueueSubmit: %s contains %s which was not created allowing concurrent access to " "this queue family %d.", report_data->FormatHandle(cb_node->commandBuffer).c_str(), report_data->FormatHandle(object).c_str(), queue_state->queueFamilyIndex); } } return skip; } // Validate that queueFamilyIndices of primary command buffers match this queue // Secondary command buffers were previously validated in vkCmdExecuteCommands(). bool CoreChecks::ValidateQueueFamilyIndices(CMD_BUFFER_STATE *pCB, VkQueue queue) { bool skip = false; auto pPool = GetCommandPoolState(pCB->createInfo.commandPool); auto queue_state = GetQueueState(queue); if (pPool && queue_state) { if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), "VUID-vkQueueSubmit-pCommandBuffers-00074", "vkQueueSubmit: Primary %s created in queue family %d is being submitted on %s " "from queue family %d.", report_data->FormatHandle(pCB->commandBuffer).c_str(), pPool->queueFamilyIndex, report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex); } // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family for (const auto &object : pCB->object_bindings) { if (object.type == kVulkanObjectTypeImage) { auto image_state = GetImageState(object.Cast<VkImage>()); if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { skip |= ValidImageBufferQueue(pCB, object, queue, image_state->createInfo.queueFamilyIndexCount, image_state->createInfo.pQueueFamilyIndices); } } else if (object.type == kVulkanObjectTypeBuffer) { auto buffer_state = GetBufferState(object.Cast<VkBuffer>()); if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { skip |= ValidImageBufferQueue(pCB, object, queue, buffer_state->createInfo.queueFamilyIndexCount, buffer_state->createInfo.pQueueFamilyIndices); } } } } return skip; } bool CoreChecks::ValidatePrimaryCommandBufferState(CMD_BUFFER_STATE *pCB, int current_submit_count, QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards, QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) { // Track in-use for resources off of primary and any secondary CBs bool skip = false; // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing // on device skip |= ValidateCommandBufferSimultaneousUse(pCB, current_submit_count); skip |= ValidateResources(pCB); skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards); for (auto pSubCB : pCB->linkedCommandBuffers) { skip |= ValidateResources(pSubCB); skip |= ValidateQueuedQFOTransfers(pSubCB, qfo_image_scoreboards, qfo_buffer_scoreboards); // TODO: replace with InvalidateCommandBuffers() at recording. 
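// A secondary recorded without SIMULTANEOUS_USE can only be linked to one primary at a
// time; the check below fires when it has since been bound to a different primary.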
if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) && !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, "VUID-vkQueueSubmit-pCommandBuffers-00073", "%s was submitted with secondary %s but that buffer has subsequently been bound to " "primary %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", report_data->FormatHandle(pCB->commandBuffer).c_str(), report_data->FormatHandle(pSubCB->commandBuffer).c_str(), report_data->FormatHandle(pSubCB->primaryCommandBuffer).c_str()); } } skip |= ValidateCommandBufferState(pCB, "vkQueueSubmit()", current_submit_count, "VUID-vkQueueSubmit-pCommandBuffers-00072"); return skip; } bool CoreChecks::ValidateFenceForSubmit(FENCE_STATE *pFence) { bool skip = false; if (pFence && pFence->scope == kSyncScopeInternal) { if (pFence->state == FENCE_INFLIGHT) { // TODO: opportunities for "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueBindSparse-fence-01114", // "VUID-vkAcquireNextImageKHR-fence-01287" skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(pFence->fence), kVUID_Core_DrawState_InvalidFence, "%s is already in use by another submission.", report_data->FormatHandle(pFence->fence).c_str()); } else if (pFence->state == FENCE_RETIRED) { // TODO: opportunities for "VUID-vkQueueSubmit-fence-00063", "VUID-vkQueueBindSparse-fence-01113", // "VUID-vkAcquireNextImageKHR-fence-01287" skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(pFence->fence), kVUID_Core_MemTrack_FenceState, "%s submitted in SIGNALED state. Fences must be reset before being submitted", report_data->FormatHandle(pFence->fence).c_str()); } } return skip; } void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence, VkResult result) { uint64_t early_retire_seq = 0; auto pQueue = GetQueueState(queue); auto pFence = GetFenceState(fence); if (pFence) { if (pFence->scope == kSyncScopeInternal) { // Mark fence in use SubmitFence(pQueue, pFence, std::max(1u, submitCount)); if (!submitCount) { // If no submissions, but just dropping a fence on the end of the queue, // record an empty submission with just the fence, so we can determine // its completion. 
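// (The empty submission keeps RetireWorkOnQueue uniform: it only ever walks the
// submission list, so a bare fence is still retired in order.)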
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence); } } else { // Retire work up until this fence early, we will not see the wait that corresponds to this signal early_retire_seq = pQueue->seq + pQueue->submissions.size(); if (!external_sync_warning) { external_sync_warning = true; log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence), kVUID_Core_DrawState_QueueForwardProgress, "vkQueueSubmit(): Signaling external %s on %s will disable validation of preceding command " "buffer lifecycle states and the in-use status of associated objects.", report_data->FormatHandle(fence).c_str(), report_data->FormatHandle(queue).c_str()); } } } // Now process each individual submit for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { std::vector<VkCommandBuffer> cbs; const VkSubmitInfo *submit = &pSubmits[submit_idx]; vector<SEMAPHORE_WAIT> semaphore_waits; vector<VkSemaphore> semaphore_signals; vector<VkSemaphore> semaphore_externals; for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) { VkSemaphore semaphore = submit->pWaitSemaphores[i]; auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore) { if (pSemaphore->scope == kSyncScopeInternal) { if (pSemaphore->signaler.first != VK_NULL_HANDLE) { semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second}); pSemaphore->in_use.fetch_add(1); } pSemaphore->signaler.first = VK_NULL_HANDLE; pSemaphore->signaled = false; } else { semaphore_externals.push_back(semaphore); pSemaphore->in_use.fetch_add(1); if (pSemaphore->scope == kSyncScopeExternalTemporary) { pSemaphore->scope = kSyncScopeInternal; } } } } for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) { VkSemaphore semaphore = submit->pSignalSemaphores[i]; auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore) { if (pSemaphore->scope == kSyncScopeInternal) { pSemaphore->signaler.first = queue; pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1; pSemaphore->signaled = true; pSemaphore->in_use.fetch_add(1); semaphore_signals.push_back(semaphore); } else { // Retire work up until this submit early, we will not see the wait that corresponds to this signal early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1); if (!external_sync_warning) { external_sync_warning = true; log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "vkQueueSubmit(): Signaling external %s on %s will disable validation of preceding " "command buffer lifecycle states and the in-use status of associated objects.", report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(queue).c_str()); } } } } for (uint32_t i = 0; i < submit->commandBufferCount; i++) { auto cb_node = GetCBState(submit->pCommandBuffers[i]); if (cb_node) { cbs.push_back(submit->pCommandBuffers[i]); for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) { cbs.push_back(secondaryCmdBuffer->commandBuffer); UpdateCmdBufImageLayouts(secondaryCmdBuffer); IncrementResources(secondaryCmdBuffer); RecordQueuedQFOTransfers(secondaryCmdBuffer); } UpdateCmdBufImageLayouts(cb_node); IncrementResources(cb_node); RecordQueuedQFOTransfers(cb_node); } } pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals, 
submit_idx == submitCount - 1 ? fence : (VkFence)VK_NULL_HANDLE); } if (early_retire_seq) { RetireWorkOnQueue(pQueue, early_retire_seq); } if (enabled.gpu_validation) { GpuPostCallQueueSubmit(queue, submitCount, pSubmits, fence); } } bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) { auto pFence = GetFenceState(fence); bool skip = ValidateFenceForSubmit(pFence); if (skip) { return true; } unordered_set<VkSemaphore> signaled_semaphores; unordered_set<VkSemaphore> unsignaled_semaphores; unordered_set<VkSemaphore> internal_semaphores; vector<VkCommandBuffer> current_cmds; unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_STATE> localImageLayoutMap; // Now verify each individual submit for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo *submit = &pSubmits[submit_idx]; for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) { skip |= ValidateStageMaskGsTsEnables( submit->pWaitDstStageMask[i], "vkQueueSubmit()", "VUID-VkSubmitInfo-pWaitDstStageMask-00076", "VUID-VkSubmitInfo-pWaitDstStageMask-00077", "VUID-VkSubmitInfo-pWaitDstStageMask-02089", "VUID-VkSubmitInfo-pWaitDstStageMask-02090"); VkSemaphore semaphore = submit->pWaitSemaphores[i]; auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (unsignaled_semaphores.count(semaphore) || (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "%s is waiting on %s that has no way to be signaled.", report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str()); } else { signaled_semaphores.erase(semaphore); unsignaled_semaphores.insert(semaphore); } } if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) { internal_semaphores.insert(semaphore); } } for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) { VkSemaphore semaphore = submit->pSignalSemaphores[i]; auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "%s is signaling %s that was previously signaled by %s but has not since " "been waited on by any queue.", report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(pSemaphore->signaler.first).c_str()); } else { unsignaled_semaphores.erase(semaphore); signaled_semaphores.insert(semaphore); } } } QFOTransferCBScoreboards<VkImageMemoryBarrier> qfo_image_scoreboards; QFOTransferCBScoreboards<VkBufferMemoryBarrier> qfo_buffer_scoreboards; for (uint32_t i = 0; i < submit->commandBufferCount; i++) { auto cb_node = GetCBState(submit->pCommandBuffers[i]); if (cb_node) { skip |= ValidateCmdBufImageLayouts(cb_node, imageLayoutMap, localImageLayoutMap); current_cmds.push_back(submit->pCommandBuffers[i]); skip |= ValidatePrimaryCommandBufferState( cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]), &qfo_image_scoreboards, 
&qfo_buffer_scoreboards); skip |= ValidateQueueFamilyIndices(cb_node, queue); // Potential early exit here as bad object state may crash in delayed function calls if (skip) { return true; } // Call submit-time functions to validate/update state for (auto &function : cb_node->queue_submit_functions) { skip |= function(); } for (auto &function : cb_node->eventUpdates) { skip |= function(queue); } for (auto &function : cb_node->queryUpdates) { skip |= function(queue); } } } auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupSubmitInfo>(submit->pNext); if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) { for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) { skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i], VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue), "VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086"); } } } return skip; } void CoreChecks::PreCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) { if (enabled.gpu_validation && device_extensions.vk_ext_descriptor_indexing) { GpuPreCallRecordQueueSubmit(queue, submitCount, pSubmits, fence); } } #ifdef VK_USE_PLATFORM_ANDROID_KHR // Android-specific validation that uses types defined only on Android and only for NDK versions // that support the VK_ANDROID_external_memory_android_hardware_buffer extension. // This chunk could move into a separate core_validation_android.cpp file... ? // clang-format off // Map external format and usage flags to/from equivalent Vulkan flags // (Tables as of v1.1.92) // AHardwareBuffer Format Vulkan Format // ====================== ============= // AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM VK_FORMAT_R8G8B8A8_UNORM // AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM VK_FORMAT_R8G8B8A8_UNORM // AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM VK_FORMAT_R8G8B8_UNORM // AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM VK_FORMAT_R5G6B5_UNORM_PACK16 // AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT VK_FORMAT_R16G16B16A16_SFLOAT // AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM VK_FORMAT_A2B10G10R10_UNORM_PACK32 // AHARDWAREBUFFER_FORMAT_D16_UNORM VK_FORMAT_D16_UNORM // AHARDWAREBUFFER_FORMAT_D24_UNORM VK_FORMAT_X8_D24_UNORM_PACK32 // AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT VK_FORMAT_D24_UNORM_S8_UINT // AHARDWAREBUFFER_FORMAT_D32_FLOAT VK_FORMAT_D32_SFLOAT // AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT VK_FORMAT_D32_SFLOAT_S8_UINT // AHARDWAREBUFFER_FORMAT_S8_UINT VK_FORMAT_S8_UINT // The AHARDWAREBUFFER_FORMAT_* are an enum in the NDK headers, but get passed in to Vulkan // as uint32_t. Casting the enums here avoids scattering casts around in the code.
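// Illustrative lookup: an AHardwareBuffer described with AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM
// maps to VK_FORMAT_R5G6B5_UNORM_PACK16, i.e.
//   VkFormat fmt = ahb_format_map_a2v[(uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM];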
std::map<uint32_t, VkFormat> ahb_format_map_a2v = { { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM, VK_FORMAT_R8G8B8A8_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM, VK_FORMAT_R5G6B5_UNORM_PACK16 }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT, VK_FORMAT_R16G16B16A16_SFLOAT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM, VK_FORMAT_A2B10G10R10_UNORM_PACK32 }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM, VK_FORMAT_D16_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM, VK_FORMAT_X8_D24_UNORM_PACK32 }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT, VK_FORMAT_D32_SFLOAT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT, VK_FORMAT_S8_UINT } }; // AHardwareBuffer Usage Vulkan Usage or Creation Flag (Intermixed - Aargh!) // ===================== =================================================== // None VK_IMAGE_USAGE_TRANSFER_SRC_BIT // None VK_IMAGE_USAGE_TRANSFER_DST_BIT // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_SAMPLED_BIT // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT // AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT // AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT // AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE None // AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT VK_IMAGE_CREATE_PROTECTED_BIT // None VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT // None VK_IMAGE_CREATE_EXTENDED_USAGE_BIT // Same casting rationale. De-mixing the table to prevent type confusion and aliasing std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = { { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE, (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) }, { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT }, { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent }; std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = { { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT }, { (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT, VK_IMAGE_CREATE_PROTECTED_BIT }, { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent }; std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = { { VK_IMAGE_USAGE_SAMPLED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE }, { VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE }, { VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT }, }; std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = { { VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP }, { VK_IMAGE_CREATE_PROTECTED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT }, }; // clang-format on // // AHB-extension new APIs // bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) { bool skip = false; // buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags. 
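// AHardwareBuffer_describe() is the NDK query that fills an AHardwareBuffer_Desc with the
// buffer's format, usage, and dimensions; its usage mask is tested below.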
AHardwareBuffer_Desc ahb_desc; AHardwareBuffer_describe(buffer, &ahb_desc); uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE | AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER; if (0 == (ahb_desc.usage & required_flags)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884", "vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64 ") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.", ahb_desc.usage); } return skip; } void CoreChecks::PostCallRecordGetAndroidHardwareBufferPropertiesANDROID(VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties, VkResult result) { if (VK_SUCCESS != result) return; auto ahb_format_props = lvl_find_in_chain<VkAndroidHardwareBufferFormatPropertiesANDROID>(pProperties->pNext); if (ahb_format_props) { ahb_ext_formats_set.insert(ahb_format_props->externalFormat); } } bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo, struct AHardwareBuffer **pBuffer) { bool skip = false; DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory); // VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in // VkExportMemoryAllocateInfoKHR::handleTypes when memory was created. if (!mem_info->is_export || (0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882", "vkGetMemoryAndroidHardwareBufferANDROID: %s was not allocated for export, or the " "export handleTypes (0x%" PRIx32 ") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.", report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags); } // If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo // with non-NULL image member, then that image must already be bound to memory. 
if (mem_info->is_dedicated && (VK_NULL_HANDLE != mem_info->dedicated_image)) { auto image_state = GetImageState(mem_info->dedicated_image); if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count(pInfo->memory)))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883", "vkGetMemoryAndroidHardwareBufferANDROID: %s was allocated using a dedicated " "%s, but that image is not bound to the VkDeviceMemory object.", report_data->FormatHandle(pInfo->memory).c_str(), report_data->FormatHandle(mem_info->dedicated_image).c_str()); } } return skip; } // // AHB-specific validation within non-AHB APIs // bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) { bool skip = false; auto import_ahb_info = lvl_find_in_chain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext); auto exp_mem_alloc_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(alloc_info->pNext); auto mem_ded_alloc_info = lvl_find_in_chain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext); if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) { // This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID AHardwareBuffer_Desc ahb_desc = {}; AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc); // If buffer is not NULL, it must be a valid Android hardware buffer object with AHardwareBuffer_Desc::format and // AHardwareBuffer_Desc::usage compatible with Vulkan as described in Android Hardware Buffers. // // BLOB & GPU_DATA_BUFFER combo specifically allowed if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) { // Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables // Usage must have at least one bit from the table. 
// It may have additional bits not in the table uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE | AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT; if ((0 == (ahb_desc.usage & ahb_equiv_usage_bits)) || (0 == ahb_format_map_a2v.count(ahb_desc.format))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881", "vkAllocateMemory: The AHardwareBuffer_Desc's format ( %u ) and/or usage ( 0x%" PRIx64 " ) are not compatible with Vulkan.", ahb_desc.format, ahb_desc.usage); } } // Collect external buffer info VkPhysicalDeviceExternalBufferInfo pdebi = {}; pdebi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO; pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) { pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE]; } if (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT & ahb_desc.usage) { pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT]; } VkExternalBufferProperties ext_buf_props = {}; ext_buf_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES; DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props); // Collect external format info VkPhysicalDeviceExternalImageFormatInfo pdeifi = {}; pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO; pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; VkPhysicalDeviceImageFormatInfo2 pdifi2 = {}; pdifi2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2; pdifi2.pNext = &pdeifi; if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format]; pdifi2.type = VK_IMAGE_TYPE_2D; // Seems likely pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL; // Ditto if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) { pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE]; } if (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT & ahb_desc.usage) { pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT]; } if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) { pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP]; } if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) { pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT]; } VkExternalImageFormatProperties ext_img_fmt_props = {}; ext_img_fmt_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES; VkImageFormatProperties2 ifp2 = {}; ifp2.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2; ifp2.pNext = &ext_img_fmt_props; VkResult fmt_lookup_result = GetPDImageFormatProperties2(&pdifi2, &ifp2); // If buffer is not NULL, Android hardware buffers must be supported for import, as reported by // VkExternalImageFormatProperties or VkExternalBufferProperties.
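// Import support may be reported through either query path: only when the external-buffer
// properties lack VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT do we fall back to the
// external-image format properties result checked below.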
if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) { if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880", "vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties " "structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag."); } } // Retrieve buffer and format properties of the provided AHardwareBuffer VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {}; ahb_format_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID; VkAndroidHardwareBufferPropertiesANDROID ahb_props = {}; ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID; ahb_props.pNext = &ahb_format_props; DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props); // allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer if (alloc_info->allocationSize != ahb_props.allocationSize) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-allocationSize-02383", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct, allocationSize (%" PRId64 ") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").", alloc_info->allocationSize, ahb_props.allocationSize); } // memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer // Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex; if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct, memoryTypeIndex (%" PRId32 ") does not correspond to a bit set in AHardwareBuffer's reported " "memoryTypeBits bitmask (0x%" PRIx32 ").", alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits); } // Checks for allocations without a dedicated allocation requirement if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) { // the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes // AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02384", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct without a dedicated allocation requirement, while the AHardwareBuffer_Desc's format ( %u ) is not " "AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.", ahb_desc.format, 
ahb_desc.usage); } } else { // Checks specific to import with a dedicated allocation requirement VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo); // The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT or // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02386", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a " "dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64 ") contains neither AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.", ahb_desc.usage); } // the format of image must be VK_FORMAT_UNDEFINED or the format returned by // vkGetAndroidHardwareBufferPropertiesANDROID if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02387", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained " "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's " "format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).", string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format)); } // The width, height, and array layer dimensions of image and the Android hardware buffer must be identical if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) || (ici->arrayLayers != ahb_desc.layers)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02388", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained " "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's " "width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32 ") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").", ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height, ahb_desc.layers); } // If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must // have either a full mipmap chain or exactly 1 mip level. // // NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The // AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag. Instead, // its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence indicates // that the Android hardware buffer contains only a single mip level." // // TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct. // Clarification requested.
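// Example: a 1024x512 image has a full mip chain of 11 levels (floor(log2(1024)) + 1), so
// with GPU_MIPMAP_COMPLETE set, mipLevels of 1 or 11 passes and any other count is flagged.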
if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) && (ici->mipLevels != FullMipChainLevels(ici->extent))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02389", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, " "usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32 ") is neither 1 nor full mip " "chain levels (%" PRId32 ").", ici->mipLevels, FullMipChainLevels(ici->extent)); } // each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a // corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's // AHardwareBuffer_Desc::usage if (ici->usage & ~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02390", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, " "dedicated image usage bits include one or more with no AHardwareBuffer equivalent."); } bool illegal_usage = false; std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT}; for (VkImageUsageFlags ubit : usages) { if (ici->usage & ubit) { uint64_t ahb_usage = ahb_usage_map_v2a[ubit]; if (0 == (ahb_usage & ahb_desc.usage)) illegal_usage = true; } } if (illegal_usage) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-02390", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained " "VkImportAndroidHardwareBufferInfoANDROID, one or more AHardwareBuffer usage bits equivalent to " "the provided image's usage bits are missing from AHardwareBuffer_Desc.usage."); } } } else { // Not an import if ((exp_mem_alloc_info) && (mem_ded_alloc_info) && (0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) && (VK_NULL_HANDLE != mem_ded_alloc_info->image)) { // This is an Android HW Buffer export if (0 != alloc_info->allocationSize) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-01874", "vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, " "but allocationSize is non-zero."); } } else { if (0 == alloc_info->allocationSize) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-pNext-01874", "vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0."); }; } } return skip; } bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(const VkImage image) { bool skip = false; IMAGE_STATE *image_state = GetImageState(image); if (image_state->imported_ahb && (0 == image_state->GetBoundMemory().size())) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), 
"VUID-VkImageMemoryRequirementsInfo2-image-01897", "vkGetImageMemoryRequirements2: Attempt to query layout from an image created with " "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been " "bound to memory."); } return skip; } static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) { bool skip = false; const VkAndroidHardwareBufferUsageANDROID *ahb_usage = lvl_find_in_chain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext); if (nullptr != ahb_usage) { const VkPhysicalDeviceExternalImageFormatInfo *pdeifi = lvl_find_in_chain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext); if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868", "vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained " "VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained " "VkPhysicalDeviceExternalImageFormatInfo struct with handleType " "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID."); } } return skip; } bool CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info) { const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext); if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) { if (VK_FORMAT_UNDEFINED != create_info->format) { return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904", "vkCreateSamplerYcbcrConversion[KHR]: CreateInfo format is not VK_FORMAT_UNDEFINED while " "there is a chained VkExternalFormatANDROID struct."); } } else if (VK_FORMAT_UNDEFINED == create_info->format) { return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904", "vkCreateSamplerYcbcrConversion[KHR]: CreateInfo format is VK_FORMAT_UNDEFINED with no chained " "VkExternalFormatANDROID struct."); } return false; } void CoreChecks::RecordCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info, VkSamplerYcbcrConversion ycbcr_conversion) { const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext); if (ext_format_android && (0 != ext_format_android->externalFormat)) { ycbcr_conversion_ahb_fmt_map.emplace(ycbcr_conversion, ext_format_android->externalFormat); } }; void CoreChecks::RecordDestroySamplerYcbcrConversionANDROID(VkSamplerYcbcrConversion ycbcr_conversion) { ycbcr_conversion_ahb_fmt_map.erase(ycbcr_conversion); }; #else // !VK_USE_PLATFORM_ANDROID_KHR bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) { return false; } static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) { return false; } bool 
CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info) { return false; } bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(const VkImage image) { return false; } void CoreChecks::RecordCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info, VkSamplerYcbcrConversion ycbcr_conversion){}; void CoreChecks::RecordDestroySamplerYcbcrConversionANDROID(VkSamplerYcbcrConversion ycbcr_conversion){}; #endif // VK_USE_PLATFORM_ANDROID_KHR bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) { bool skip = false; if (memObjMap.size() >= phys_dev_props.limits.maxMemoryAllocationCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUIDUndefined, "Number of currently valid memory objects is not less than the maximum allowed (%u).", phys_dev_props.limits.maxMemoryAllocationCount); } if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateAllocateMemoryANDROID(pAllocateInfo); } else { if (0 == pAllocateInfo->allocationSize) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateInfo-allocationSize-00638", "vkAllocateMemory: allocationSize is 0."); }; } auto chained_flags_struct = lvl_find_in_chain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext); if (chained_flags_struct && chained_flags_struct->flags == VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT) { skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675"); skip |= ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676"); } // TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744 return skip; } void CoreChecks::PostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory, VkResult result) { if (VK_SUCCESS == result) { AddMemObjInfo(device, *pMemory, pAllocateInfo); } return; } // For the given obj node, if it is in use, flag a validation error and return the callback result, else return false bool CoreChecks::ValidateObjectNotInUse(BASE_NODE *obj_node, const VulkanTypedHandle &obj_struct, const char *caller_name, const char *error_code) { if (disabled.object_in_use) return false; bool skip = false; if (obj_node->in_use.load()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle, error_code, "Cannot call %s on %s that is currently in use by a command buffer.", caller_name, report_data->FormatHandle(obj_struct).c_str()); } return skip; } bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) { DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem); const VulkanTypedHandle obj_struct(mem, kVulkanObjectTypeDeviceMemory); bool skip = false; if (mem_info) { skip |= ValidateObjectNotInUse(mem_info, obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677"); } return skip; } void CoreChecks::PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory mem,
const VkAllocationCallbacks *pAllocator) { if (!mem) return; DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem); const VulkanTypedHandle obj_struct(mem, kVulkanObjectTypeDeviceMemory); // Clear mem binding for any bound objects for (const auto &obj : mem_info->obj_bindings) { log_msg(report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle, kVUID_Core_MemTrack_FreedMemRef, "%s still has a reference to %s.", report_data->FormatHandle(obj).c_str(), report_data->FormatHandle(mem_info->mem).c_str()); BINDABLE *bindable_state = nullptr; switch (obj.type) { case kVulkanObjectTypeImage: bindable_state = GetImageState(obj.Cast<VkImage>()); break; case kVulkanObjectTypeBuffer: bindable_state = GetBufferState(obj.Cast<VkBuffer>()); break; default: // Should only have buffer or image objects bound to memory assert(0); } assert(bindable_state); bindable_state->binding.mem = MEMORY_UNBOUND; bindable_state->UpdateBoundMemorySet(); } // Any bound cmd buffers are now invalid InvalidateCommandBuffers(mem_info->cb_bindings, obj_struct); memObjMap.erase(mem); } // Validate that given Map memory range is valid. This means that the memory should not already be mapped, // and that the size of the map range should be: // 1. Not zero // 2. Within the size of the memory allocation bool CoreChecks::ValidateMapMemRange(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) { bool skip = false; if (size == 0) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap, "VkMapMemory: Attempting to map memory range of size zero"); } auto mem_element = memObjMap.find(mem); if (mem_element != memObjMap.end()) { auto mem_info = mem_element->second.get(); // It is an application error to call VkMapMemory on an object that is already mapped if (mem_info->mem_range.size != 0) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap, "VkMapMemory: Attempting to map memory on an already-mapped %s.", report_data->FormatHandle(mem).c_str()); } // Validate that offset + size is within object's allocationSize if (size == VK_WHOLE_SIZE) { if (offset >= mem_info->alloc_info.allocationSize) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap, "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64, offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize); } } else { if ((offset + size) > mem_info->alloc_info.allocationSize) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), "VUID-vkMapMemory-size-00681", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".", offset, size + offset, mem_info->alloc_info.allocationSize); } } } return skip; } void CoreChecks::StoreMemRanges(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) { auto mem_info = GetDevMemState(mem); if (mem_info) { mem_info->mem_range.offset = offset; mem_info->mem_range.size = size; } } // Guard value for pad data static char NoncoherentMemoryFillValue = 0xb; void CoreChecks::InitializeAndTrackMemory(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, void **ppData) { auto mem_info = GetDevMemState(mem); if 
(mem_info) { mem_info->p_driver_data = *ppData; uint32_t index = mem_info->alloc_info.memoryTypeIndex; if (phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) { mem_info->shadow_copy = 0; } else { if (size == VK_WHOLE_SIZE) { size = mem_info->alloc_info.allocationSize - offset; } mem_info->shadow_pad_size = phys_dev_props.limits.minMemoryMapAlignment; assert(SafeModulo(mem_info->shadow_pad_size, phys_dev_props.limits.minMemoryMapAlignment) == 0); // Ensure start of mapped region reflects hardware alignment constraints uint64_t map_alignment = phys_dev_props.limits.minMemoryMapAlignment; // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment. uint64_t start_offset = offset % map_alignment; // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes. mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset)); mem_info->shadow_copy = reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) & ~(map_alignment - 1)) + start_offset; assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset, map_alignment) == 0); memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size)); *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size; } } } void CoreChecks::RetireFence(VkFence fence) { auto pFence = GetFenceState(fence); if (pFence && pFence->scope == kSyncScopeInternal) { if (pFence->signaler.first != VK_NULL_HANDLE) { // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed. RetireWorkOnQueue(GetQueueState(pFence->signaler.first), pFence->signaler.second); } else { // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark // the fence as retired. pFence->state = FENCE_RETIRED; } } } bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) { // Verify fence status of submitted fences bool skip = false; for (uint32_t i = 0; i < fenceCount; i++) { skip |= VerifyQueueStateToFence(pFences[i]); } return skip; } void CoreChecks::PostCallRecordWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout, VkResult result) { if (VK_SUCCESS != result) return; // When we know that all fences are complete we can clean/remove their CBs if ((VK_TRUE == waitAll) || (1 == fenceCount)) { for (uint32_t i = 0; i < fenceCount; i++) { RetireFence(pFences[i]); } } // NOTE : Alternate case not handled here is when some fences have completed. In // this case for app to guarantee which fences completed it will have to call // vkGetFenceStatus() at which point we'll clean/remove their CBs if complete. 
void CoreChecks::PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result) {
    if (VK_SUCCESS != result) return;
    RetireFence(fence);
}

void CoreChecks::RecordGetDeviceQueueState(uint32_t queue_family_index, VkQueue queue) {
    // Add queue to tracking set only if it is new
    auto queue_is_new = queues.emplace(queue);
    if (queue_is_new.second == true) {
        QUEUE_STATE *queue_state = &queueMap[queue];
        queue_state->queue = queue;
        queue_state->queueFamilyIndex = queue_family_index;
        queue_state->seq = 0;
    }
}

bool CoreChecks::ValidateGetDeviceQueue(uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue,
                                        const char *valid_qfi_vuid, const char *qfi_in_range_vuid) {
    bool skip = false;

    skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex", valid_qfi_vuid);
    const auto &queue_data = queue_family_index_map.find(queueFamilyIndex);
    if (queue_data != queue_family_index_map.end() && queue_data->second <= queueIndex) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device), qfi_in_range_vuid,
                        "vkGetDeviceQueue: queueIndex (=%" PRIu32
                        ") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32
                        ") when the device was created (i.e. is not less than %" PRIu32 ").",
                        queueIndex, queueFamilyIndex, queue_data->second);
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    return ValidateGetDeviceQueue(queueFamilyIndex, queueIndex, pQueue, "VUID-vkGetDeviceQueue-queueFamilyIndex-00384",
                                  "VUID-vkGetDeviceQueue-queueIndex-00385");
}

void CoreChecks::PostCallRecordGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    RecordGetDeviceQueueState(queueFamilyIndex, *pQueue);
}

void CoreChecks::PostCallRecordGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
    RecordGetDeviceQueueState(pQueueInfo->queueFamilyIndex, *pQueue);
}

bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) {
    QUEUE_STATE *queue_state = GetQueueState(queue);
    return VerifyQueueStateToSeq(queue_state, queue_state->seq + queue_state->submissions.size());
}

void CoreChecks::PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result) {
    if (VK_SUCCESS != result) return;
    QUEUE_STATE *queue_state = GetQueueState(queue);
    RetireWorkOnQueue(queue_state, queue_state->seq + queue_state->submissions.size());
}

bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) {
    bool skip = false;
    for (auto &queue : queueMap) {
        skip |= VerifyQueueStateToSeq(&queue.second, queue.second.seq + queue.second.submissions.size());
    }
    return skip;
}

void CoreChecks::PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result) {
    if (VK_SUCCESS != result) return;
    for (auto &queue : queueMap) {
        RetireWorkOnQueue(&queue.second, queue.second.seq + queue.second.submissions.size());
    }
}

bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    FENCE_STATE *fence_node = GetFenceState(fence);
    bool skip = false;
    if (fence_node) {
        if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(fence), "VUID-vkDestroyFence-fence-01120", "%s is in use.",
                            report_data->FormatHandle(fence).c_str());
        }
    }
    return skip;
}
void CoreChecks::PreCallRecordDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    if (!fence) return;
    fenceMap.erase(fence);
}

bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore,
                                                 const VkAllocationCallbacks *pAllocator) {
    SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
    const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore);
    bool skip = false;
    if (sema_node) {
        skip |= ValidateObjectNotInUse(sema_node, obj_struct, "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    if (!semaphore) return;
    semaphoreMap.erase(semaphore);
}

bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    EVENT_STATE *event_state = GetEventState(event);
    const VulkanTypedHandle obj_struct(event, kVulkanObjectTypeEvent);
    bool skip = false;
    if (event_state) {
        skip |= ValidateObjectNotInUse(event_state, obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    if (!event) return;
    EVENT_STATE *event_state = GetEventState(event);
    const VulkanTypedHandle obj_struct(event, kVulkanObjectTypeEvent);
    InvalidateCommandBuffers(event_state->cb_bindings, obj_struct);
    eventMap.erase(event);
}

bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool,
                                                 const VkAllocationCallbacks *pAllocator) {
    if (disabled.query_validation) return false;
    QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool);
    const VulkanTypedHandle obj_struct(queryPool, kVulkanObjectTypeQueryPool);
    bool skip = false;
    if (qp_state) {
        skip |= ValidateObjectNotInUse(qp_state, obj_struct, "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    if (!queryPool) return;
    QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool);
    const VulkanTypedHandle obj_struct(queryPool, kVulkanObjectTypeQueryPool);
    InvalidateCommandBuffers(qp_state->cb_bindings, obj_struct);
    queryPoolMap.erase(queryPool);
}
bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                    uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                    VkQueryResultFlags flags) {
    if (disabled.query_validation) return false;
    bool skip = false;
    skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-00814", "VUID-vkGetQueryPoolResults-flags-00815", stride,
                                    "dataSize", dataSize, flags);
    auto query_pool_state = queryPoolMap.find(queryPool);
    if (query_pool_state != queryPoolMap.end()) {
        if ((query_pool_state->second->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) &&
            (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
            skip |= log_msg(
                report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, HandleToUint64(queryPool),
                "VUID-vkGetQueryPoolResults-queryType-00818",
                "%s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
                report_data->FormatHandle(queryPool).c_str());
        }
    }

    QueryObject query_obj{queryPool, 0u};
    QueryResultType result_type;
    for (uint32_t i = 0; i < queryCount; ++i) {
        query_obj.query = firstQuery + i;
        auto query_data = queryToStateMap.find(query_obj);
        if (query_data != queryToStateMap.end()) {
            result_type = GetQueryResultType(query_data->second, flags);
        } else {
            result_type = QUERYRESULT_UNKNOWN;
        }
        if (result_type != QUERYRESULT_SOME_DATA) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
                            HandleToUint64(queryPool), kVUID_Core_DrawState_InvalidQuery,
                            "vkGetQueryPoolResults() on %s and query %" PRIu32 ": %s",
                            report_data->FormatHandle(queryPool).c_str(), query_obj.query, string_QueryResultType(result_type));
        }
    }
    return skip;
}

void CoreChecks::PostCallRecordGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                   VkQueryResultFlags flags, VkResult result) {
    if ((VK_SUCCESS != result) && (VK_NOT_READY != result)) return;
    // TODO: this has been modified but is still wasteful.
    unordered_set<QueryObject> query_set;
    for (uint32_t i = 0; i < queryCount; ++i) {
        QueryObject query = {queryPool, firstQuery + i};
        auto query_state_pair = queryToStateMap.find(query);
        if ((query_state_pair != queryToStateMap.end()) && (query_state_pair->second == QUERYSTATE_AVAILABLE)) {
            query_set.insert(query);
        }
    }

    unordered_map<QueryObject, std::vector<VkCommandBuffer>> queries_in_flight;
    for (auto &cmd_buffer : commandBufferMap) {
        if (cmd_buffer.second->in_use.load()) {
            for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
                if (query_set.find(query_state_pair.first) != query_set.end()) {
                    queries_in_flight[query_state_pair.first].push_back(cmd_buffer.first);
                }
            }
        }
    }

    for (auto qif_pair : queries_in_flight) {
        for (auto cmd_buffer : qif_pair.second) {
            auto cb = GetCBState(cmd_buffer);
            auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(qif_pair.first);
            if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
                for (auto event : query_event_pair->second) {
                    eventMap[event].needsSignaled = true;
                }
            }
        }
    }
}

// Return true if given ranges intersect, else false
// Prereq : For both ranges, range->end - range->start > 0. This case should have already resulted
//          in an error so not checking that here
// pad_ranges bool indicates a linear and non-linear comparison which requires padding
// In the case where padding is required, if an alias is encountered then a validation error is reported and skip
//  may be set by the callback function so caller should merge in skip value if padding case is possible.
// This check can be skipped by passing skip_checks=true, for call sites outside the validation path.
bool CoreChecks::RangesIntersect(MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip, bool skip_checks) {
    *skip = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    if (range1->linear != range2->linear) {
        pad_align = phys_dev_props.limits.bufferImageGranularity;
    }
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;

    if (!skip_checks && (range1->linear != range2->linear)) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip |= log_msg(
            report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, kVUID_Core_MemTrack_InvalidAliasing,
            "%s %s is aliased with %s %s which may indicate a bug. For further info refer to the Buffer-Image Granularity "
            "section of the Vulkan specification. "
            "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html#resources-bufferimagegranularity)",
            r1_linear_str, report_data->FormatHandle(MemoryRangeTypedHandle(*range1)).c_str(), r2_linear_str,
            report_data->FormatHandle(MemoryRangeTypedHandle(*range2)).c_str());
    }
    // Ranges intersect
    return true;
}

// Simplified RangesIntersect that calls above function to check range1 for intersection with offset & end addresses
bool CoreChecks::RangesIntersect(MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/size
    MEMORY_RANGE range_wrap;
    // Synch linear with range1 to avoid padding and potential validation error case
    range_wrap.linear = range1->linear;
    range_wrap.start = offset;
    range_wrap.end = end;
    bool tmp_bool;
    return RangesIntersect(range1, &range_wrap, &tmp_bool, true);
}

bool CoreChecks::ValidateInsertMemoryRange(uint64_t handle, DEVICE_MEMORY_STATE *mem_info, VkDeviceSize memoryOffset,
                                           VkMemoryRequirements memRequirements, bool is_image, bool is_linear,
                                           const char *api_name) {
    bool skip = false;

    MEMORY_RANGE range;
    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();

    // Check for aliasing problems.
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (RangesIntersect(&range, check_range, &intersection_error, false)) {
            skip |= intersection_error;
            range.aliases.insert(check_range);
        }
    }

    if (memoryOffset >= mem_info->alloc_info.allocationSize) {
        const char *error_code =
            is_image ? "VUID-vkBindImageMemory-memoryOffset-01046" : "VUID-vkBindBufferMemory-memoryOffset-01031";
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        HandleToUint64(mem_info->mem), error_code,
                        "In %s, attempting to bind %s to %s, memoryOffset=0x%" PRIxLEAST64
                        " must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
                        api_name, report_data->FormatHandle(mem_info->mem).c_str(),
                        report_data->FormatHandle(MemoryRangeTypedHandle(range)).c_str(), memoryOffset,
                        mem_info->alloc_info.allocationSize);
    }

    return skip;
}
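// Worked example (illustrative only, not used by the checks) of the
// page-rounding test in RangesIntersect() above: when two ranges differ in
// linearity, both endpoints are rounded down to a bufferImageGranularity page,
// and only fully disjoint pages escape the aliasing warning. Assumes a
// power-of-two granularity, as the spec requires.
static constexpr bool PagesDisjointSketch(uint64_t r1_start, uint64_t r1_end, uint64_t r2_start, uint64_t r2_end,
                                          uint64_t gran) {
    return ((r1_end & ~(gran - 1)) < (r2_start & ~(gran - 1))) || ((r1_start & ~(gran - 1)) > (r2_end & ~(gran - 1)));
}
static_assert(PagesDisjointSketch(0x0, 0xFFF, 0x1000, 0x1FFF, 0x1000), "ranges on adjacent 4K pages do not alias");
static_assert(!PagesDisjointSketch(0x0, 0xFFF, 0x800, 0x17FF, 0x1000), "ranges sharing page 0x0 are treated as intersecting");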
// Object with given handle is being bound to memory w/ given mem_info struct.
//  Track the newly bound memory range with given memoryOffset
//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
//  and non-linear range incorrectly overlap.
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
void CoreChecks::InsertMemoryRange(uint64_t handle, DEVICE_MEMORY_STATE *mem_info, VkDeviceSize memoryOffset,
                                   VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update Memory aliasing
    // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
    //  inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (RangesIntersect(&range, check_range, &intersection_error, true)) {
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);
}

bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
                                                VkMemoryRequirements mem_reqs, bool is_linear, const char *api_name) {
    return ValidateInsertMemoryRange(HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
}
void CoreChecks::InsertImageMemoryRange(VkImage image, DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
                                        VkMemoryRequirements mem_reqs, bool is_linear) {
    InsertMemoryRange(HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
                                                 VkMemoryRequirements mem_reqs, const char *api_name) {
    return ValidateInsertMemoryRange(HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
}
void CoreChecks::InsertBufferMemoryRange(VkBuffer buffer, DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
                                         VkMemoryRequirements mem_reqs) {
    InsertMemoryRange(HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
// is_image indicates if handle is for image or buffer
// This function will also remove the handle-to-index mapping from the appropriate
//  map and clean up any aliases for range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEMORY_STATE *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

void CoreChecks::RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEMORY_STATE *mem_info) {
    RemoveMemoryRange(handle, mem_info, false);
}

void CoreChecks::RemoveImageMemoryRange(uint64_t handle, DEVICE_MEMORY_STATE *mem_info) {
    RemoveMemoryRange(handle, mem_info, true);
}

bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEMORY_STATE *mem_info, const uint32_t memory_type_bits, const char *funcName,
                                     const char *msgCode) {
    bool skip = false;
    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                       HandleToUint64(mem_info->mem), msgCode,
                       "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                       "type (0x%X) of %s.",
                       funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
                       report_data->FormatHandle(mem_info->mem).c_str());
    }
    return skip;
}
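// Example of the compatibility test in ValidateMemoryTypes() above (illustrative
// only): if vkGetBufferMemoryRequirements() reports memoryTypeBits == 0b0110,
// then only memory allocated from type index 1 or 2 passes the check.
static_assert(((1u << 1) & 0x6u) != 0, "type index 1 is compatible with memoryTypeBits 0b0110");
static_assert(((1u << 0) & 0x6u) == 0, "type index 0 would trigger the error above");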
bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                          const char *api_name) {
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    bool skip = false;
    if (buffer_state) {
        // Track objects tied to memory
        uint64_t buffer_handle = HandleToUint64(buffer);
        const VulkanTypedHandle obj_struct(buffer, kVulkanObjectTypeBuffer);
        skip = ValidateSetMemBinding(mem, obj_struct, api_name);
        if (!buffer_state->memory_requirements_checked) {
            // There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
            // BindBufferMemory, but it's implied in that memory being bound must conform with VkMemoryRequirements from
            // vkGetBufferMemoryRequirements()
            skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
                            kVUID_Core_DrawState_InvalidBuffer,
                            "%s: Binding memory to %s but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                            api_name, report_data->FormatHandle(buffer).c_str());
            // Make the call for them so we can verify the state
            DispatchGetBufferMemoryRequirements(device, buffer, &buffer_state->requirements);
        }

        // Validate bound memory range information
        const auto mem_info = GetDevMemState(mem);
        if (mem_info) {
            skip |= ValidateInsertBufferMemoryRange(buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
            skip |= ValidateMemoryTypes(mem_info, buffer_state->requirements.memoryTypeBits, api_name,
                                        "VUID-vkBindBufferMemory-memory-01035");
        }

        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
                            "VUID-vkBindBufferMemory-memoryOffset-01036",
                            "%s: memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
                            api_name, memoryOffset, buffer_state->requirements.alignment);
        }

        if (mem_info) {
            // Validate memory requirements size
            if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
                                "VUID-vkBindBufferMemory-size-01037",
                                "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
                                api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
            }

            // Validate dedicated allocation
            if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
                // TODO: Add vkBindBufferMemory2KHR error message when added to spec.
                auto validation_error = kVUIDUndefined;
                if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
                    validation_error = "VUID-vkBindBufferMemory-memory-01508";
                }
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
                                validation_error,
                                "%s: for dedicated %s, VkMemoryDedicatedAllocateInfoKHR::buffer %s must be equal "
                                "to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
                                api_name, report_data->FormatHandle(mem).c_str(),
                                report_data->FormatHandle(mem_info->dedicated_buffer).c_str(),
                                report_data->FormatHandle(buffer).c_str(), memoryOffset);
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    const char *api_name = "vkBindBufferMemory()";
    return ValidateBindBufferMemory(buffer, mem, memoryOffset, api_name);
}

void CoreChecks::UpdateBindBufferMemoryState(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    if (buffer_state) {
        // Track bound memory range information
        auto mem_info = GetDevMemState(mem);
        if (mem_info) {
            InsertBufferMemoryRange(buffer, mem_info, memoryOffset, buffer_state->requirements);
        }
        // Track objects tied to memory
        SetMemBinding(mem, buffer_state, memoryOffset, VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer));
    }
}

void CoreChecks::PostCallRecordBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                                VkResult result) {
    if (VK_SUCCESS != result) return;
    UpdateBindBufferMemoryState(buffer, mem, memoryOffset);
}

bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
                                                  const VkBindBufferMemoryInfoKHR *pBindInfos) {
    char api_name[64];
    bool skip = false;

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2() pBindInfos[%u]", i);
        skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
    }
    return skip;
}

bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                     const VkBindBufferMemoryInfoKHR *pBindInfos) {
    char api_name[64];
    bool skip = false;

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2KHR() pBindInfos[%u]", i);
        skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
    }
    return skip;
}

void CoreChecks::PostCallRecordBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
                                                 const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) {
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        UpdateBindBufferMemoryState(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
    }
}
void CoreChecks::PostCallRecordBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                    const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) {
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        UpdateBindBufferMemoryState(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
    }
}

void CoreChecks::RecordGetBufferMemoryRequirementsState(VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    if (buffer_state) {
        buffer_state->requirements = *pMemoryRequirements;
        buffer_state->memory_requirements_checked = true;
    }
}

void CoreChecks::PostCallRecordGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
                                                           VkMemoryRequirements *pMemoryRequirements) {
    RecordGetBufferMemoryRequirementsState(buffer, pMemoryRequirements);
}

void CoreChecks::PostCallRecordGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
                                                            VkMemoryRequirements2KHR *pMemoryRequirements) {
    RecordGetBufferMemoryRequirementsState(pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void CoreChecks::PostCallRecordGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
                                                               VkMemoryRequirements2KHR *pMemoryRequirements) {
    RecordGetBufferMemoryRequirementsState(pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo) {
    bool skip = false;
    if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
        skip |= ValidateGetImageMemoryRequirements2ANDROID(pInfo->image);
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                            VkMemoryRequirements2 *pMemoryRequirements) {
    return ValidateGetImageMemoryRequirements2(pInfo);
}

bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                               VkMemoryRequirements2 *pMemoryRequirements) {
    return ValidateGetImageMemoryRequirements2(pInfo);
}

void CoreChecks::RecordGetImageMemoryRequiementsState(VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    IMAGE_STATE *image_state = GetImageState(image);
    if (image_state) {
        image_state->requirements = *pMemoryRequirements;
        image_state->memory_requirements_checked = true;
    }
}

void CoreChecks::PostCallRecordGetImageMemoryRequirements(VkDevice device, VkImage image,
                                                          VkMemoryRequirements *pMemoryRequirements) {
    RecordGetImageMemoryRequiementsState(image, pMemoryRequirements);
}

void CoreChecks::PostCallRecordGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                           VkMemoryRequirements2 *pMemoryRequirements) {
    RecordGetImageMemoryRequiementsState(pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void CoreChecks::PostCallRecordGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                              VkMemoryRequirements2 *pMemoryRequirements) {
    RecordGetImageMemoryRequiementsState(pInfo->image, &pMemoryRequirements->memoryRequirements);
}

static void RecordGetImageSparseMemoryRequirementsState(IMAGE_STATE *image_state,
                                                        VkSparseImageMemoryRequirements *sparse_image_memory_requirements) {
    image_state->sparse_requirements.emplace_back(*sparse_image_memory_requirements);
    if (sparse_image_memory_requirements->formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
        image_state->sparse_metadata_required = true;
    }
}
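// Note on the recording above: a sparse image with a metadata mip tail reports
// one VkSparseImageMemoryRequirements entry whose formatProperties.aspectMask
// contains VK_IMAGE_ASPECT_METADATA_BIT; that entry is what flips
// image_state->sparse_metadata_required, which later sparse-binding checks can
// consult before the image is used.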
void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements(VkDevice device, VkImage image,
                                                                uint32_t *pSparseMemoryRequirementCount,
                                                                VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
    auto image_state = GetImageState(image);
    image_state->get_sparse_reqs_called = true;
    if (!pSparseMemoryRequirements) return;
    for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
        RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i]);
    }
}

void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements2(VkDevice device,
                                                                 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
                                                                 uint32_t *pSparseMemoryRequirementCount,
                                                                 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
    auto image_state = GetImageState(pInfo->image);
    image_state->get_sparse_reqs_called = true;
    if (!pSparseMemoryRequirements) return;
    for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
        assert(!pSparseMemoryRequirements[i].pNext);  // TODO: If an extension is ever added here we need to handle it
        RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i].memoryRequirements);
    }
}

void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements2KHR(
    VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo, uint32_t *pSparseMemoryRequirementCount,
    VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
    auto image_state = GetImageState(pInfo->image);
    image_state->get_sparse_reqs_called = true;
    if (!pSparseMemoryRequirements) return;
    for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
        assert(!pSparseMemoryRequirements[i].pNext);  // TODO: If an extension is ever added here we need to handle it
        RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i].memoryRequirements);
    }
}

bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
                                                                        const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
                                                                        VkImageFormatProperties2 *pImageFormatProperties) {
    // Can't wrap AHB-specific validation in a device extension check here, but no harm
    bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(report_data, pImageFormatInfo, pImageFormatProperties);
    return skip;
}

bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(
    VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
    VkImageFormatProperties2 *pImageFormatProperties) {
    // Can't wrap AHB-specific validation in a device extension check here, but no harm
    bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(report_data, pImageFormatInfo, pImageFormatProperties);
    return skip;
}

void CoreChecks::PreCallRecordDestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
                                                  const VkAllocationCallbacks *pAllocator) {
    if (!shaderModule) return;
    shaderModuleMap.erase(shaderModule);
}

bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
    const VulkanTypedHandle obj_struct(pipeline, kVulkanObjectTypePipeline);
    bool skip = false;
    if (pipeline_state) {
        skip |= ValidateObjectNotInUse(pipeline_state, obj_struct, "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    if (!pipeline) return;
    PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
    const VulkanTypedHandle obj_struct(pipeline, kVulkanObjectTypePipeline);
    // Any bound cmd buffers are now invalid
    InvalidateCommandBuffers(pipeline_state->cb_bindings, obj_struct);
    if (enabled.gpu_validation) {
        GpuPreCallRecordDestroyPipeline(pipeline);
    }
    pipelineMap.erase(pipeline);
}

void CoreChecks::PreCallRecordDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
                                                    const VkAllocationCallbacks *pAllocator) {
    if (!pipelineLayout) return;
    pipelineLayoutMap.erase(pipelineLayout);
}

bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
    const VulkanTypedHandle obj_struct(sampler, kVulkanObjectTypeSampler);
    bool skip = false;
    if (sampler_state) {
        skip |= ValidateObjectNotInUse(sampler_state, obj_struct, "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    if (!sampler) return;
    SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
    const VulkanTypedHandle obj_struct(sampler, kVulkanObjectTypeSampler);
    // Any bound cmd buffers are now invalid
    if (sampler_state) {
        InvalidateCommandBuffers(sampler_state->cb_bindings, obj_struct);
    }
    samplerMap.erase(sampler);
}

void CoreChecks::PreCallRecordDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
                                                         const VkAllocationCallbacks *pAllocator) {
    if (!descriptorSetLayout) return;
    auto layout_it = descriptorSetLayoutMap.find(descriptorSetLayout);
    if (layout_it != descriptorSetLayoutMap.end()) {
        layout_it->second.get()->MarkDestroyed();
        descriptorSetLayoutMap.erase(layout_it);
    }
}

bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                      const VkAllocationCallbacks *pAllocator) {
    DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
    const VulkanTypedHandle obj_struct(descriptorPool, kVulkanObjectTypeDescriptorPool);
    bool skip = false;
    if (desc_pool_state) {
        skip |= ValidateObjectNotInUse(desc_pool_state, obj_struct, "vkDestroyDescriptorPool",
                                       "VUID-vkDestroyDescriptorPool-descriptorPool-00303");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                    const VkAllocationCallbacks *pAllocator) {
    if (!descriptorPool) return;
    DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
    const VulkanTypedHandle obj_struct(descriptorPool, kVulkanObjectTypeDescriptorPool);
    if (desc_pool_state) {
        // Any bound cmd buffers are now invalid
        InvalidateCommandBuffers(desc_pool_state->cb_bindings, obj_struct);
        // Free sets that were in this pool
        for (auto ds : desc_pool_state->sets) {
            FreeDescriptorSet(ds);
        }
        descriptorPoolMap.erase(descriptorPool);
    }
}

// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result
//  If this is a secondary command buffer, then make sure its primary is also in-flight
//  If primary is not in-flight, then remove secondary from global in-flight set
// This function is only valid at a point when cmdBuffer is being reset or freed
bool CoreChecks::CheckCommandBufferInFlight(const CMD_BUFFER_STATE *cb_node, const char *action, const char *error_code) {
    bool skip = false;
    if (cb_node->in_use.load()) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_node->commandBuffer), error_code, "Attempt to %s %s which is in use.", action,
                        report_data->FormatHandle(cb_node->commandBuffer).c_str());
    }
    return skip;
}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
bool CoreChecks::CheckCommandBuffersInFlight(COMMAND_POOL_STATE *pPool, const char *action, const char *error_code) {
    bool skip = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        skip |= CheckCommandBufferInFlight(GetCBState(cmd_buffer), action, error_code);
    }
    return skip;
}

// Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
void CoreChecks::FreeCommandBufferStates(COMMAND_POOL_STATE *pool_state, const uint32_t command_buffer_count,
                                         const VkCommandBuffer *command_buffers) {
    for (uint32_t i = 0; i < command_buffer_count; i++) {
        auto cb_state = GetCBState(command_buffers[i]);
        // Remove references to command buffer's state and delete
        if (cb_state) {
            // reset prior to delete, removing various references to it.
            // TODO: fix this, it's insane.
            ResetCommandBufferState(cb_state->commandBuffer);
            // Remove the cb_state's references from COMMAND_POOL_STATEs
            pool_state->commandBuffers.erase(command_buffers[i]);
            // Remove the cb debug labels
            EraseCmdDebugUtilsLabel(report_data, cb_state->commandBuffer);
            // Remove CBState from CB map
            commandBufferMap.erase(cb_state->commandBuffer);
        }
    }
}

bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                                   const VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = GetCBState(pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap
        if (cb_node) {
            skip |= CheckCommandBufferInFlight(cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
        }
    }
    return skip;
}

void CoreChecks::PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                                 const VkCommandBuffer *pCommandBuffers) {
    auto pPool = GetCommandPoolState(commandPool);
    FreeCommandBufferStates(pPool, commandBufferCount, pCommandBuffers);
}

bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
    return ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex",
                                     "VUID-vkCreateCommandPool-queueFamilyIndex-01937");
}

void CoreChecks::PostCallRecordCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool,
                                                 VkResult result) {
    if (VK_SUCCESS != result) return;
    std::unique_ptr<COMMAND_POOL_STATE> cmd_pool_state(new COMMAND_POOL_STATE{});
    cmd_pool_state->createFlags = pCreateInfo->flags;
    cmd_pool_state->queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    commandPoolMap[*pCommandPool] = std::move(cmd_pool_state);
}
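// App-side pattern the in-flight checks above guard against (comment only;
// the error name is the VUID used by PreCallValidateFreeCommandBuffers):
//     vkQueueSubmit(queue, 1, &submit_info, fence);             // cb is now in_use
//     vkFreeCommandBuffers(device, pool, 1, &cb);               // flagged: ...pCommandBuffers-00047
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // after this, the free is legal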
"VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE."); } } return skip; } void CoreChecks::PostCallRecordCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool, VkResult result) { if (VK_SUCCESS != result) return; std::unique_ptr<QUERY_POOL_STATE> query_pool_state(new QUERY_POOL_STATE{}); query_pool_state->createInfo = *pCreateInfo; queryPoolMap[*pQueryPool] = std::move(query_pool_state); QueryObject query_obj{*pQueryPool, 0u}; for (uint32_t i = 0; i < pCreateInfo->queryCount; ++i) { query_obj.query = i; queryToStateMap[query_obj] = QUERYSTATE_UNKNOWN; } } bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) { COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool); bool skip = false; if (cp_state) { // Verify that command buffers in pool are complete (not in-flight) skip |= CheckCommandBuffersInFlight(cp_state, "destroy command pool with", "VUID-vkDestroyCommandPool-commandPool-00041"); } return skip; } void CoreChecks::PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) { if (!commandPool) return; COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool); // Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers // "When a pool is destroyed, all command buffers allocated from the pool are freed." if (cp_state) { // Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration. std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()}; FreeCommandBufferStates(cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data()); commandPoolMap.erase(commandPool); } } bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) { auto command_pool_state = GetCommandPoolState(commandPool); return CheckCommandBuffersInFlight(command_pool_state, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040"); } void CoreChecks::PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags, VkResult result) { if (VK_SUCCESS != result) return; // Reset all of the CBs allocated from this pool auto command_pool_state = GetCommandPoolState(commandPool); for (auto cmdBuffer : command_pool_state->commandBuffers) { ResetCommandBufferState(cmdBuffer); } } bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) { bool skip = false; for (uint32_t i = 0; i < fenceCount; ++i) { auto pFence = GetFenceState(pFences[i]); if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(pFences[i]), "VUID-vkResetFences-pFences-01123", "%s is in use.", report_data->FormatHandle(pFences[i]).c_str()); } } return skip; } void CoreChecks::PostCallRecordResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkResult result) { for (uint32_t i = 0; i < fenceCount; ++i) { auto pFence = GetFenceState(pFences[i]); if (pFence) { if (pFence->scope == kSyncScopeInternal) { pFence->state = FENCE_UNSIGNALED; } else if (pFence->scope == kSyncScopeExternalTemporary) { pFence->scope = kSyncScopeInternal; } } } } // For given cb_nodes, invalidate them 
// For given cb_nodes, invalidate them and track object causing invalidation
void ValidationStateTracker::InvalidateCommandBuffers(std::unordered_set<CMD_BUFFER_STATE *> const &cb_nodes,
                                                      const VulkanTypedHandle &obj) {
    for (auto cb_node : cb_nodes) {
        if (cb_node->state == CB_RECORDING) {
            cb_node->state = CB_INVALID_INCOMPLETE;
        } else if (cb_node->state == CB_RECORDED) {
            cb_node->state = CB_INVALID_COMPLETE;
        }
        cb_node->broken_bindings.push_back(obj);

        // if secondary, then propagate the invalidation to the primaries that will call us.
        if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            InvalidateCommandBuffers(cb_node->linkedCommandBuffers, obj);
        }
    }
}

bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
                                                   const VkAllocationCallbacks *pAllocator) {
    FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
    const VulkanTypedHandle obj_struct(framebuffer, kVulkanObjectTypeFramebuffer);
    bool skip = false;
    if (framebuffer_state) {
        skip |= ValidateObjectNotInUse(framebuffer_state, obj_struct, "vkDestroyFramebuffer",
                                       "VUID-vkDestroyFramebuffer-framebuffer-00892");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
                                                 const VkAllocationCallbacks *pAllocator) {
    if (!framebuffer) return;
    FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
    const VulkanTypedHandle obj_struct(framebuffer, kVulkanObjectTypeFramebuffer);
    InvalidateCommandBuffers(framebuffer_state->cb_bindings, obj_struct);
    frameBufferMap.erase(framebuffer);
}

bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
                                                  const VkAllocationCallbacks *pAllocator) {
    RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
    const VulkanTypedHandle obj_struct(renderPass, kVulkanObjectTypeRenderPass);
    bool skip = false;
    if (rp_state) {
        skip |= ValidateObjectNotInUse(rp_state, obj_struct, "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
                                                const VkAllocationCallbacks *pAllocator) {
    if (!renderPass) return;
    RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
    const VulkanTypedHandle obj_struct(renderPass, kVulkanObjectTypeRenderPass);
    InvalidateCommandBuffers(rp_state->cb_bindings, obj_struct);
    renderPassMap.erase(renderPass);
}

// Access helper functions for external modules
VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) {
    VkFormatProperties format_properties;
    DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties);
    return format_properties;
}

VkResult CoreChecks::GetPDImageFormatProperties(const VkImageCreateInfo *image_ci,
                                                VkImageFormatProperties *pImageFormatProperties) {
    return DispatchGetPhysicalDeviceImageFormatProperties(physical_device, image_ci->format, image_ci->imageType,
                                                          image_ci->tiling, image_ci->usage, image_ci->flags,
                                                          pImageFormatProperties);
}

VkResult CoreChecks::GetPDImageFormatProperties2(const VkPhysicalDeviceImageFormatInfo2 *phys_dev_image_fmt_info,
                                                 VkImageFormatProperties2 *pImageFormatProperties) {
    if (!instance_extensions.vk_khr_get_physical_device_properties_2) return VK_ERROR_EXTENSION_NOT_PRESENT;
    return DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, phys_dev_image_fmt_info, pImageFormatProperties);
}
void CoreChecks::PostCallRecordCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence, VkResult result) {
    if (VK_SUCCESS != result) return;
    std::unique_ptr<FENCE_STATE> fence_state(new FENCE_STATE{});
    fence_state->fence = *pFence;
    fence_state->createInfo = *pCreateInfo;
    fence_state->state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
    fenceMap[*pFence] = std::move(fence_state);
}

// Validation cache:
// CV is the bottommost implementor of this extension. Don't pass calls down.

// utility function to set collective state for pipeline
void SetPipelineState(PIPELINE_STATE *pPipe) {
    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
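// The range comparisons in SetPipelineState() above rely on the four
// VK_BLEND_FACTOR_CONSTANT_* values being contiguous in the enum; a pipeline
// using any of them (with dynamic blend constants) will later need
// vkCmdSetBlendConstants state before drawing.
static_assert(VK_BLEND_FACTOR_CONSTANT_COLOR < VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA,
              "the four constant blend factors form a contiguous range");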
bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pipe_state_vec,
                                                const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) {
    bool skip = false;
    const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;

    for (uint32_t i = 0; i < count; i++) {
        auto pvids_ci = lvl_find_in_chain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext);
        if (nullptr == pvids_ci) continue;

        const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get();
        for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) {
            const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]);
            if (vibdd->binding >= device_limits->maxVertexInputBindings) {
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                    "VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).",
                    i, j, vibdd->binding, device_limits->maxVertexInputBindings);
            }
            if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) {
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                    "VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).",
                    i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor);
            }
            if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) {
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                    "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not "
                    "enabled.",
                    i, j);
            }
            if ((1 != vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) {
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                    "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not "
                    "enabled.",
                    i, j, vibdd->divisor);
            }

            // Find the corresponding binding description and validate input rate setting
            bool failed_01871 = true;
            for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) {
                if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) &&
                    (VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) {
                    failed_01871 = false;
                    break;
                }
            }
            if (failed_01871) {  // Description not found, or has incorrect inputRate value
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
                    "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's "
                    "VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.",
                    i, j, vibdd->binding);
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                        const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                        const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                        void *cgpl_state_data) {
    bool skip = false;
    create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
    cgpl_state->pipe_state.reserve(count);
    for (uint32_t i = 0; i < count; i++) {
        cgpl_state->pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
        (cgpl_state->pipe_state)[i]->initGraphicsPipeline(&pCreateInfos[i],
                                                          GetRenderPassStateSharedPtr(pCreateInfos[i].renderPass));
        (cgpl_state->pipe_state)[i]->pipeline_layout = *GetPipelineLayout(pCreateInfos[i].layout);
    }

    for (uint32_t i = 0; i < count; i++) {
        skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i);
    }

    for (uint32_t i = 0; i < count; i++) {
        skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state, i);
    }

    if (device_extensions.vk_ext_vertex_attribute_divisor) {
        skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos);
    }

    return skip;
}
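// Sketch of the create-info hand-off used below (descriptive, not code):
// PreCallValidate* inspects the application's pCreateInfos; PreCallRecord* may
// substitute instrumented shaders via cgpl_state->gpu_create_infos; and
// PostCallRecord* must run its cleanup regardless of VkResult, because the
// driver may have produced valid handles for only a subset of the pipelines.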
// GPU validation may replace pCreateInfos for the down-chain call
void CoreChecks::PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                      const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                      void *cgpl_state_data) {
    create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
    cgpl_state->pCreateInfos = pCreateInfos;
    // GPU Validation may replace instrumented shaders with non-instrumented ones, so allow it to modify the createinfos.
    if (enabled.gpu_validation) {
        cgpl_state->gpu_create_infos = GpuPreCallRecordCreateGraphicsPipelines(pipelineCache, count, pCreateInfos, pAllocator,
                                                                               pPipelines, cgpl_state->pipe_state);
        cgpl_state->pCreateInfos = reinterpret_cast<VkGraphicsPipelineCreateInfo *>(cgpl_state->gpu_create_infos.data());
    }
}

void CoreChecks::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                       VkResult result, void *cgpl_state_data) {
    create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
    // This API may create pipelines regardless of the return value
    for (uint32_t i = 0; i < count; i++) {
        if (pPipelines[i] != VK_NULL_HANDLE) {
            (cgpl_state->pipe_state)[i]->pipeline = pPipelines[i];
            pipelineMap[pPipelines[i]] = std::move((cgpl_state->pipe_state)[i]);
        }
    }
    // GPU val needs clean up regardless of result
    if (enabled.gpu_validation) {
        GpuPostCallRecordCreateGraphicsPipelines(count, pCreateInfos, pAllocator, pPipelines);
        cgpl_state->gpu_create_infos.clear();
    }
    cgpl_state->pipe_state.clear();
}

bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkComputePipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                       void *ccpl_state_data) {
    bool skip = false;
    auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
    ccpl_state->pipe_state.reserve(count);
    for (uint32_t i = 0; i < count; i++) {
        // Create and initialize internal tracking data structure
        ccpl_state->pipe_state.push_back(unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
        ccpl_state->pipe_state.back()->initComputePipeline(&pCreateInfos[i]);
        ccpl_state->pipe_state.back()->pipeline_layout = *GetPipelineLayout(pCreateInfos[i].layout);

        // TODO: Add Compute Pipeline Verification
        skip |= ValidateComputePipeline(ccpl_state->pipe_state.back().get());
    }
    return skip;
}

void CoreChecks::PreCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                     const VkComputePipelineCreateInfo *pCreateInfos,
                                                     const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                     void *ccpl_state_data) {
    auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
    ccpl_state->pCreateInfos = pCreateInfos;
    // GPU Validation may replace instrumented shaders with non-instrumented ones, so allow it to modify the createinfos.
    if (enabled.gpu_validation) {
        ccpl_state->gpu_create_infos = GpuPreCallRecordCreateComputePipelines(pipelineCache, count, pCreateInfos, pAllocator,
                                                                              pPipelines, ccpl_state->pipe_state);
        ccpl_state->pCreateInfos = reinterpret_cast<VkComputePipelineCreateInfo *>(ccpl_state->gpu_create_infos.data());
    }
}

void CoreChecks::PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                      VkResult result, void *ccpl_state_data) {
    create_compute_pipeline_api_state *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);

    // This API may create pipelines regardless of the return value
    for (uint32_t i = 0; i < count; i++) {
        if (pPipelines[i] != VK_NULL_HANDLE) {
            (ccpl_state->pipe_state)[i]->pipeline = pPipelines[i];
            pipelineMap[pPipelines[i]] = std::move((ccpl_state->pipe_state)[i]);
        }
    }
    // GPU val needs clean up regardless of result
    if (enabled.gpu_validation) {
        GpuPostCallRecordCreateComputePipelines(count, pCreateInfos, pAllocator, pPipelines);
    }
}

bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                            const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
                                                            const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                            void *pipe_state_data) {
    bool skip = false;
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    uint32_t i = 0;
    vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state =
        reinterpret_cast<vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data);
    pipe_state->reserve(count);
    for (i = 0; i < count; i++) {
        pipe_state->push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
        (*pipe_state)[i]->initRayTracingPipelineNV(&pCreateInfos[i]);
        (*pipe_state)[i]->pipeline_layout = *GetPipelineLayout(pCreateInfos[i].layout);
    }

    for (i = 0; i < count; i++) {
        skip |= ValidateRayTracingPipelineNV((*pipe_state)[i].get());
    }

    return skip;
}

void CoreChecks::PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                           const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
                                                           const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                           VkResult result, void *pipe_state_data) {
    vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state =
        reinterpret_cast<vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data);
    // This API may create pipelines regardless of the return value
    for (uint32_t i = 0; i < count; i++) {
        if (pPipelines[i] != VK_NULL_HANDLE) {
            (*pipe_state)[i]->pipeline = pPipelines[i];
            pipelineMap[pPipelines[i]] = std::move((*pipe_state)[i]);
        }
    }
}

void CoreChecks::PostCallRecordCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler, VkResult result) {
    if (VK_SUCCESS != result) return;  // don't track state for a sampler the driver failed to create
    samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
}
phys_dev_ext_props.max_push_descriptors, device_extensions.vk_ext_descriptor_indexing, &enabled_features.descriptor_indexing, &enabled_features.inline_uniform_block, &phys_dev_ext_props.inline_uniform_block_props); } void CoreChecks::PostCallRecordCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout, VkResult result) { if (VK_SUCCESS != result) return; descriptorSetLayoutMap[*pSetLayout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(pCreateInfo, *pSetLayout); } // Used by CreatePipelineLayout and CmdPushConstants. // Note that the index argument is optional and only used by CreatePipelineLayout. bool CoreChecks::ValidatePushConstantRange(const uint32_t offset, const uint32_t size, const char *caller_name, uint32_t index = 0) { if (disabled.push_constant_range) return false; uint32_t const maxPushConstantsSize = phys_dev_props.limits.maxPushConstantsSize; bool skip = false; // Check that offset + size don't exceed the max. // Prevent arithmetic overflow here by avoiding addition and testing in this order. if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) { // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem. if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { if (offset >= maxPushConstantsSize) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-offset-00294", "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantsSize of %u.", caller_name, index, offset, maxPushConstantsSize); } if (size > maxPushConstantsSize - offset) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-size-00298", "%s call has push constants index %u with offset %u and size %u that exceeds this device's " "maxPushConstantsSize of %u.", caller_name, index, offset, size, maxPushConstantsSize); } } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { if (offset >= maxPushConstantsSize) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkCmdPushConstants-offset-00370", "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantsSize of %u.", caller_name, index, offset, maxPushConstantsSize); } if (size > maxPushConstantsSize - offset) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkCmdPushConstants-size-00371", "%s call has push constants index %u with offset %u and size %u that exceeds this device's " "maxPushConstantsSize of %u.", caller_name, index, offset, size, maxPushConstantsSize); } } else { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name); } } // size needs to be non-zero and a multiple of 4. if ((size == 0) || ((size & 0x3) != 0)) { if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { if (size == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-size-00296", "%s call has push constants index %u with size %u. 
Size must be greater than zero.", caller_name, index, size); } if (size & 0x3) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-size-00297", "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name, index, size); } } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { if (size == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkCmdPushConstants-size-arraylength", "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name, index, size); } if (size & 0x3) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkCmdPushConstants-size-00369", "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name, index, size); } } else { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name); } } // offset needs to be a multiple of 4. if ((offset & 0x3) != 0) { if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-offset-00295", "%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name, index, offset); } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-vkCmdPushConstants-offset-00368", "%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset); } else { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name); } } return skip; } enum DSL_DESCRIPTOR_GROUPS { DSL_TYPE_SAMPLERS = 0, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK, DSL_NUM_DESCRIPTOR_GROUPS }; // Used by PreCallValidateCreatePipelineLayout. 
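// Called twice from there: once skipping update-after-bind set layouts and once including them (see the two call sites below).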
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage std::valarray<uint32_t> GetDescriptorCountMaxPerStage( const DeviceFeatures *enabled_features, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts, bool skip_update_after_bind) { // Identify active pipeline stages std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT, VK_SHADER_STAGE_COMPUTE_BIT}; if (enabled_features->core.geometryShader) { stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT); } if (enabled_features->core.tessellationShader) { stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT); } // Allow iteration over enum values std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = { DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK}; // Sum by layouts per stage, then pick max of stages per type std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages for (auto stage : stage_flags) { std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums for (auto dsl : set_layouts) { if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) { switch (binding->descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount; stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: // count one block per binding. descriptorCount is number of bytes stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++; break; default: break; } } } } for (auto type : dsl_groups) { max_sum[type] = std::max(stage_sum[type], max_sum[type]); } } return max_sum; } // Used by PreCallValidateCreatePipelineLayout. // Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type. // Note: descriptors only count against the limit once even if used by multiple stages. 
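// Illustrative example: a set layout with binding 0 = four UNIFORM_BUFFER descriptors and binding 1 = one 64-byte INLINE_UNIFORM_BLOCK_EXT contributes {UNIFORM_BUFFER: 4, INLINE_UNIFORM_BLOCK_EXT: 1} to the sums here, since inline uniform blocks count once per binding (their descriptorCount is a byte size).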
std::map<uint32_t, uint32_t> GetDescriptorSum( const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) { std::map<uint32_t, uint32_t> sum_by_type; for (auto dsl : set_layouts) { if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (binding->descriptorCount > 0) { if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { // count one block per binding. descriptorCount is number of bytes sum_by_type[binding->descriptorType]++; } else { sum_by_type[binding->descriptorType] += binding->descriptorCount; } } } } return sum_by_type; } bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) { bool skip = false; // Validate layout count against device physical limit if (pCreateInfo->setLayoutCount > phys_dev_props.limits.maxBoundDescriptorSets) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286", "vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).", pCreateInfo->setLayoutCount, phys_dev_props.limits.maxBoundDescriptorSets); } // Validate Push Constant ranges uint32_t i, j; for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { skip |= ValidatePushConstantRange(pCreateInfo->pPushConstantRanges[i].offset, pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i); if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPushConstantRange-stageFlags-requiredbitmask", "vkCreatePipelineLayout() call has no stageFlags set."); } } // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges. 
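// e.g. two ranges whose stageFlags both include VK_SHADER_STAGE_VERTEX_BIT overlap, and the pairwise check below reports VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292.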
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) { if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292", "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j); } } } // Early-out if (skip) return skip; std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr); unsigned int push_descriptor_set_count = 0; { for (i = 0; i < pCreateInfo->setLayoutCount; ++i) { set_layouts[i] = GetDescriptorSetLayout(this, pCreateInfo->pSetLayouts[i]); if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count; } } if (push_descriptor_set_count > 1) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293", "vkCreatePipelineLayout() Multiple push descriptor sets found."); } // Max descriptors by type, within a single pipeline stage std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true); // Samplers if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287", "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device " "maxPerStageDescriptorSamplers limit (%d).", max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers); } // Uniform buffers if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288", "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUniformBuffers limit (%d).", max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorUniformBuffers); } // Storage buffers if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289", "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device " "maxPerStageDescriptorStorageBuffers limit (%d).", max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorStorageBuffers); } // Sampled images if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290", "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device " "maxPerStageDescriptorSampledImages limit (%d).", max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages); } // Storage images if 
(max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291", "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device " "maxPerStageDescriptorStorageImages limit (%d).", max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages); } // Input attachments if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676", "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device " "maxPerStageDescriptorInputAttachments limit (%d).", max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS], phys_dev_props.limits.maxPerStageDescriptorInputAttachments); } // Inline uniform blocks if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] > phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214", "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device " "maxPerStageDescriptorInlineUniformBlocks limit (%d).", max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks); } // Total descriptors by type // std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true); // Samplers uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]; if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677", "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device " "maxDescriptorSetSamplers limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetSamplers); } // Uniform buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678", "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUniformBuffers limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers); } // Dynamic uniform buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679", "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUniformBuffersDynamic limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC], phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic); } // Storage buffers if 
(sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680", "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageBuffers limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers); } // Dynamic storage buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681", "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageBuffersDynamic limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC], phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic); } // Sampled images sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]; if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682", "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device " "maxDescriptorSetSampledImages limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetSampledImages); } // Storage images sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]; if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683", "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageImages limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetStorageImages); } // Input attachments if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684", "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device " "maxDescriptorSetInputAttachments limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments); } // Inline uniform blocks if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] > phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02216", "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device " "maxDescriptorSetInlineUniformBlocks limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT], phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks); } if (device_extensions.vk_ext_descriptor_indexing) { // XXX TODO: replace with 
correct VU messages // Max descriptors by type, within a single pipeline stage std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false); // Samplers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022", "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS], phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers); } // Uniform buffers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023", "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers); } // Storage buffers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024", "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS], phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers); } // Sampled images if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025", "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES], phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages); } // Storage images if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026", "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES], 
phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages); } // Input attachments if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027", "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS], phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments); } // Inline uniform blocks if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] > phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215", "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks); } // Total descriptors by type, summed across all pipeline stages // std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false); // Samplers sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]; if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036", "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindSamplers limit (%d).", sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers); } // Uniform buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037", "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers); } // Dynamic uniform buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038", "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).", 
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC], phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic); } // Storage buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039", "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers); } // Dynamic storage buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040", "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC], phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic); } // Sampled images sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]; if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041", "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindSampledImages limit (%d).", sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages); } // Storage images sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]; if (sum > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042", "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageImages limit (%d).", sum, phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages); } // Input attachments if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043", "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).", 
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments); } // Inline uniform blocks if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] > phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217", "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT], phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks); } } return skip; } // For repeatable sorting, not very useful for "memory in range" search struct PushConstantRangeCompare { bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const { if (lhs->offset == rhs->offset) { if (lhs->size == rhs->size) { // The comparison is arbitrary, but avoids false aliasing by comparing all fields. return lhs->stageFlags < rhs->stageFlags; } // If the offsets are the same then sorting by the end of range is useful for validation return lhs->size < rhs->size; } return lhs->offset < rhs->offset; } }; static PushConstantRangesDict push_constant_ranges_dict; PushConstantRangesId GetCanonicalId(const VkPipelineLayoutCreateInfo *info) { if (!info->pPushConstantRanges) { // Hand back the empty entry (creating as needed)... return push_constant_ranges_dict.look_up(PushConstantRanges()); } // Sort the input ranges to ensure equivalent ranges map to the same id std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted; for (uint32_t i = 0; i < info->pushConstantRangeCount; i++) { sorted.insert(info->pPushConstantRanges + i); } // Reserve (not size-construct) so emplace_back doesn't append behind default-constructed entries PushConstantRanges ranges; ranges.reserve(sorted.size()); for (const auto range : sorted) { ranges.emplace_back(*range); } return push_constant_ranges_dict.look_up(std::move(ranges)); } // Dictionary of canonical form of the pipeline set layout of descriptor set layouts static PipelineLayoutSetLayoutsDict pipeline_layout_set_layouts_dict; // Dictionary of canonical form of the "compatible for set" records static PipelineLayoutCompatDict pipeline_layout_compat_dict; static PipelineLayoutCompatId GetCanonicalId(const uint32_t set_index, const PushConstantRangesId pcr_id, const PipelineLayoutSetLayoutsId set_layouts_id) { return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id)); } void CoreChecks::PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout, void *cpl_state_data) { create_pipeline_layout_api_state *cpl_state = reinterpret_cast<create_pipeline_layout_api_state *>(cpl_state_data); if (enabled.gpu_validation) { GpuPreCallCreatePipelineLayout(pCreateInfo, pAllocator, pPipelineLayout, &cpl_state->new_layouts, &cpl_state->modified_create_info); } } void CoreChecks::PostCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout, VkResult result) { // Clean up GPU validation if (enabled.gpu_validation) { GpuPostCallCreatePipelineLayout(result); } if (VK_SUCCESS != result) return;
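// Shadow the new pipeline layout and precompute its canonical IDs so later "compatible for set" checks reduce to cheap ID comparisons.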
std::unique_ptr<PIPELINE_LAYOUT_STATE> pipeline_layout_state(new PIPELINE_LAYOUT_STATE{}); pipeline_layout_state->layout = *pPipelineLayout; pipeline_layout_state->set_layouts.resize(pCreateInfo->setLayoutCount); PipelineLayoutSetLayoutsDef set_layouts(pCreateInfo->setLayoutCount); for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) { pipeline_layout_state->set_layouts[i] = GetDescriptorSetLayout(this, pCreateInfo->pSetLayouts[i]); set_layouts[i] = pipeline_layout_state->set_layouts[i]->GetLayoutId(); } // Get canonical form IDs for the "compatible for set" contents pipeline_layout_state->push_constant_ranges = GetCanonicalId(pCreateInfo); auto set_layouts_id = pipeline_layout_set_layouts_dict.look_up(set_layouts); pipeline_layout_state->compat_for_set.reserve(pCreateInfo->setLayoutCount); // Create table of "compatible for set N" canonical forms for trivial accept validation for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) { pipeline_layout_state->compat_for_set.emplace_back( GetCanonicalId(i, pipeline_layout_state->push_constant_ranges, set_layouts_id)); } pipelineLayoutMap[*pPipelineLayout] = std::move(pipeline_layout_state); } void CoreChecks::PostCallRecordCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool, VkResult result) { if (VK_SUCCESS != result) return; descriptorPoolMap[*pDescriptorPool] = std::unique_ptr<DESCRIPTOR_POOL_STATE>(new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo)); } bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) { // Make sure sets being destroyed are not currently in-use if (disabled.idle_descriptor_set) return false; bool skip = false; DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(descriptorPool); if (pPool != nullptr) { for (auto ds : pPool->sets) { if (ds && ds->in_use.load()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(descriptorPool), "VUID-vkResetDescriptorPool-descriptorPool-00313", "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer."); if (skip) break; } } } return skip; } void CoreChecks::PostCallRecordResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags, VkResult result) { if (VK_SUCCESS != result) return; DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(descriptorPool); // TODO: validate flags // For every set from this pool, clear it, remove it from setMap, and free the cvdescriptorset::DescriptorSet for (auto ds : pPool->sets) { FreeDescriptorSet(ds); } pPool->sets.clear(); // Reset available count for each type and available sets for this pool for (auto it = pPool->availableDescriptorTypeCount.begin(); it != pPool->availableDescriptorTypeCount.end(); ++it) { pPool->availableDescriptorTypeCount[it->first] = pPool->maxDescriptorTypeCount[it->first]; } pPool->availableSets = pPool->maxSets; } // Ensure the pool contains enough descriptors and descriptor sets to satisfy // an allocation request. Fills common_data with the total number of descriptors of each type required, // as well as DescriptorSetLayout ptrs used for later update.
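// Illustrative example: allocating two sets whose layouts together require six VK_DESCRIPTOR_TYPE_SAMPLER descriptors from a pool with only four samplers (or only one set) remaining should fail this validation.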
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets, void *ads_state_data) { // Always update common data cvdescriptorset::AllocateDescriptorSetsData *ads_state = reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data); UpdateAllocateDescriptorSetsData(pAllocateInfo, ads_state); // All state checks for AllocateDescriptorSets are done in a single function return ValidateAllocateDescriptorSets(pAllocateInfo, ads_state); } // Allocation state was good and the call down the chain was made, so update state based on allocating descriptor sets void CoreChecks::PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets, VkResult result, void *ads_state_data) { if (VK_SUCCESS != result) return; // All the updates are contained in a single cvdescriptorset function cvdescriptorset::AllocateDescriptorSetsData *ads_state = reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data); PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, ads_state); } bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) { // Make sure that no sets being destroyed are in-flight bool skip = false; // First make sure sets being destroyed are not currently in-use for (uint32_t i = 0; i < count; ++i) { if (pDescriptorSets[i] != VK_NULL_HANDLE) { skip |= ValidateIdleDescriptorSet(pDescriptorSets[i], "vkFreeDescriptorSets"); } } DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool); if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) { // Can't Free from a NON_FREE pool skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(descriptorPool), "VUID-vkFreeDescriptorSets-descriptorPool-00312", "It is invalid to call vkFreeDescriptorSets() with a pool created without setting " "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT."); } return skip; } void CoreChecks::PreCallRecordFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) { DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool); // Update available descriptor sets in pool pool_state->availableSets += count; // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap for (uint32_t i = 0; i < count; ++i) { if (pDescriptorSets[i] != VK_NULL_HANDLE) { auto descriptor_set = setMap[pDescriptorSets[i]].get(); uint32_t type_index = 0, descriptor_count = 0; for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) { type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j)); descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j); pool_state->availableDescriptorTypeCount[type_index] += descriptor_count; } FreeDescriptorSet(descriptor_set); pool_state->sets.erase(descriptor_set); } } } bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) { // First thing to do is perform map look-ups.
// NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets // so we can't just do a single map look-up up-front, but do them individually in functions below // Now make call(s) that validate state, but don't perform state updates in this function // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the // namespace which will parse params and make calls into specific class instances return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies, "vkUpdateDescriptorSets()"); } void CoreChecks::PreCallRecordUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) { cvdescriptorset::PerformUpdateDescriptorSets(this, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies); } void CoreChecks::PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer, VkResult result) { if (VK_SUCCESS != result) return; auto pPool = GetCommandPoolState(pCreateInfo->commandPool); if (pPool) { for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) { // Add command buffer to its commandPool map pPool->commandBuffers.insert(pCommandBuffer[i]); std::unique_ptr<CMD_BUFFER_STATE> pCB(new CMD_BUFFER_STATE{}); pCB->createInfo = *pCreateInfo; pCB->device = device; // Add command buffer to map commandBufferMap[pCommandBuffer[i]] = std::move(pCB); ResetCommandBufferState(pCommandBuffer[i]); } } } // Add bindings between the given cmd buffer & framebuffer and the framebuffer's children void CoreChecks::AddFramebufferBinding(CMD_BUFFER_STATE *cb_state, FRAMEBUFFER_STATE *fb_state) { AddCommandBufferBinding(&fb_state->cb_bindings, VulkanTypedHandle(fb_state->framebuffer, kVulkanObjectTypeFramebuffer), cb_state); const uint32_t attachmentCount = fb_state->createInfo.attachmentCount; for (uint32_t attachment = 0; attachment < attachmentCount; ++attachment) { auto view_state = GetAttachmentImageViewState(fb_state, attachment); if (view_state) { AddCommandBufferBindingImageView(cb_state, view_state); } } } bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); if (!cb_state) return false; bool skip = false; if (cb_state->in_use.load()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049", "Calling vkBeginCommandBuffer() on active %s before it has completed. 
You must check " "command buffer fence before this call.", report_data->FormatHandle(commandBuffer).c_str()); } if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { // Secondary Command Buffer const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo; if (!pInfo) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00051", "vkBeginCommandBuffer(): Secondary %s must have inheritance info.", report_data->FormatHandle(commandBuffer).c_str()); } else { if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { assert(pInfo->renderPass); auto framebuffer = GetFramebufferState(pInfo->framebuffer); if (framebuffer) { if (framebuffer->createInfo.renderPass != pInfo->renderPass) { // renderPass that framebuffer was created with must be compatible with local renderPass skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer", GetRenderPassState(pInfo->renderPass), "vkBeginCommandBuffer()", "VUID-VkCommandBufferBeginInfo-flags-00055"); } } } if ((pInfo->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) && (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00052", "vkBeginCommandBuffer(): Secondary %s must not have VK_QUERY_CONTROL_PRECISE_BIT if " "occlusionQuery is disabled or the device does not support precise occlusion queries.", report_data->FormatHandle(commandBuffer).c_str()); } } if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) { auto renderPass = GetRenderPassState(pInfo->renderPass); if (renderPass) { if (pInfo->subpass >= renderPass->createInfo.subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkCommandBufferBeginInfo-flags-00054", "vkBeginCommandBuffer(): Secondary %s must have a subpass index (%d) that is " "less than the number of subpasses (%d).", report_data->FormatHandle(commandBuffer).c_str(), pInfo->subpass, renderPass->createInfo.subpassCount); } } } } if (CB_RECORDING == cb_state->state) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049", "vkBeginCommandBuffer(): Cannot call Begin on %s in the RECORDING state. 
Must first call " "vkEndCommandBuffer().", report_data->FormatHandle(commandBuffer).c_str()); } else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) { VkCommandPool cmdPool = cb_state->createInfo.commandPool; auto pPool = GetCommandPoolState(cmdPool); if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00050", "Call to vkBeginCommandBuffer() on %s attempts to implicitly reset cmdBuffer created from " "%s that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmdPool).c_str()); } } auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext); if (chained_device_group_struct) { skip |= ValidateDeviceMaskToPhysicalDeviceCount( chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106"); skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107"); } return skip; } void CoreChecks::PreCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); if (!cb_state) return; // This implicitly resets the Cmd Buffer, so make sure any fence is done and then clear memory references ClearCmdBufAndMemReferences(cb_state); if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { // Secondary Command Buffer const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo; if (pInfo) { if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { assert(pInfo->renderPass); auto framebuffer = GetFramebufferState(pInfo->framebuffer); if (framebuffer) { // Connect this framebuffer and its children to this cmdBuffer AddFramebufferBinding(cb_state, framebuffer); } } } } if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) { ResetCommandBufferState(commandBuffer); } // Set updated state here in case implicit reset occurs above cb_state->state = CB_RECORDING; cb_state->beginInfo = *pBeginInfo; if (cb_state->beginInfo.pInheritanceInfo) { cb_state->inheritanceInfo = *(cb_state->beginInfo.pInheritanceInfo); cb_state->beginInfo.pInheritanceInfo = &cb_state->inheritanceInfo; // If we are a secondary command buffer and inheriting, update the items we should inherit.
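// Inherited below: the active render pass, subpass index, and framebuffer handle.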
if ((cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { cb_state->activeRenderPass = GetRenderPassState(cb_state->beginInfo.pInheritanceInfo->renderPass); cb_state->activeSubpass = cb_state->beginInfo.pInheritanceInfo->subpass; cb_state->activeFramebuffer = cb_state->beginInfo.pInheritanceInfo->framebuffer; cb_state->framebuffers.insert(cb_state->beginInfo.pInheritanceInfo->framebuffer); } } auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext); if (chained_device_group_struct) { cb_state->initial_device_mask = chained_device_group_struct->deviceMask; } else { cb_state->initial_device_mask = (1 << physical_device_count) - 1; } } bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); if (!cb_state) return false; bool skip = false; if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) || !(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { // This needs spec clarification to update valid usage, see comments in PR: // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165 skip |= InsideRenderPass(cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060"); } skip |= ValidateCmd(cb_state, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()"); for (auto query : cb_state->activeQueries) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkEndCommandBuffer-commandBuffer-00061", "Ending command buffer with in progress query: %s, query %d.", report_data->FormatHandle(query.pool).c_str(), query.query); } return skip; } void CoreChecks::PostCallRecordEndCommandBuffer(VkCommandBuffer commandBuffer, VkResult result) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); if (!cb_state) return; // Cached validation is specific to a specific recording of a specific command buffer. 
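// so each descriptor set validated during this recording drops its cached results for this command buffer below.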
for (auto descriptor_set : cb_state->validated_descriptor_sets) { descriptor_set->ClearCachedValidation(cb_state); } cb_state->validated_descriptor_sets.clear(); if (VK_SUCCESS == result) { cb_state->state = CB_RECORDED; } } bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) { bool skip = false; CMD_BUFFER_STATE *pCB = GetCBState(commandBuffer); if (!pCB) return false; VkCommandPool cmdPool = pCB->createInfo.commandPool; auto pPool = GetCommandPoolState(cmdPool); if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkResetCommandBuffer-commandBuffer-00046", "Attempt to reset %s created from %s that does NOT have the " "VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmdPool).c_str()); } skip |= CheckCommandBufferInFlight(pCB, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045"); return skip; } void CoreChecks::PostCallRecordResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags, VkResult result) { if (VK_SUCCESS == result) { ResetCommandBufferState(commandBuffer); } } static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) { switch (pipelineBindPoint) { case VK_PIPELINE_BIND_POINT_GRAPHICS: return "graphics"; case VK_PIPELINE_BIND_POINT_COMPUTE: return "compute"; case VK_PIPELINE_BIND_POINT_RAY_TRACING_NV: return "ray-tracing"; default: return "unknown"; } } bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdBindPipeline-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()"); static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")}; skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors); auto pipeline_state = GetPipelineState(pipeline); assert(pipeline_state); const auto &pipeline_state_bind_point = pipeline_state->getPipelineType(); if (pipelineBindPoint != pipeline_state_bind_point) { if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-00779", "Cannot bind a pipeline of type %s to the graphics pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-00780", "Cannot bind a pipeline of type %s to the compute pipeline bind point", 
GetPipelineTypeName(pipeline_state_bind_point));
        } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindPipeline-pipelineBindPoint-02392",
                            "Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point",
                            GetPipelineTypeName(pipeline_state_bind_point));
        }
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                              VkPipeline pipeline) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    auto pipe_state = GetPipelineState(pipeline);
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
        cb_state->status &= ~cb_state->static_status;
        cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
        cb_state->status |= cb_state->static_status;
    }
    cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
    SetPipelineState(pipe_state);
    AddCommandBufferBinding(&pipe_state->cb_bindings, VulkanTypedHandle(pipeline, kVulkanObjectTypePipeline), cb_state);
}

bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                               const VkViewport *pViewports) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetViewport-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()");
    if (cb_state->static_status & CBSTATUS_VIEWPORT_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetViewport-None-01221",
                        "vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                             const VkViewport *pViewports) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    cb_state->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
    cb_state->status |= CBSTATUS_VIEWPORT_SET;
}

bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                              const VkRect2D *pScissors) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetScissor-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()");
    if (cb_state->static_status & CBSTATUS_SCISSOR_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-None-00590",
                        "vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                            const VkRect2D *pScissors) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    cb_state->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
    cb_state->status |= CBSTATUS_SCISSOR_SET;
}

bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
                                                         uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetExclusiveScissorNV()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetExclusiveScissorNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETEXCLUSIVESCISSORNV, "vkCmdSetExclusiveScissorNV()");
    if (cb_state->static_status & CBSTATUS_EXCLUSIVE_SCISSOR_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02032",
                        "vkCmdSetExclusiveScissorNV(): pipeline was created without VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV flag.");
    }
    if (!enabled_features.exclusive_scissor.exclusiveScissor) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02031",
                        "vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
                                                       uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // TODO: We don't have VUIDs for validating that all exclusive scissors have been set.
    // cb_state->exclusiveScissorMask |= ((1u << exclusiveScissorCount) - 1u) << firstExclusiveScissor;
    cb_state->status |= CBSTATUS_EXCLUSIVE_SCISSOR_SET;
}

bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
                                                          VkImageLayout imageLayout) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindShadingRateImageNV()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdBindShadingRateImageNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_BINDSHADINGRATEIMAGENV, "vkCmdBindShadingRateImageNV()");
    if (!enabled_features.shading_rate_image.shadingRateImage) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdBindShadingRateImageNV-None-02058",
                        "vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled.");
    }
    if (imageView != VK_NULL_HANDLE) {
        auto view_state = GetImageViewState(imageView);
        // Don't dereference view_state until it has been null-checked
        if (!view_state || (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D &&
                            view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                            HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02059",
                            "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid "
                            "VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
        }
        if (view_state && view_state->create_info.format != VK_FORMAT_R8_UINT) {
            skip |= log_msg(
                report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, HandleToUint64(imageView),
                "VUID-vkCmdBindShadingRateImageNV-imageView-02060",
                "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT.");
        }
        const VkImageCreateInfo *ici = view_state ? &GetImageState(view_state->create_info.image)->createInfo : nullptr;
        if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                            HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02061",
                            "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been "
                            "created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set.");
        }
        if (view_state) {
            auto image_state = GetImageState(view_state->create_info.image);
            bool hit_error = false;
            // XXX TODO: While the VUID says "each subresource", only the base mip level is
            // actually used. Since we don't have an existing convenience function to iterate
            // over all mip levels, just don't bother with non-base levels.
            VkImageSubresourceRange &range = view_state->create_info.subresourceRange;
            VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount};
            if (image_state) {
                skip |= VerifyImageLayout(cb_state, image_state, subresource, imageLayout,
                                          VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, "vkCmdBindShadingRateImageNV()",
                                          "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063",
                                          "VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error);
            }
        }
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
                                                        VkImageLayout imageLayout) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    if (imageView != VK_NULL_HANDLE) {
        auto view_state = GetImageViewState(imageView);
        AddCommandBufferBindingImageView(cb_state, view_state);
    }
}

bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
                                                                   uint32_t viewportCount,
                                                                   const VkShadingRatePaletteNV *pShadingRatePalettes) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportShadingRatePaletteNV()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTENV, "vkCmdSetViewportShadingRatePaletteNV()");
    if (!enabled_features.shading_rate_image.shadingRateImage) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064",
                        "vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled.");
    }
    if (cb_state->static_status & CBSTATUS_SHADING_RATE_PALETTE_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02065",
                        "vkCmdSetViewportShadingRatePaletteNV(): pipeline was created without "
                        "VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV flag.");
    }
    for (uint32_t i = 0; i < viewportCount; ++i) {
        auto *palette = &pShadingRatePalettes[i];
        if (palette->shadingRatePaletteEntryCount == 0 ||
            palette->shadingRatePaletteEntryCount > phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) {
            skip |= log_msg(
                report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                HandleToUint64(commandBuffer), "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071",
                "vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize.");
        }
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
                                                                 uint32_t viewportCount,
                                                                 const VkShadingRatePaletteNV *pShadingRatePalettes) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // TODO: We don't have VUIDs for validating that all shading rate palettes have been set.
    // cb_state->shadingRatePaletteMask |= ((1u << viewportCount) - 1u) << firstViewport;
    cb_state->status |= CBSTATUS_SHADING_RATE_PALETTE_SET;
}

bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetLineWidth-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
    if (cb_state->static_status & CBSTATUS_LINE_WIDTH_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetLineWidth-None-00787",
                        "vkCmdSetLineWidth(): pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    cb_state->status |= CBSTATUS_LINE_WIDTH_SET;
}

bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor,
                                                float depthBiasClamp, float depthBiasSlopeFactor) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetDepthBias-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
    if (cb_state->static_status & CBSTATUS_DEPTH_BIAS_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-None-00789",
                        "vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag.");
    }
    if ((depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
                        "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
                        "be set to 0.0.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
                                              float depthBiasSlopeFactor) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    cb_state->status |= CBSTATUS_DEPTH_BIAS_SET;
}

bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetBlendConstants-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
    if (cb_state->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetBlendConstants-None-00612",
                        "vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    cb_state->status |= CBSTATUS_BLEND_CONSTANTS_SET;
}

bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetDepthBounds-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
    if (cb_state->static_status & CBSTATUS_DEPTH_BOUNDS_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBounds-None-00599",
                        "vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    cb_state->status |= CBSTATUS_DEPTH_BOUNDS_SET;
}

bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                         uint32_t compareMask) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetStencilCompareMask-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
    if (cb_state->static_status & CBSTATUS_STENCIL_READ_MASK_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilCompareMask-None-00602",
                        "vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                       uint32_t compareMask) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    cb_state->status |= CBSTATUS_STENCIL_READ_MASK_SET;
}

bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                       uint32_t writeMask) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetStencilWriteMask-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
    if (cb_state->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilWriteMask-None-00603",
                        "vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                     uint32_t writeMask) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    cb_state->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
}

bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                       uint32_t reference) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetStencilReference-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
    if (cb_state->static_status & CBSTATUS_STENCIL_REFERENCE_SET) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilReference-None-00604",
                        "vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                     uint32_t reference) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    cb_state->status |= CBSTATUS_STENCIL_REFERENCE_SET;
}

// Update pipeline_layout bind points applying the "Pipeline Layout Compatibility" rules
void CoreChecks::UpdateLastBoundDescriptorSets(CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint pipeline_bind_point,
                                               const PIPELINE_LAYOUT_STATE *pipeline_layout, uint32_t first_set,
                                               uint32_t set_count,
                                               const std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets,
                                               uint32_t dynamic_offset_count, const uint32_t *p_dynamic_offsets) {
    // Defensive
    assert(set_count);
    if (0 == set_count) return;
    assert(pipeline_layout);
    if (!pipeline_layout) return;

    uint32_t required_size = first_set + set_count;
    const uint32_t last_binding_index = required_size - 1;
    assert(last_binding_index < pipeline_layout->compat_for_set.size());

    // Some useful shorthand
    auto &last_bound = cb_state->lastBound[pipeline_bind_point];
    auto &bound_sets = last_bound.boundDescriptorSets;
    auto &dynamic_offsets = last_bound.dynamicOffsets;
    auto &bound_compat_ids = last_bound.compat_id_for_set;
    auto &pipe_compat_ids = pipeline_layout->compat_for_set;

    const uint32_t current_size = static_cast<uint32_t>(bound_sets.size());
    assert(current_size == dynamic_offsets.size());
    assert(current_size == bound_compat_ids.size());

    // We need this three times in this function, but nowhere else
    auto push_descriptor_cleanup = [&last_bound](const cvdescriptorset::DescriptorSet *ds) -> bool {
        if (ds && ds->IsPushDescriptor()) {
            assert(ds == last_bound.push_descriptor_set.get());
            last_bound.push_descriptor_set = nullptr;
            return true;
        }
        return false;
    };

    // Clean up the "disturbed" before and after the range to be set
    if (required_size < current_size) {
        if (bound_compat_ids[last_binding_index] != pipe_compat_ids[last_binding_index]) {
            // We're disturbing those after last, we'll shrink below, but first need to check for and cleanup the push_descriptor
            for (auto set_idx = required_size; set_idx < current_size; ++set_idx) {
                if (push_descriptor_cleanup(bound_sets[set_idx])) break;
            }
        } else {
            // We're not disturbing past last, so leave the upper binding data alone.
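            // Worked example (sketch): five sets currently bound, new bind covers sets [0..2]. If the pipeline
            // layout is still compatible at set index 2 we land here and sets 3..4 survive untouched; had the
            // compat id at set 2 differed, the branch above would already have released any push descriptor
            // among sets 3..4 ahead of the shrinking resize below.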
required_size = current_size; } } // We resize if we need more set entries or if those past "last" are disturbed if (required_size != current_size) { // TODO: put these size tied things in a struct (touches many lines) bound_sets.resize(required_size); dynamic_offsets.resize(required_size); bound_compat_ids.resize(required_size); } // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update for (uint32_t set_idx = 0; set_idx < first_set; ++set_idx) { if (bound_compat_ids[set_idx] != pipe_compat_ids[set_idx]) { push_descriptor_cleanup(bound_sets[set_idx]); bound_sets[set_idx] = nullptr; dynamic_offsets[set_idx].clear(); bound_compat_ids[set_idx] = pipe_compat_ids[set_idx]; } } // Now update the bound sets with the input sets const uint32_t *input_dynamic_offsets = p_dynamic_offsets; // "read" pointer for dynamic offset data for (uint32_t input_idx = 0; input_idx < set_count; input_idx++) { auto set_idx = input_idx + first_set; // set_idx is index within layout, input_idx is index within input descriptor sets cvdescriptorset::DescriptorSet *descriptor_set = descriptor_sets[input_idx]; // Record binding (or push) if (descriptor_set != last_bound.push_descriptor_set.get()) { // Only cleanup the push descriptors if they aren't the currently used set. push_descriptor_cleanup(bound_sets[set_idx]); } bound_sets[set_idx] = descriptor_set; bound_compat_ids[set_idx] = pipe_compat_ids[set_idx]; // compat ids are canonical *per* set index if (descriptor_set) { auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount(); // TODO: Add logic for tracking push_descriptor offsets (here or in caller) if (set_dynamic_descriptor_count && input_dynamic_offsets) { const uint32_t *end_offset = input_dynamic_offsets + set_dynamic_descriptor_count; dynamic_offsets[set_idx] = std::vector<uint32_t>(input_dynamic_offsets, end_offset); input_dynamic_offsets = end_offset; assert(input_dynamic_offsets <= (p_dynamic_offsets + dynamic_offset_count)); } else { dynamic_offsets[set_idx].clear(); } if (!descriptor_set->IsPushDescriptor()) { // Can't cache validation of push_descriptors cb_state->validated_descriptor_sets.insert(descriptor_set); } } } } // Update the bound state for the bind point, including the effects of incompatible pipeline layouts void CoreChecks::PreCallRecordCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); auto pipeline_layout = GetPipelineLayout(layout); std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets; descriptor_sets.reserve(setCount); // Construct a list of the descriptors bool found_non_null = false; for (uint32_t i = 0; i < setCount; i++) { cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[i]); descriptor_sets.emplace_back(descriptor_set); found_non_null |= descriptor_set != nullptr; } if (found_non_null) { // which implies setCount > 0 UpdateLastBoundDescriptorSets(cb_state, pipelineBindPoint, pipeline_layout, firstSet, setCount, descriptor_sets, dynamicOffsetCount, pDynamicOffsets); cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout; } } static bool ValidateDynamicOffsetAlignment(const debug_report_data *report_data, const VkDescriptorSetLayoutBinding *binding, VkDescriptorType test_type, VkDeviceSize alignment, const 
uint32_t *pDynamicOffsets, const char *err_msg, const char *limit_name, uint32_t *offset_idx) { bool skip = false; if (binding->descriptorType == test_type) { const auto end_idx = *offset_idx + binding->descriptorCount; for (uint32_t current_idx = *offset_idx; current_idx < end_idx; current_idx++) { if (SafeModulo(pDynamicOffsets[current_idx], alignment) != 0) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, err_msg, "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of device limit %s 0x%" PRIxLEAST64 ".", current_idx, pDynamicOffsets[current_idx], limit_name, alignment); } } *offset_idx = end_idx; } return skip; } bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmdQueueFlags(cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdBindDescriptorSets-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()"); // Track total count of dynamic descriptor types to make sure we have an offset for each one uint32_t total_dynamic_descriptors = 0; string error_string = ""; uint32_t last_set_index = firstSet + setCount - 1; if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) { cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1); cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1); cb_state->lastBound[pipelineBindPoint].compat_id_for_set.resize(last_set_index + 1); } auto pipeline_layout = GetPipelineLayout(layout); for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) { cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]); if (descriptor_set) { // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout if (!VerifySetLayoutCompatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358", "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of " "%s due to: %s.", set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str()); } auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount(); if (set_dynamic_descriptor_count) { // First make sure we won't overstep bounds of pDynamicOffsets array if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) { // Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359", "descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u dynamicOffsets are left in " "pDynamicOffsets array. 
There must be one dynamic offset for each dynamic descriptor being bound.",
                                    set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(),
                                    descriptor_set->GetDynamicDescriptorCount(),
                                    (dynamicOffsetCount - total_dynamic_descriptors));
                    // Set the number found to the maximum to prevent duplicate messages, or subsequent descriptor sets from
                    // testing against the "short tail" we're skipping below.
                    total_dynamic_descriptors = dynamicOffsetCount;
                } else {  // Validate dynamic offsets and Dynamic Offset Minimums
                    uint32_t cur_dyn_offset = total_dynamic_descriptors;
                    const auto dsl = descriptor_set->GetLayout();
                    const auto binding_count = dsl->GetBindingCount();
                    const auto &limits = phys_dev_props.limits;
                    for (uint32_t binding_idx = 0; binding_idx < binding_count; binding_idx++) {
                        const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
                        skip |= ValidateDynamicOffsetAlignment(report_data, binding, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
                                                               limits.minUniformBufferOffsetAlignment, pDynamicOffsets,
                                                               "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971",
                                                               "minUniformBufferOffsetAlignment", &cur_dyn_offset);
                        skip |= ValidateDynamicOffsetAlignment(report_data, binding, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
                                                               limits.minStorageBufferOffsetAlignment, pDynamicOffsets,
                                                               "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972",
                                                               "minStorageBufferOffsetAlignment", &cur_dyn_offset);
                    }
                    // Keep running total of dynamic descriptor count to verify at the end
                    total_dynamic_descriptors += set_dynamic_descriptor_count;
                }
            }
        } else {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(pDescriptorSets[set_idx]), kVUID_Core_DrawState_InvalidSet,
                            "Attempt to bind %s that doesn't exist!",
                            report_data->FormatHandle(pDescriptorSets[set_idx]).c_str());
        }
    }
    // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
    if (total_dynamic_descriptors != dynamicOffsetCount) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
                        "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
                        "exactly match the number of dynamic descriptors.",
                        setCount, total_dynamic_descriptors, dynamicOffsetCount);
    }
    return skip;
}

// Validates that the supplied bind point is supported for the command buffer (viz. the command pool)
// Takes array of error codes as some of the VUIDs (e.g. vkCmdBindPipeline) are written per bindpoint
// TODO add vkCmdBindPipeline bind_point validation using this call.
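// Illustrative call pattern (sketch only, mirroring the vkCmdBindPipeline and vkCmdPushDescriptorSetKHR call
// sites in this file); "vkCmdFoo" and its VUID strings are placeholders, not real entry points:
//     static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
//         std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdFoo-pipelineBindPoint-00000"),
//         std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdFoo-pipelineBindPoint-00000")};
//     skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdFoo()", bind_errors);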
bool CoreChecks::ValidatePipelineBindPoint(CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint bind_point, const char *func_name, const std::map<VkPipelineBindPoint, std::string> &bind_errors) { bool skip = false; auto pool = GetCommandPoolState(cb_state->createInfo.commandPool); if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)), }; const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex]; if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) { const std::string &error = bind_errors.at(bind_point); auto cb_u64 = HandleToUint64(cb_state->commandBuffer); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_u64, error, "%s: %s was allocated from %s that does not support bindpoint %s.", func_name, report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(cb_state->createInfo.commandPool).c_str(), string_VkPipelineBindPoint(bind_point)); } } return skip; } bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); const char *func_name = "vkCmdPushDescriptorSetKHR()"; bool skip = false; skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name); skip |= ValidateCmdQueueFlags(cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT), "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool"); static const std::map<VkPipelineBindPoint, std::string> bind_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")}; skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, func_name, bind_errors); auto layout_data = GetPipelineLayout(layout); // Validate the set index points to a push descriptor set and is in range if (layout_data) { const auto &set_layouts = layout_data->set_layouts; const auto layout_u64 = HandleToUint64(layout); if (set < set_layouts.size()) { const auto dsl = set_layouts[set]; if (dsl) { if (!dsl->IsPushDescriptor()) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00365", "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set, report_data->FormatHandle(layout).c_str()); } else { // Create an empty proxy in order to use the existing descriptor set update validation // TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we // don't have to do this. 
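                    // Note: proxy_ds below is deliberately not backed by a real VkDescriptorSet or pool (both
                    // handles are VK_NULL_HANDLE); it only lends the push descriptor set layout to the shared
                    // write-update validation path.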
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, VK_NULL_HANDLE, dsl, 0, this); skip |= ValidatePushDescriptorsUpdate(&proxy_ds, descriptorWriteCount, pDescriptorWrites, func_name); } } } else { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00364", "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set, report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(set_layouts.size())); } } return skip; } void CoreChecks::RecordCmdPushDescriptorSetState(CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) { const auto &pipeline_layout = GetPipelineLayout(layout); // Short circuit invalid updates if (!pipeline_layout || (set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[set] || !pipeline_layout->set_layouts[set]->IsPushDescriptor()) return; // We need a descriptor set to update the bindings with, compatible with the passed layout const auto dsl = pipeline_layout->set_layouts[set]; auto &last_bound = cb_state->lastBound[pipelineBindPoint]; auto &push_descriptor_set = last_bound.push_descriptor_set; // If we are disturbing the current push_desriptor_set clear it if (!push_descriptor_set || !CompatForSet(set, last_bound.compat_id_for_set, pipeline_layout->compat_for_set)) { last_bound.UnbindAndResetPushDescriptorSet(new cvdescriptorset::DescriptorSet(0, 0, dsl, 0, this)); } std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets = {push_descriptor_set.get()}; UpdateLastBoundDescriptorSets(cb_state, pipelineBindPoint, pipeline_layout, set, 1, descriptor_sets, 0, nullptr); last_bound.pipeline_layout = layout; // Now that we have either the new or extant push_descriptor set ... do the write updates against it push_descriptor_set->PerformPushDescriptorsUpdate(descriptorWriteCount, pDescriptorWrites); } void CoreChecks::PreCallRecordCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordCmdPushDescriptorSetState(cb_state, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites); } static VkDeviceSize GetIndexAlignment(VkIndexType indexType) { switch (indexType) { case VK_INDEX_TYPE_UINT16: return 2; case VK_INDEX_TYPE_UINT32: return 4; default: // Not a real index type. Express no alignment requirement here; we expect upper layer // to have already picked up on the enum being nonsense. 
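            // Returning 1 (byte alignment) makes the "offset % alignment" check in PreCallValidateCmdBindIndexBuffer
            // below trivially pass for unknown enum values. Worked example: VK_INDEX_TYPE_UINT16 -> alignment 2, so
            // vkCmdBindIndexBuffer() with offset == 3 triggers VUID-vkCmdBindIndexBuffer-offset-00432.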
return 1; } } bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) { auto buffer_state = GetBufferState(buffer); auto cb_node = GetCBState(commandBuffer); assert(buffer_state); assert(cb_node); bool skip = ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433", "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT"); skip |= ValidateCmdQueueFlags(cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBindIndexBuffer-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434"); auto offset_align = GetIndexAlignment(indexType); if (offset % offset_align) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdBindIndexBuffer-offset-00432", "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset, string_VkIndexType(indexType)); } return skip; } void CoreChecks::PreCallRecordCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) { auto buffer_state = GetBufferState(buffer); auto cb_node = GetCBState(commandBuffer); cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND; cb_node->index_buffer_binding.buffer = buffer; cb_node->index_buffer_binding.size = buffer_state->createInfo.size; cb_node->index_buffer_binding.offset = offset; cb_node->index_buffer_binding.index_type = indexType; } bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) { auto cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBindVertexBuffers-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()"); for (uint32_t i = 0; i < bindingCount; ++i) { auto buffer_state = GetBufferState(pBuffers[i]); assert(buffer_state); skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, "VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers()", "VUID-vkCmdBindVertexBuffers-pBuffers-00628"); if (pOffsets[i] >= buffer_state->createInfo.size) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(buffer_state->buffer), "VUID-vkCmdBindVertexBuffers-pOffsets-00626", "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]); } } return skip; } void CoreChecks::PreCallRecordCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) { auto cb_state = GetCBState(commandBuffer); uint32_t end = firstBinding + bindingCount; if (cb_state->current_draw_data.vertex_buffer_bindings.size() < end) { cb_state->current_draw_data.vertex_buffer_bindings.resize(end); } for (uint32_t i = 0; i < bindingCount; ++i) { auto &vertex_buffer_binding = cb_state->current_draw_data.vertex_buffer_bindings[i + 
firstBinding]; vertex_buffer_binding.buffer = pBuffers[i]; vertex_buffer_binding.offset = pOffsets[i]; } } // Validate that an image's sampleCount matches the requirement for a specific API call bool CoreChecks::ValidateImageSampleCount(IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location, const std::string &msgCode) { bool skip = false; if (image_state->createInfo.samples != sample_count) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), msgCode, "%s for %s was created with a sample count of %s but must be %s.", location, report_data->FormatHandle(image_state->image).c_str(), string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count)); } return skip; } bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) { auto cb_state = GetCBState(commandBuffer); assert(cb_state); auto dst_buffer_state = GetBufferState(dstBuffer); assert(dst_buffer_state); bool skip = false; skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035"); // Validate that DST buffer has correct usage flags set skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdUpdateBuffer-dstBuffer-00034", "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdUpdateBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdUpdateBuffer-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()"); skip |= InsideRenderPass(cb_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-renderpass"); return skip; } void CoreChecks::PostCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) { auto cb_state = GetCBState(commandBuffer); auto dst_buffer_state = GetBufferState(dstBuffer); // Update bindings between buffer and cmd buffer AddCommandBufferBindingBuffer(cb_state, dst_buffer_state); } bool CoreChecks::SetEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { CMD_BUFFER_STATE *pCB = GetCBState(commandBuffer); if (pCB) { pCB->eventToStageMap[event] = stageMask; } auto queue_data = queueMap.find(queue); if (queue_data != queueMap.end()) { queue_data->second.eventToStageMap[event] = stageMask; } return false; } bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdSetEvent-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_SETEVENT, "vkCmdSetEvent()"); skip |= InsideRenderPass(cb_state, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-renderpass"); skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-stageMask-01150", "VUID-vkCmdSetEvent-stageMask-01151", "VUID-vkCmdSetEvent-stageMask-02107", "VUID-vkCmdSetEvent-stageMask-02108"); return skip; } void CoreChecks::PreCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { CMD_BUFFER_STATE *cb_state = 
GetCBState(commandBuffer); auto event_state = GetEventState(event); if (event_state) { AddCommandBufferBinding(&event_state->cb_bindings, VulkanTypedHandle(event, kVulkanObjectTypeEvent), cb_state); event_state->cb_bindings.insert(cb_state); } cb_state->events.push_back(event); if (!cb_state->waitedEvents.count(event)) { cb_state->writeEventsBeforeWait.push_back(event); } cb_state->eventUpdates.emplace_back([=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, stageMask); }); } bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdResetEvent-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_RESETEVENT, "vkCmdResetEvent()"); skip |= InsideRenderPass(cb_state, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-renderpass"); skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-stageMask-01154", "VUID-vkCmdResetEvent-stageMask-01155", "VUID-vkCmdResetEvent-stageMask-02109", "VUID-vkCmdResetEvent-stageMask-02110"); return skip; } void CoreChecks::PreCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); auto event_state = GetEventState(event); if (event_state) { AddCommandBufferBinding(&event_state->cb_bindings, VulkanTypedHandle(event, kVulkanObjectTypeEvent), cb_state); event_state->cb_bindings.insert(cb_state); } cb_state->events.push_back(event); if (!cb_state->waitedEvents.count(event)) { cb_state->writeEventsBeforeWait.push_back(event); } // TODO : Add check for "VUID-vkResetEvent-event-01148" cb_state->eventUpdates.emplace_back( [=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); }); } // Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is set static VkPipelineStageFlags ExpandPipelineStageFlags(const DeviceExtensions &extensions, VkPipelineStageFlags inflags) { if (~inflags & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) return inflags; return (inflags & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) | (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | (extensions.vk_nv_mesh_shader ? (VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV) : 0) | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT | (extensions.vk_ext_conditional_rendering ? VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT : 0) | (extensions.vk_ext_transform_feedback ? VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT : 0) | (extensions.vk_nv_shading_rate_image ? VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV : 0) | (extensions.vk_ext_fragment_density_map ? 
VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT : 0));
}

static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags inflags) {
    return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                        VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0;
}

static int GetGraphicsPipelineStageLogicalOrdinal(VkPipelineStageFlagBits flag) {
    // Note that the list (and lookup) ignore invalid-for-enabled-extension condition. This should be checked elsewhere
    // and would greatly complicate this intentionally simple implementation
    // clang-format off
    const VkPipelineStageFlagBits ordered_array[] = {
        VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
        VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
        VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
        VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
        VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
        VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
        VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,

        // Including the task/mesh shaders here is not technically correct, as they are in a
        // separate logical pipeline - but it works for the case this is currently used, and
        // fixing it would require significant rework and end up with the code being far more
        // verbose for no practical gain.
        // However, worth paying attention to this if using this function in a new way.
        VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV,
        VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV,

        VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
        VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
        VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
        VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
        VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
    };
    // clang-format on

    const int ordered_array_length = sizeof(ordered_array) / sizeof(VkPipelineStageFlagBits);

    for (int i = 0; i < ordered_array_length; ++i) {
        if (ordered_array[i] == flag) {
            return i;
        }
    }

    return -1;
}

// The following two functions technically have O(N^2) complexity, but it's for a value of O that's largely
// stable and also rather tiny - this could definitely be rejigged to work more efficiently, but the impact
// on runtime is currently negligible, so it wouldn't gain very much.
// If we add a lot more graphics pipeline stages, this set of functions should be rewritten to accommodate.
static VkPipelineStageFlagBits GetLogicallyEarliestGraphicsPipelineStage(VkPipelineStageFlags inflags) {
    VkPipelineStageFlagBits earliest_bit = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    int earliest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(earliest_bit);

    // Walk all 32 bits of the flags word -- sizeof(VkPipelineStageFlagBits) is a byte count (4), which would
    // only ever examine the lowest four stage bits.
    for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) {
        VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i);
        if (current_flag) {
            int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag);
            if (new_order != -1 && new_order < earliest_bit_order) {
                earliest_bit_order = new_order;
                earliest_bit = current_flag;
            }
        }
        inflags = inflags >> 1;
    }
    return earliest_bit;
}

static VkPipelineStageFlagBits GetLogicallyLatestGraphicsPipelineStage(VkPipelineStageFlags inflags) {
    VkPipelineStageFlagBits latest_bit = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    int latest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(latest_bit);

    for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) {
        VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i);
        if (current_flag) {
            int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag);
            if (new_order != -1 && new_order > latest_bit_order) {
                latest_bit_order = new_order;
                latest_bit = current_flag;
            }
        }
        inflags = inflags >> 1;
    }
    return latest_bit;
}

// Verify image barrier image state and that the image is consistent with FB image
bool CoreChecks::ValidateImageBarrierImage(const char *funcName, CMD_BUFFER_STATE const *cb_state, VkFramebuffer framebuffer,
                                           uint32_t active_subpass, const safe_VkSubpassDescription2KHR &sub_desc,
                                           const VulkanTypedHandle &rp_handle, uint32_t img_index,
                                           const VkImageMemoryBarrier &img_barrier) {
    bool skip = false;
    const auto &fb_state = GetFramebufferState(framebuffer);
    assert(fb_state);
    const auto img_bar_image = img_barrier.image;
    bool image_match = false;
    bool sub_image_found = false;  // Do we find a corresponding subpass description
    VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
    uint32_t attach_index = 0;
    // Verify that a framebuffer image matches barrier image
    const auto attachmentCount = fb_state->createInfo.attachmentCount;
    for (uint32_t attachment = 0; attachment < attachmentCount; ++attachment) {
        auto view_state = GetAttachmentImageViewState(fb_state, attachment);
        if (view_state && (img_bar_image == view_state->create_info.image)) {
            image_match = true;
            attach_index = attachment;
            break;
        }
    }
    if (image_match) {  // Make sure subpass is referring to matching attachment
        if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
            sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
            sub_image_found = true;
        } else if (device_extensions.vk_khr_depth_stencil_resolve) {
            const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(sub_desc.pNext);
            if (resolve && resolve->pDepthStencilResolveAttachment &&
                resolve->pDepthStencilResolveAttachment->attachment == attach_index) {
                sub_image_layout = resolve->pDepthStencilResolveAttachment->layout;
                sub_image_found = true;
            }
        } else {
            for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
                if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pColorAttachments[j].layout;
                    sub_image_found = true;
                    break;
                } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pResolveAttachments[j].layout;
                    sub_image_found = true;
                    break;
                }
            }
        }
        if
(!sub_image_found) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-image-02635", "%s: Barrier pImageMemoryBarriers[%d].%s is not referenced by the VkSubpassDescription for " "active subpass (%d) of current %s.", funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass, report_data->FormatHandle(rp_handle).c_str()); } } else { // !image_match auto const fb_handle = HandleToUint64(fb_state->framebuffer); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, fb_handle, "VUID-vkCmdPipelineBarrier-image-02635", "%s: Barrier pImageMemoryBarriers[%d].%s does not match an image from the current %s.", funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), report_data->FormatHandle(fb_state->framebuffer).c_str()); } if (img_barrier.oldLayout != img_barrier.newLayout) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-oldLayout-01181", "%s: As the Image Barrier for %s is being executed within a render pass instance, oldLayout must " "equal newLayout yet they are %s and %s.", funcName, report_data->FormatHandle(img_barrier.image).c_str(), string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(img_barrier.newLayout)); } else { if (sub_image_found && sub_image_layout != img_barrier.oldLayout) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-oldLayout-02636", "%s: Barrier pImageMemoryBarriers[%d].%s is referenced by the VkSubpassDescription for active " "subpass (%d) of current %s as having layout %s, but image barrier has layout %s.", funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass, report_data->FormatHandle(rp_handle).c_str(), string_VkImageLayout(sub_image_layout), string_VkImageLayout(img_barrier.oldLayout)); } } return skip; } // Validate image barriers within a renderPass bool CoreChecks::ValidateRenderPassImageBarriers(const char *funcName, CMD_BUFFER_STATE *cb_state, uint32_t active_subpass, const safe_VkSubpassDescription2KHR &sub_desc, const VulkanTypedHandle &rp_handle, const safe_VkSubpassDependency2KHR *dependencies, const std::vector<uint32_t> &self_dependencies, uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) { bool skip = false; for (uint32_t i = 0; i < image_mem_barrier_count; ++i) { const auto &img_barrier = image_barriers[i]; const auto &img_src_access_mask = img_barrier.srcAccessMask; const auto &img_dst_access_mask = img_barrier.dstAccessMask; bool access_mask_match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; access_mask_match = (img_src_access_mask == (sub_dep.srcAccessMask & img_src_access_mask)) && (img_dst_access_mask == (sub_dep.dstAccessMask & img_dst_access_mask)); if (access_mask_match) break; } if (!access_mask_match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency " "srcAccessMask of subpass %d of %s. 
Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, i, img_src_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency " "dstAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, i, img_dst_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex || VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182", "%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and " "pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.", funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex); } // Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known if (VK_NULL_HANDLE == cb_state->activeFramebuffer) { assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level); // Secondary CB case w/o FB specified delay validation cb_state->cmd_execute_commands_functions.emplace_back([=](CMD_BUFFER_STATE *primary_cb, VkFramebuffer fb) { return ValidateImageBarrierImage(funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i, img_barrier); }); } else { skip |= ValidateImageBarrierImage(funcName, cb_state, cb_state->activeFramebuffer, active_subpass, sub_desc, rp_handle, i, img_barrier); } } return skip; } // Validate VUs for Pipeline Barriers that are within a renderPass // Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state bool CoreChecks::ValidateRenderPassPipelineBarriers(const char *funcName, CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, VkDependencyFlags dependency_flags, uint32_t mem_barrier_count, const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count, const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) { bool skip = false; const auto rp_state = cb_state->activeRenderPass; const auto active_subpass = cb_state->activeSubpass; const VulkanTypedHandle rp_handle(rp_state->renderPass, kVulkanObjectTypeRenderPass); const auto &self_dependencies = rp_state->self_dependencies[active_subpass]; const auto &dependencies = rp_state->createInfo.pDependencies; if (self_dependencies.size() == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barriers cannot be set during subpass %d of %s with no self-dependency specified.", funcName, active_subpass, report_data->FormatHandle(rp_handle).c_str()); } else { // Grab ref to current subpassDescription up-front for use below const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass]; // Look for matching mask in any self-dependency bool stage_mask_match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; const 
auto &sub_src_stage_mask = ExpandPipelineStageFlags(device_extensions, sub_dep.srcStageMask); const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(device_extensions, sub_dep.dstStageMask); stage_mask_match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (src_stage_mask == (sub_src_stage_mask & src_stage_mask))) && ((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask))); if (stage_mask_match) break; } if (!stage_mask_match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask of any " "self-dependency of subpass %d of %s for which dstStageMask is also a subset. " "Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, src_stage_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask of any " "self-dependency of subpass %d of %s for which srcStageMask is also a subset. " "Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, dst_stage_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } if (0 != buffer_mem_barrier_count) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178", "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", funcName, buffer_mem_barrier_count, active_subpass, report_data->FormatHandle(rp_handle).c_str()); } for (uint32_t i = 0; i < mem_barrier_count; ++i) { const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask; const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask; bool access_mask_match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; access_mask_match = (mb_src_access_mask == (sub_dep.srcAccessMask & mb_src_access_mask)) && (mb_dst_access_mask == (sub_dep.dstAccessMask & mb_dst_access_mask)); if (access_mask_match) break; } if (!access_mask_match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency srcAccessMask " "for any self-dependency of subpass %d of %s for which dstAccessMask is also a subset. 
" "Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, i, mb_src_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency dstAccessMask " "for any self-dependency of subpass %d of %s for which srcAccessMask is also a subset. " "Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, i, mb_dst_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } } skip |= ValidateRenderPassImageBarriers(funcName, cb_state, active_subpass, sub_desc, rp_handle, dependencies, self_dependencies, image_mem_barrier_count, image_barriers); bool flag_match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; flag_match = sub_dep.dependencyFlags == dependency_flags; if (flag_match) break; } if (!flag_match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle.handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any " "self-dependency of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].", funcName, dependency_flags, cb_state->activeSubpass, report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } } return skip; } // Array to mask individual accessMask to corresponding stageMask // accessMask active bit position (0-31) maps to index const static VkPipelineStageFlags AccessMaskToPipeStage[28] = { // VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, // VK_ACCESS_INDEX_READ_BIT = 1 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, // VK_ACCESS_UNIFORM_READ_BIT = 3 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV, // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // VK_ACCESS_SHADER_READ_BIT = 5 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV, // VK_ACCESS_SHADER_WRITE_BIT = 6 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV, // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT 
= 7 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, // VK_ACCESS_TRANSFER_READ_BIT = 11 VK_PIPELINE_STAGE_TRANSFER_BIT, // VK_ACCESS_TRANSFER_WRITE_BIT = 12 VK_PIPELINE_STAGE_TRANSFER_BIT, // VK_ACCESS_HOST_READ_BIT = 13 VK_PIPELINE_STAGE_HOST_BIT, // VK_ACCESS_HOST_WRITE_BIT = 14 VK_PIPELINE_STAGE_HOST_BIT, // VK_ACCESS_MEMORY_READ_BIT = 15 VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match // VK_ACCESS_MEMORY_WRITE_BIT = 16 VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match // VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17 VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, // VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18 VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, // VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 19 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 20 VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, // VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 21 VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV | VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, // VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 22 VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, // VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 23 VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV, // VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 24 VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT, // VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 25 VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT, // VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 26 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, // VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 27 VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT, }; // Verify that all bits of access_mask are supported by the src_stage_mask static bool ValidateAccessMaskPipelineStage(const DeviceExtensions &extensions, VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) { // Early out if all commands set, or access_mask NULL if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true; stage_mask = ExpandPipelineStageFlags(extensions, stage_mask); int index = 0; // for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set while (access_mask) { index = (u_ffs(access_mask) - 1); assert(index >= 0); // Must have "!= 0" compare to prevent warning from MSVC if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false; // early out access_mask &= ~(1 << index); // Mask off bit that's been checked } return true; } namespace barrier_queue_families { enum VuIndex { kSrcOrDstMustBeIgnore, kSpecialOrIgnoreOnly, kSrcIgnoreRequiresDstIgnore, kDstValidOrSpecialIfNotIgnore, kSrcValidOrSpecialIfNotIgnore, kSrcAndDestMustBeIgnore, kBothIgnoreOrBothValid, kSubmitQueueMustMatchSrcOrDst }; static const char *vu_summary[] = {"Source or destination queue family must be ignored.", "Source or destination queue family must be special or ignored.", "Destination queue family must be ignored if source queue family is.", "Destination queue family must be valid, ignored, or special.", "Source queue family must be valid, ignored, or special.", "Source and destination queue family must both be ignored.", "Source and destination queue family must both be 
ignored or both valid.", "Source or destination queue family must match submit queue family, if not ignored."}; static const std::string image_error_codes[] = { "VUID-VkImageMemoryBarrier-image-01381", // kSrcOrDstMustBeIgnore "VUID-VkImageMemoryBarrier-image-01766", // kSpecialOrIgnoreOnly "VUID-VkImageMemoryBarrier-image-01201", // kSrcIgnoreRequiresDstIgnore "VUID-VkImageMemoryBarrier-image-01768", // kDstValidOrSpecialIfNotIgnore "VUID-VkImageMemoryBarrier-image-01767", // kSrcValidOrSpecialIfNotIgnore "VUID-VkImageMemoryBarrier-image-01199", // kSrcAndDestMustBeIgnore "VUID-VkImageMemoryBarrier-image-01200", // kBothIgnoreOrBothValid "VUID-VkImageMemoryBarrier-image-01205", // kSubmitQueueMustMatchSrcOrDst }; static const std::string buffer_error_codes[] = { "VUID-VkBufferMemoryBarrier-buffer-01191", // kSrcOrDstMustBeIgnore "VUID-VkBufferMemoryBarrier-buffer-01763", // kSpecialOrIgnoreOnly "VUID-VkBufferMemoryBarrier-buffer-01193", // kSrcIgnoreRequiresDstIgnore "VUID-VkBufferMemoryBarrier-buffer-01765", // kDstValidOrSpecialIfNotIgnore "VUID-VkBufferMemoryBarrier-buffer-01764", // kSrcValidOrSpecialIfNotIgnore "VUID-VkBufferMemoryBarrier-buffer-01190", // kSrcAndDestMustBeIgnore "VUID-VkBufferMemoryBarrier-buffer-01192", // kBothIgnoreOrBothValid "VUID-VkBufferMemoryBarrier-buffer-01196", // kSubmitQueueMustMatchSrcOrDst }; class ValidatorState { public: ValidatorState(const CoreChecks *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state, const VulkanTypedHandle &barrier_handle, const VkSharingMode sharing_mode, const std::string *val_codes) : report_data_(device_data->report_data), func_name_(func_name), cb_handle64_(HandleToUint64(cb_state->commandBuffer)), barrier_handle_(barrier_handle), sharing_mode_(sharing_mode), val_codes_(val_codes), limit_(static_cast<uint32_t>(device_data->physical_device_state->queue_family_properties.size())), mem_ext_(device_data->device_extensions.vk_khr_external_memory) {} // Create a validator state from an image state... reducing the image-specific case to the generic version. ValidatorState(const CoreChecks *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state, const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state) : ValidatorState(device_data, func_name, cb_state, VulkanTypedHandle(barrier->image, kVulkanObjectTypeImage), state->createInfo.sharingMode, image_error_codes) {} // Create a validator state from a buffer state... reducing the buffer-specific case to the generic version. ValidatorState(const CoreChecks *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state, const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state) : ValidatorState(device_data, func_name, cb_state, VulkanTypedHandle(barrier->buffer, kVulkanObjectTypeBuffer), state->createInfo.sharingMode, buffer_error_codes) {} // Log the messages using boilerplate from object state, and VU-specific information from the template arg. // One- and two-family versions; in the single-family version, the caller supplies the name of the offending parameter. bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const { const std::string &val_code = val_codes_[vu_index]; const char *annotation = GetFamilyAnnotation(family); return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_, val_code, "%s: Barrier using %s %s created with sharingMode %s, has %s %u%s. 
%s", func_name_, GetTypeString(), report_data_->FormatHandle(barrier_handle_).c_str(), GetModeString(), param_name, family, annotation, vu_summary[vu_index]); } bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const { const std::string &val_code = val_codes_[vu_index]; const char *src_annotation = GetFamilyAnnotation(src_family); const char *dst_annotation = GetFamilyAnnotation(dst_family); return log_msg( report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_, val_code, "%s: Barrier using %s %s created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s", func_name_, GetTypeString(), report_data_->FormatHandle(barrier_handle_).c_str(), GetModeString(), src_family, src_annotation, dst_family, dst_annotation, vu_summary[vu_index]); } // This abstract VU can only be tested at submit time, thus we need a callback from the closure containing the needed // data. Note that the mem_barrier is copied to the closure as the lambda's lifespan exceeds the guarantees of validity for // application input. static bool ValidateAtQueueSubmit(const VkQueue queue, const CoreChecks *device_data, uint32_t src_family, uint32_t dst_family, const ValidatorState &val) { auto queue_data_it = device_data->queueMap.find(queue); if (queue_data_it == device_data->queueMap.end()) return false; uint32_t queue_family = queue_data_it->second.queueFamilyIndex; if ((src_family != queue_family) && (dst_family != queue_family)) { const std::string &val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst]; const char *src_annotation = val.GetFamilyAnnotation(src_family); const char *dst_annotation = val.GetFamilyAnnotation(dst_family); return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue), val_code, "%s: Barrier submitted to queue with family index %u, using %s %s created with sharingMode %s, has " "srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. 
%s", "vkQueueSubmit", queue_family, val.GetTypeString(), device_data->report_data->FormatHandle(val.barrier_handle_).c_str(), val.GetModeString(), src_family, src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]); } return false; } // Logical helpers for semantic clarity inline bool KhrExternalMem() const { return mem_ext_; } inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); } inline bool IsValidOrSpecial(uint32_t queue_family) const { return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family)); } inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; } // Helpers for LogMsg (and log_msg) const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); } // Descriptive text for the various types of queue family index const char *GetFamilyAnnotation(uint32_t family) const { const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)"; const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)"; const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)"; const char *valid = " (VALID)"; const char *invalid = " (INVALID)"; switch (family) { case VK_QUEUE_FAMILY_EXTERNAL_KHR: return external; case VK_QUEUE_FAMILY_FOREIGN_EXT: return foreign; case VK_QUEUE_FAMILY_IGNORED: return ignored; default: if (IsValid(family)) { return valid; } return invalid; }; } const char *GetTypeString() const { return object_string[barrier_handle_.type]; } VkSharingMode GetSharingMode() const { return sharing_mode_; } protected: const debug_report_data *const report_data_; const char *const func_name_; const uint64_t cb_handle64_; const VulkanTypedHandle barrier_handle_; const VkSharingMode sharing_mode_; const std::string *val_codes_; const uint32_t limit_; const bool mem_ext_; }; bool Validate(const CoreChecks *device_data, const char *func_name, CMD_BUFFER_STATE *cb_state, const ValidatorState &val, const uint32_t src_queue_family, const uint32_t dst_queue_family) { bool skip = false; const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT; const bool src_ignored = val.IsIgnored(src_queue_family); const bool dst_ignored = val.IsIgnored(dst_queue_family); if (val.KhrExternalMem()) { if (mode_concurrent) { if (!(src_ignored || dst_ignored)) { skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family); } if ((src_ignored && !(dst_ignored || IsSpecial(dst_queue_family))) || (dst_ignored && !(src_ignored || IsSpecial(src_queue_family)))) { skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family); } } else { // VK_SHARING_MODE_EXCLUSIVE if (src_ignored && !dst_ignored) { skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family); } if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) { skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex"); } if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) { skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex"); } } } else { // No memory extension if (mode_concurrent) { if (!src_ignored || !dst_ignored) { skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family); } } else { // VK_SHARING_MODE_EXCLUSIVE if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) { skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family); } } } if (!mode_concurrent && !src_ignored && !dst_ignored) { // Only enqueue 
submit time check if it is needed. If more submit time checks are added, change the criteria // TODO create a better named list, or rename the submit time lists to something that matches the broader usage... // Note: if we want to create a semantic that separates state lookup, validation, and state update this should go // to a local queue of update_state_actions or something. cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) { return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val); }); } return skip; } } // namespace barrier_queue_families // Type specific wrapper for image barriers bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, CMD_BUFFER_STATE *cb_state, const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state_data) { // State data is required if (!state_data) { return false; } // Create the validator state from the image state barrier_queue_families::ValidatorState val(this, func_name, cb_state, barrier, state_data); const uint32_t src_queue_family = barrier->srcQueueFamilyIndex; const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex; return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family); } // Type specific wrapper for buffer barriers bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, CMD_BUFFER_STATE *cb_state, const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state_data) { // State data is required if (!state_data) { return false; } // Create the validator state from the buffer state barrier_queue_families::ValidatorState val(this, func_name, cb_state, barrier, state_data); const uint32_t src_queue_family = barrier->srcQueueFamilyIndex; const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex; return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family); } bool CoreChecks::ValidateBarriers(const char *funcName, CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount, const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers) { bool skip = false; for (uint32_t i = 0; i < memBarrierCount; ++i) { const auto &mem_barrier = pMemBarriers[i]; if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184", "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i, mem_barrier.srcAccessMask, src_stage_mask); } if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185", "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i, mem_barrier.dstAccessMask, dst_stage_mask); } } for (uint32_t i = 0; i < imageMemBarrierCount; ++i) { auto mem_barrier = &pImageMemBarriers[i]; if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier->srcAccessMask, src_stage_mask)) { 
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184", "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i, mem_barrier->srcAccessMask, src_stage_mask); } if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier->dstAccessMask, dst_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185", "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i, mem_barrier->dstAccessMask, dst_stage_mask); } auto image_data = GetImageState(mem_barrier->image); skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, image_data); if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-newLayout-01198", "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName); } if (image_data) { // There is no VUID for this, but there is blanket text: // "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before // recording commands in a command buffer." // TODO: Update this when VUID is defined skip |= ValidateMemoryIsBoundToImage(image_data, funcName, kVUIDUndefined); auto aspect_mask = mem_barrier->subresourceRange.aspectMask; skip |= ValidateImageAspectMask(image_data->image, image_data->createInfo.format, aspect_mask, funcName); std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange"; skip |= ValidateImageBarrierSubresourceRange(image_data, mem_barrier->subresourceRange, funcName, param_name.c_str()); } } for (uint32_t i = 0; i < bufferBarrierCount; ++i) { auto mem_barrier = &pBufferMemBarriers[i]; if (!mem_barrier) continue; if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier->srcAccessMask, src_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184", "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i, mem_barrier->srcAccessMask, src_stage_mask); } if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier->dstAccessMask, dst_stage_mask)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185", "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i, mem_barrier->dstAccessMask, dst_stage_mask); } // Validate buffer barrier queue family indices auto buffer_state = GetBufferState(mem_barrier->buffer); skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, buffer_state); if (buffer_state) { // There is no VUID for this, but there is blanket text: // "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before // recording commands in a command buffer" // TODO: Update this when VUID is 
defined skip |= ValidateMemoryIsBoundToBuffer(buffer_state, funcName, kVUIDUndefined); auto buffer_size = buffer_state->createInfo.size; if (mem_barrier->offset >= buffer_size) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-offset-01187", "%s: Buffer Barrier %s has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".", funcName, report_data->FormatHandle(mem_barrier->buffer).c_str(), HandleToUint64(mem_barrier->offset), HandleToUint64(buffer_size)); } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-size-01189", "%s: Buffer Barrier %s has offset 0x%" PRIx64 " and size 0x%" PRIx64 " whose sum is greater than total size 0x%" PRIx64 ".", funcName, report_data->FormatHandle(mem_barrier->buffer).c_str(), HandleToUint64(mem_barrier->offset), HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size)); } } } skip |= ValidateBarriersQFOTransferUniqueness(funcName, cb_state, bufferBarrierCount, pBufferMemBarriers, imageMemBarrierCount, pImageMemBarriers); return skip; } bool CoreChecks::ValidateEventStageMask(VkQueue queue, CMD_BUFFER_STATE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) { bool skip = false; VkPipelineStageFlags stageMask = 0; for (uint32_t i = 0; i < eventCount; ++i) { auto event = pCB->events[firstEventIndex + i]; auto queue_data = queueMap.find(queue); if (queue_data == queueMap.end()) return false; auto event_data = queue_data->second.eventToStageMap.find(event); if (event_data != queue_data->second.eventToStageMap.end()) { stageMask |= event_data->second; } else { auto global_event_data = GetEventState(event); if (!global_event_data) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, HandleToUint64(event), kVUID_Core_DrawState_InvalidEvent, "%s cannot be waited on if it has never been set.", report_data->FormatHandle(event).c_str()); } else { stageMask |= global_event_data->stageMask; } } } // TODO: Need to validate that host_bit is only set if set event is called // but set event can be called at any time. 
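// Worked example for the check below (hypothetical masks, for illustration only): if the waited
// events were set with stageMask VK_PIPELINE_STAGE_TRANSFER_BIT and
// VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT respectively, the accumulated stageMask is
// (TRANSFER | COMPUTE_SHADER); sourceStageMask must then equal exactly that value,
// or that value | VK_PIPELINE_STAGE_HOST_BIT when the event was set from the host.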
if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), "VUID-vkCmdWaitEvents-srcStageMask-parameter", "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of " "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with " "vkSetEvent but instead is 0x%X.", sourceStageMask, stageMask); } return skip; } // Note that we only check bits that HAVE required queueflags -- don't care entries are skipped static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = { {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT}, {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT}, {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT}, {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT}, {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}}; static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT}; bool CoreChecks::CheckStageMaskQueueCompatibility(VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask, VkQueueFlags queue_flags, const char *function, const char *src_or_dest, const char *error_code) { bool skip = false; // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags for (const auto &item : stage_flag_bit_array) { if (stage_mask & item) { if ((supported_pipeline_stages_table[item] & queue_flags) == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), error_code, "%s(): %s flag %s is not compatible with the queue family properties of this command buffer.", function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item))); } } } return skip; } // Check if all barriers are of a given operation type. 
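// Minimal usage sketch (mirroring the calls in ComputeBarrierOperationsType below): the
// predicate is applied to each barrier in turn and the loop short-circuits on the first failure:
//   bool all_release = AllTransferOp(pool, TempIsReleaseOp<VkImageMemoryBarrier>,
//                                    image_barrier_count, image_barriers);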
template <typename Barrier, typename OpCheck> bool AllTransferOp(const COMMAND_POOL_STATE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) { if (!pool) return false; for (uint32_t b = 0; b < count; b++) { if (!op_check(pool, barriers + b)) return false; } return true; } // Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(CMD_BUFFER_STATE *cb_state, uint32_t buffer_barrier_count, const VkBufferMemoryBarrier *buffer_barriers, uint32_t image_barrier_count, const VkImageMemoryBarrier *image_barriers) { auto pool = GetCommandPoolState(cb_state->createInfo.commandPool); BarrierOperationsType op_type = kGeneral; // Look at the barrier details only if they exist // Note: AllTransferOp returns true for count == 0 if ((buffer_barrier_count + image_barrier_count) != 0) { if (AllTransferOp(pool, TempIsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) && AllTransferOp(pool, TempIsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) { op_type = kAllRelease; } else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) && AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) { op_type = kAllAcquire; } } return op_type; } bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(CMD_BUFFER_STATE const *cb_state, VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask, BarrierOperationsType barrier_op_type, const char *function, const char *error_code) { bool skip = false; uint32_t queue_family_index = commandPoolMap[cb_state->createInfo.commandPool].get()->queueFamilyIndex; auto physical_device_state = GetPhysicalDeviceState(); // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool // that commandBuffer was allocated from, as specified in the table of supported pipeline stages. 
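// Illustrative case (hypothetical device): for a pool created on a compute-only queue family
// (queueFlags == VK_QUEUE_COMPUTE_BIT), supported_pipeline_stages_table maps
// VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT to VK_QUEUE_GRAPHICS_BIT only, so a barrier naming
// that stage would be flagged by CheckStageMaskQueueCompatibility below.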
if (queue_family_index < physical_device_state->queue_family_properties.size()) { VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags; // Only check the source stage mask if any barriers aren't "acquire ownership" if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) { skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, source_stage_mask, specified_queue_flags, function, "srcStageMask", error_code); } // Only check the dest stage mask if any barriers aren't "release ownership" if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) { skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, dest_stage_mask, specified_queue_flags, function, "dstStageMask", error_code); } } return skip; } bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); bool skip = ValidateStageMasksAgainstQueueCapabilities(cb_state, sourceStageMask, dstStageMask, barrier_op_type, "vkCmdWaitEvents", "VUID-vkCmdWaitEvents-srcStageMask-01164"); skip |= ValidateStageMaskGsTsEnables(sourceStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-srcStageMask-01159", "VUID-vkCmdWaitEvents-srcStageMask-01161", "VUID-vkCmdWaitEvents-srcStageMask-02111", "VUID-vkCmdWaitEvents-srcStageMask-02112"); skip |= ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-dstStageMask-01160", "VUID-vkCmdWaitEvents-dstStageMask-01162", "VUID-vkCmdWaitEvents-dstStageMask-02113", "VUID-vkCmdWaitEvents-dstStageMask-02114"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdWaitEvents-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()"); skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()"); skip |= ValidateBarriers("vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); return skip; } void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); auto first_event_index = cb_state->events.size(); for (uint32_t i = 0; i < eventCount; ++i) { auto event_state = GetEventState(pEvents[i]); if (event_state) { AddCommandBufferBinding(&event_state->cb_bindings, VulkanTypedHandle(pEvents[i], kVulkanObjectTypeEvent), cb_state); 
event_state->cb_bindings.insert(cb_state); } cb_state->waitedEvents.insert(pEvents[i]); cb_state->events.push_back(pEvents[i]); } cb_state->eventUpdates.emplace_back( [=](VkQueue q) { return ValidateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); }); TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers); if (enabled.gpu_validation) { GpuPreCallValidateCmdWaitEvents(sourceStageMask); } } void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordBarriersQFOTransfers(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); skip |= ValidateStageMasksAgainstQueueCapabilities(cb_state, srcStageMask, dstStageMask, barrier_op_type, "vkCmdPipelineBarrier", "VUID-vkCmdPipelineBarrier-srcStageMask-01183"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPipelineBarrier()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()"); skip |= ValidateStageMaskGsTsEnables(srcStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-srcStageMask-01168", "VUID-vkCmdPipelineBarrier-srcStageMask-01170", "VUID-vkCmdPipelineBarrier-srcStageMask-02115", "VUID-vkCmdPipelineBarrier-srcStageMask-02116"); skip |= ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-dstStageMask-01169", "VUID-vkCmdPipelineBarrier-dstStageMask-01171", "VUID-vkCmdPipelineBarrier-dstStageMask-02117", "VUID-vkCmdPipelineBarrier-dstStageMask-02118"); if (cb_state->activeRenderPass) { skip |= ValidateRenderPassPipelineBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); if (skip) return true; // Early return to avoid redundant errors from below calls } skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()"); skip |= ValidateBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); return skip; } void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, 
VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordBarriersQFOTransfers(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers); } bool CoreChecks::SetQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, QueryState value) { CMD_BUFFER_STATE *pCB = GetCBState(commandBuffer); if (pCB) { pCB->queryToStateMap[object] = value; } auto queue_data = queueMap.find(queue); if (queue_data != queueMap.end()) { queue_data->second.queryToStateMap[object] = value; } return false; } bool CoreChecks::ValidateBeginQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, VkFlags flags, CMD_TYPE cmd, const char *cmd_name, const char *vuid_queue_flags, const char *vuid_queue_feedback, const char *vuid_queue_occlusion, const char *vuid_precise, const char *vuid_query_count) { bool skip = false; const auto &query_pool_ci = GetQueryPoolState(query_obj.pool)->createInfo; // There are tighter queue constraints to test for certain query pools if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) { skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuid_queue_feedback); } if (query_pool_ci.queryType == VK_QUERY_TYPE_OCCLUSION) { skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuid_queue_occlusion); } skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuid_queue_flags); if (flags & VK_QUERY_CONTROL_PRECISE_BIT) { if (!enabled_features.core.occlusionQueryPrecise) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), vuid_precise, "%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but precise occlusion queries not enabled on the device.", cmd_name); } if (query_pool_ci.queryType != VK_QUERY_TYPE_OCCLUSION) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), vuid_precise, "%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but pool query type is not VK_QUERY_TYPE_OCCLUSION", cmd_name); } } if (query_obj.query >= query_pool_ci.queryCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), vuid_query_count, "%s: Query index %" PRIu32 " must be less than query count %" PRIu32 " of %s.", cmd_name, query_obj.query, query_pool_ci.queryCount, report_data->FormatHandle(query_obj.pool).c_str()); } skip |= ValidateCmd(cb_state, cmd, cmd_name); return skip; } void CoreChecks::RecordBeginQuery(CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj) { cb_state->activeQueries.insert(query_obj); cb_state->startedQueries.insert(query_obj); cb_state->queryUpdates.emplace_back([this, cb_state, query_obj](VkQueue q) { bool skip = false; skip |= VerifyQueryIsReset(q, cb_state->commandBuffer, query_obj); skip |= SetQueryState(q, cb_state->commandBuffer, query_obj, QUERYSTATE_RUNNING); return skip; }); 
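// Note on the pattern above: queryUpdates holds deferred closures that run at queue-submit
// time, once the target queue (and therefore its per-queue query state) is known; the same
// deferred-validation approach is used for eventUpdates elsewhere in this file.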
AddCommandBufferBinding(&GetQueryPoolState(query_obj.pool)->cb_bindings, VulkanTypedHandle(query_obj.pool, kVulkanObjectTypeQueryPool), cb_state); } bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) { if (disabled.query_validation) return false; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); QueryObject query_obj(queryPool, slot); return ValidateBeginQuery(cb_state, query_obj, flags, CMD_BEGINQUERY, "vkCmdBeginQuery()", "VUID-vkCmdBeginQuery-commandBuffer-cmdpool", "VUID-vkCmdBeginQuery-queryType-02327", "VUID-vkCmdBeginQuery-queryType-00803", "VUID-vkCmdBeginQuery-queryType-00800", "VUID-vkCmdBeginQuery-query-00802"); } bool CoreChecks::VerifyQueryIsReset(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject query_obj) { bool skip = false; auto queue_data = GetQueueState(queue); if (!queue_data) return false; QueryState state = GetQueryState(queue_data, query_obj.pool, query_obj.query); if (state != QUERYSTATE_RESET) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), kVUID_Core_DrawState_QueryNotReset, "vkCmdBeginQuery(): %s and query %" PRIu32 ": query not reset. " "After query pool creation, each query must be reset before it is used. " "Queries must also be reset between uses.", report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query); } return skip; } void CoreChecks::PostCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) { QueryObject query = {queryPool, slot}; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordBeginQuery(cb_state, query); } bool CoreChecks::ValidateCmdEndQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, CMD_TYPE cmd, const char *cmd_name, const char *vuid_queue_flags, const char *vuid_active_queries) { bool skip = false; if (!cb_state->activeQueries.count(query_obj)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), vuid_active_queries, "%s: Ending a query before it was started: %s, index %d.", cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query); } skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuid_queue_flags); skip |= ValidateCmd(cb_state, cmd, cmd_name); return skip; } bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) { if (disabled.query_validation) return false; QueryObject query_obj = {queryPool, slot}; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); return ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERY, "vkCmdEndQuery()", "VUID-vkCmdEndQuery-commandBuffer-cmdpool", "VUID-vkCmdEndQuery-None-01923"); } void CoreChecks::RecordCmdEndQuery(CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj) { cb_state->activeQueries.erase(query_obj); cb_state->queryUpdates.emplace_back([this, cb_state, query_obj](VkQueue q) { return SetQueryState(q, cb_state->commandBuffer, query_obj, QUERYSTATE_AVAILABLE); }); AddCommandBufferBinding(&GetQueryPoolState(query_obj.pool)->cb_bindings, VulkanTypedHandle(query_obj.pool, kVulkanObjectTypeQueryPool), cb_state); } void CoreChecks::PostCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) { QueryObject query_obj = {queryPool, slot}; 
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordCmdEndQuery(cb_state, query_obj); } bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { if (disabled.query_validation) return false; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = InsideRenderPass(cb_state, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-renderpass"); skip |= ValidateCmd(cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdResetQueryPool-commandBuffer-cmdpool"); return skip; } void CoreChecks::PostCallRecordCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); for (uint32_t i = 0; i < queryCount; i++) { QueryObject query = {queryPool, firstQuery + i}; cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents; cb_state->queryUpdates.emplace_back( [this, commandBuffer, query](VkQueue q) { return SetQueryState(q, commandBuffer, query, QUERYSTATE_RESET); }); } AddCommandBufferBinding(&GetQueryPoolState(queryPool)->cb_bindings, VulkanTypedHandle(queryPool, kVulkanObjectTypeQueryPool), cb_state); } QueryState CoreChecks::GetQueryState(QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) { QueryObject query = {queryPool, queryIndex}; const std::array<decltype(queryToStateMap) *, 2> map_list = {&queue_data->queryToStateMap, &queryToStateMap}; for (const auto map : map_list) { auto query_data = map->find(query); if (query_data != map->end()) { return query_data->second; } } return QUERYSTATE_UNKNOWN; } QueryResultType CoreChecks::GetQueryResultType(QueryState state, VkQueryResultFlags flags) { switch (state) { case QUERYSTATE_UNKNOWN: return QUERYRESULT_UNKNOWN; case QUERYSTATE_RESET: case QUERYSTATE_RUNNING: if (flags & VK_QUERY_RESULT_WAIT_BIT) { return ((state == QUERYSTATE_RESET) ? 
QUERYRESULT_WAIT_ON_RESET : QUERYRESULT_WAIT_ON_RUNNING); } else if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) { return QUERYRESULT_SOME_DATA; } else { return QUERYRESULT_NO_DATA; } case QUERYSTATE_AVAILABLE: if ((flags & VK_QUERY_RESULT_WAIT_BIT) || (flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) { return QUERYRESULT_SOME_DATA; } else { return QUERYRESULT_MAYBE_NO_DATA; } } assert(false); return QUERYRESULT_UNKNOWN; } bool CoreChecks::ValidateQuery(VkQueue queue, CMD_BUFFER_STATE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkQueryResultFlags flags) { bool skip = false; auto queue_data = GetQueueState(queue); if (!queue_data) return false; for (uint32_t i = 0; i < queryCount; i++) { QueryState state = GetQueryState(queue_data, queryPool, firstQuery + i); QueryResultType result_type = GetQueryResultType(state, flags); if (result_type != QUERYRESULT_SOME_DATA) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidQuery, "Requesting a copy from query to buffer on %s query %" PRIu32 ": %s", report_data->FormatHandle(queryPool).c_str(), firstQuery + i, string_QueryResultType(result_type)); } } return skip; } bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) { if (disabled.query_validation) return false; auto cb_state = GetCBState(commandBuffer); auto dst_buff_state = GetBufferState(dstBuffer); assert(cb_state); assert(dst_buff_state); bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826"); skip |= ValidateQueryPoolStride("VUID-vkCmdCopyQueryPoolResults-flags-00822", "VUID-vkCmdCopyQueryPoolResults-flags-00823", stride, "dstOffset", dstOffset, flags); // Validate that DST buffer has correct usage flags set skip |= ValidateBufferUsageFlags(dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdCopyQueryPoolResults()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdCopyQueryPoolResults-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()"); skip |= InsideRenderPass(cb_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-renderpass"); return skip; } void CoreChecks::PostCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) { auto cb_state = GetCBState(commandBuffer); auto dst_buff_state = GetBufferState(dstBuffer); AddCommandBufferBindingBuffer(cb_state, dst_buff_state); cb_state->queryUpdates.emplace_back([this, cb_state, queryPool, firstQuery, queryCount, flags](VkQueue q) { return ValidateQuery(q, cb_state, queryPool, firstQuery, queryCount, flags); }); AddCommandBufferBinding(&GetQueryPoolState(queryPool)->cb_bindings, VulkanTypedHandle(queryPool, kVulkanObjectTypeQueryPool), cb_state); } bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, 
VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void *pValues) { bool skip = false; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdPushConstants-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()"); skip |= ValidatePushConstantRange(offset, size, "vkCmdPushConstants()"); if (0 == stageFlags) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-stageFlags-requiredbitmask", "vkCmdPushConstants() call has no stageFlags set."); } // Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command // stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range. if (!skip) { const auto &ranges = *GetPipelineLayout(layout)->push_constant_ranges; VkShaderStageFlags found_stages = 0; for (const auto &range : ranges) { if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) { VkShaderStageFlags matching_stages = range.stageFlags & stageFlags; if (matching_stages != range.stageFlags) { // VUID-vkCmdPushConstants-offset-01796 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01796", "vkCmdPushConstants(): stageFlags (0x%" PRIx32 "), offset (%" PRIu32 "), and size (%" PRIu32 "), must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32 "), offset (%" PRIu32 "), and size (%" PRIu32 ") in %s.", (uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size, report_data->FormatHandle(layout).c_str()); } // Accumulate all stages we've found found_stages = matching_stages | found_stages; } } if (found_stages != stageFlags) { // VUID-vkCmdPushConstants-offset-01795 uint32_t missing_stages = ~found_stages & stageFlags; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01795", "vkCmdPushConstants(): stageFlags = 0x%" PRIx32 ", VkPushConstantRange in %s overlapping offset = %d and size = %d, do not contain " "stageFlags 0x%" PRIx32 ".", (uint32_t)stageFlags, report_data->FormatHandle(layout).c_str(), offset, size, missing_stages); } } return skip; } bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) { if (disabled.query_validation) return false; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdWriteTimestamp()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT, "VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool"); skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()"); return skip; } void CoreChecks::PostCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); QueryObject query = {queryPool, slot}; 
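// As with vkCmdBeginQuery above, the reset check is deferred to submit time via a queryUpdates
// closure; unlike a begin/end pair, a timestamp write marks the query AVAILABLE in one step.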
    cb_state->queryUpdates.emplace_back([this, commandBuffer, query](VkQueue q) {
        bool skip = false;
        skip |= VerifyQueryIsReset(q, commandBuffer, query);
        skip |= SetQueryState(q, commandBuffer, query, QUERYSTATE_AVAILABLE);
        return skip;
    });
    AddCommandBufferBinding(&GetQueryPoolState(queryPool)->cb_bindings,
                            VulkanTypedHandle(queryPool, kVulkanObjectTypeQueryPool), cb_state);
}

bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2KHR *attachments, const VkFramebufferCreateInfo *fbci,
                            VkImageUsageFlagBits usage_flag, const char *error_code) {
    bool skip = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = GetImageViewState(*image_view);
                if (view_state) {
                    const VkImageCreateInfo *ici = &GetImageState(view_state->create_info.image)->createInfo;
                    if (ici != nullptr) {
                        if ((ici->usage & usage_flag) == 0) {
                            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                            error_code,
                                            "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
                                            "IMAGE_USAGE flags (%s).",
                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                        }
                    }
                }
            }
        }
    }
    return skip;
}

// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
bool CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip = false;

    auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo2KHR *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
                            "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
                            "of %u of %s being used to create Framebuffer.",
                            pCreateInfo->attachmentCount, rpci->attachmentCount,
                            report_data->FormatHandle(pCreateInfo->renderPass).c_str());
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = GetImageViewState(image_views[i]);
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                                    HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00880",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
                                    "match the format of %s used by the corresponding attachment for %s.",
                                    i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                                    report_data->FormatHandle(pCreateInfo->renderPass).c_str());
                }
                const VkImageCreateInfo *ici = &GetImageState(ivci.image)->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip |= log_msg(
                        report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00881",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match the %s "
                        "samples used by the corresponding attachment for %s.",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        report_data->FormatHandle(pCreateInfo->renderPass).c_str());
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                    "VUID-VkFramebufferCreateInfo-pAttachments-00883",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
                                    "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
                                    i, ivci.subresourceRange.levelCount);
                }
                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                    "VUID-VkFramebufferCreateInfo-pAttachments-00882",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
                                    "smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
                                    "attachment #%u, framebuffer:\n"
                                    "width: %u, %u\n"
                                    "height: %u, %u\n"
                                    "layerCount: %u, %u\n",
                                    i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                                    pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
                }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                    "VUID-VkFramebufferCreateInfo-pAttachments-00884",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All "
                                    "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
                                    "swizzle values:\n"
                                    "r swizzle = %s\n"
                                    "g swizzle = %s\n"
                                    "b swizzle = %s\n"
                                    "a swizzle = %s\n",
                                    i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                                    string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip |= MatchUsage(rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                               pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879");
            // Verify color attachments:
            skip |= MatchUsage(rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                               pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877");
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip |= MatchUsage(1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633");
            }
        }
    }
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-width-00886",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
                        "width: %u, device max: %u\n",
                        pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth);
    }
    if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-height-00888",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
                        "height: %u, device max: %u\n",
                        pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight);
    }
    if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-layers-00890",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
                        "layers: %u, device max: %u\n",
                        pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers);
    }
    // Verify FB dimensions are greater than zero
    if (pCreateInfo->width <= 0) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-width-00885",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
    }
    if (pCreateInfo->height <= 0) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-height-00887",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
    }
    if (pCreateInfo->layers <= 0) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-layers-00889",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
    }
    return skip;
}

bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
    // TODO : Verify that the renderPass this FB is created with is compatible with the FB
    bool skip = false;
    skip |= ValidateFramebufferCreateInfo(pCreateInfo);
    return skip;
}

void CoreChecks::PostCallRecordCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer,
                                                 VkResult result) {
    if (VK_SUCCESS != result) return;
    // Shadow create info and store in map
    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
        new FRAMEBUFFER_STATE(*pFramebuffer, pCreateInfo, GetRenderPassStateSharedPtr(pCreateInfo->renderPass)));

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkImageView view = pCreateInfo->pAttachments[i];
        auto view_state = GetImageViewState(view);
        if (!view_state) {
            continue;
        }
    }
    frameBufferMap[*pFramebuffer] = std::move(fb_state);
}

static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node we have not found a dependency path so return false.
    if (processed_nodes.count(index)) return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
        }
    } else {
        return true;
    }
    return false;
}

bool CoreChecks::CheckDependencyExists(const uint32_t subpass, const std::vector<uint32_t> &dependent_subpasses,
                                       const std::vector<DAGNode> &subpass_to_node, bool &skip) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit (transitive) one still might. If not, throw an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                kVUID_Core_DrawState_InvalidRenderpass,
                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

bool CoreChecks::CheckPreserved(const VkRenderPassCreateInfo2KHR *pCreateInfo, const int index, const uint32_t attachment,
                                const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
    const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment) return true;
    }
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (attachment == subpass.pInputAttachments[j].attachment) return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
if (result && depth > 0) { bool has_preserved = false; for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { if (subpass.pPreserveAttachments[j] == attachment) { has_preserved = true; break; } } if (!has_preserved) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidRenderpass, "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index); } } return result; } template <class T> bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) { return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) || ((offset1 > offset2) && (offset1 < (offset2 + size2))); } bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) { return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) && IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount)); } bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) { bool skip = false; auto const pFramebufferInfo = framebuffer->createInfo.ptr(); auto const pCreateInfo = renderPass->createInfo.ptr(); auto const &subpass_to_node = renderPass->subpassToNode; std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount); std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount); std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount); // Find overlapping attachments for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) { VkImageView viewi = pFramebufferInfo->pAttachments[i]; VkImageView viewj = pFramebufferInfo->pAttachments[j]; if (viewi == viewj) { overlapping_attachments[i].push_back(j); overlapping_attachments[j].push_back(i); continue; } auto view_state_i = GetImageViewState(viewi); auto view_state_j = GetImageViewState(viewj); if (!view_state_i || !view_state_j) { continue; } auto view_ci_i = view_state_i->create_info; auto view_ci_j = view_state_j->create_info; if (view_ci_i.image == view_ci_j.image && IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) { overlapping_attachments[i].push_back(j); overlapping_attachments[j].push_back(i); continue; } auto image_data_i = GetImageState(view_ci_i.image); auto image_data_j = GetImageState(view_ci_j.image); if (!image_data_i || !image_data_j) { continue; } if (image_data_i->binding.mem == image_data_j->binding.mem && IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset, image_data_j->binding.size)) { overlapping_attachments[i].push_back(j); overlapping_attachments[j].push_back(i); } } } // Find for each attachment the subpasses that use them. 
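    // Note: attachments that alias (same VkImageView, overlapping subresource ranges of one image, or images bound
    // to overlapping memory ranges) were grouped above, so each use recorded below is also propagated to every
    // overlapping attachment index.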
unordered_set<uint32_t> attachmentIndices; for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; attachmentIndices.clear(); for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { uint32_t attachment = subpass.pInputAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; input_attachment_to_subpass[attachment].push_back(i); for (auto overlapping_attachment : overlapping_attachments[attachment]) { input_attachment_to_subpass[overlapping_attachment].push_back(i); } } for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { uint32_t attachment = subpass.pColorAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; output_attachment_to_subpass[attachment].push_back(i); for (auto overlapping_attachment : overlapping_attachments[attachment]) { output_attachment_to_subpass[overlapping_attachment].push_back(i); } attachmentIndices.insert(attachment); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { uint32_t attachment = subpass.pDepthStencilAttachment->attachment; output_attachment_to_subpass[attachment].push_back(i); for (auto overlapping_attachment : overlapping_attachments[attachment]) { output_attachment_to_subpass[overlapping_attachment].push_back(i); } if (attachmentIndices.count(attachment)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidRenderpass, "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i); } } } // If there is a dependency needed make sure one exists for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; // If the attachment is an input then all subpasses that output must have a dependency relationship for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { uint32_t attachment = subpass.pInputAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; CheckDependencyExists(i, output_attachment_to_subpass[attachment], subpass_to_node, skip); } // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { uint32_t attachment = subpass.pColorAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; CheckDependencyExists(i, output_attachment_to_subpass[attachment], subpass_to_node, skip); CheckDependencyExists(i, input_attachment_to_subpass[attachment], subpass_to_node, skip); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment; CheckDependencyExists(i, output_attachment_to_subpass[attachment], subpass_to_node, skip); CheckDependencyExists(i, input_attachment_to_subpass[attachment], subpass_to_node, skip); } } // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was // written. 
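    // CheckPreserved() walks backwards through the DAG; its depth parameter distinguishes the reading subpass itself
    // (depth == 0) from intermediate subpasses, which must carry the attachment in pPreserveAttachments.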
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { CheckPreserved(pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip); } } return skip; } void CoreChecks::RecordRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) { auto &subpass_to_node = render_pass->subpassToNode; subpass_to_node.resize(pCreateInfo->subpassCount); auto &self_dependencies = render_pass->self_dependencies; self_dependencies.resize(pCreateInfo->subpassCount); for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { subpass_to_node[i].pass = i; self_dependencies[i].clear(); } for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i]; if ((dependency.srcSubpass != VK_SUBPASS_EXTERNAL) && (dependency.dstSubpass != VK_SUBPASS_EXTERNAL)) { if (dependency.srcSubpass == dependency.dstSubpass) { self_dependencies[dependency.srcSubpass].push_back(i); } else { subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass); subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass); } } } } bool CoreChecks::ValidateRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) { // Shorthand... auto &subpass_to_node = render_pass->subpassToNode; subpass_to_node.resize(pCreateInfo->subpassCount); auto &self_dependencies = render_pass->self_dependencies; self_dependencies.resize(pCreateInfo->subpassCount); bool skip = false; const char *vuid; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { subpass_to_node[i].pass = i; self_dependencies[i].clear(); } for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i]; VkPipelineStageFlags exclude_graphics_pipeline_stages = ~(VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | ExpandPipelineStageFlags(device_extensions, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)); VkPipelineStageFlagBits latest_src_stage = GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask); VkPipelineStageFlagBits earliest_dst_stage = GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask); // This VU is actually generalised to *any* pipeline - not just graphics - but only graphics render passes are // currently supported by the spec - so only that pipeline is checked here. // If that is ever relaxed, this check should be extended to cover those pipelines. if (dependency.srcSubpass == dependency.dstSubpass && (dependency.srcStageMask & exclude_graphics_pipeline_stages) != 0u && (dependency.dstStageMask & exclude_graphics_pipeline_stages) != 0u) { vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-02244" : "VUID-VkSubpassDependency-srcSubpass-01989"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u is a self-dependency, but specifies stage masks that contain stages not in the GRAPHICS pipeline.", i); } else if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL && (dependency.srcStageMask & VK_PIPELINE_STAGE_HOST_BIT)) { vuid = use_rp2 ? 
"VUID-VkSubpassDependency2KHR-srcSubpass-03078" : "VUID-VkSubpassDependency-srcSubpass-00858"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a dependency from subpass %u, but includes HOST_BIT in the source stage mask.", i, dependency.srcSubpass); } else if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL && (dependency.dstStageMask & VK_PIPELINE_STAGE_HOST_BIT)) { vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-dstSubpass-03079" : "VUID-VkSubpassDependency-dstSubpass-00859"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a dependency to subpass %u, but includes HOST_BIT in the destination stage mask.", i, dependency.dstSubpass); } // These next two VUs are actually generalised to *any* pipeline - not just graphics - but only graphics render passes are // currently supported by the spec - so only that pipeline is checked here. // If that is ever relaxed, these next two checks should be extended to cover those pipelines. else if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL && pCreateInfo->pSubpasses[dependency.srcSubpass].pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS && (dependency.srcStageMask & exclude_graphics_pipeline_stages) != 0u) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03054" : "VUID-VkRenderPassCreateInfo-pDependencies-00837"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a source stage mask that contains stages not in the GRAPHICS pipeline as used " "by the source subpass %u.", i, dependency.srcSubpass); } else if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL && pCreateInfo->pSubpasses[dependency.dstSubpass].pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS && (dependency.dstStageMask & exclude_graphics_pipeline_stages) != 0u) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03055" : "VUID-VkRenderPassCreateInfo-pDependencies-00838"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a destination stage mask that contains stages not in the GRAPHICS pipeline as " "used by the destination subpass %u.", i, dependency.dstSubpass); } // The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if // any are, which enables multiview. else if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && (pCreateInfo->pSubpasses[0].viewMask == 0)) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo2KHR-viewMask-03059", "Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i); } else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassDependency2KHR-dependencyFlags-03092", "Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but also specifies a view offset of %u.", i, dependency.viewOffset); } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) { if (dependency.srcSubpass == dependency.dstSubpass) { vuid = use_rp2 ? 
"VUID-VkSubpassDependency2KHR-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "The src and dst subpasses in dependency %u are both external.", i); } else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) { if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) { vuid = "VUID-VkSubpassDependency-dependencyFlags-02520"; } else { // dependency.dstSubpass == VK_SUBPASS_EXTERNAL vuid = "VUID-VkSubpassDependency-dependencyFlags-02521"; } if (use_rp2) { // Create render pass 2 distinguishes between source and destination external dependencies. if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) { vuid = "VUID-VkSubpassDependency2KHR-dependencyFlags-03090"; } else { vuid = "VUID-VkSubpassDependency2KHR-dependencyFlags-03091"; } } skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i); } } else if (dependency.srcSubpass > dependency.dstSubpass) { vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is " "disallowed to prevent cyclic dependencies.", i, dependency.srcSubpass, dependency.dstSubpass); } else if (dependency.srcSubpass == dependency.dstSubpass) { if (dependency.viewOffset != 0) { vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pNext-01930"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a self-dependency but has a non-zero view offset of %u", i, dependency.viewOffset); } else if ((dependency.dependencyFlags | VK_DEPENDENCY_VIEW_LOCAL_BIT) != dependency.dependencyFlags && pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not " "specify VK_DEPENDENCY_VIEW_LOCAL_BIT.", i, dependency.srcSubpass); } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) || HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) && (GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) > GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) { vuid = use_rp2 ? 
"VUID-VkSubpassDependency2KHR-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).", i, string_VkPipelineStageFlagBits(latest_src_stage), string_VkPipelineStageFlagBits(earliest_dst_stage)); } else { self_dependencies[dependency.srcSubpass].push_back(i); } } else { subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass); subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass); } } return skip; } bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count, const char *type) { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()"; if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) { const char *vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: %s attachment %d must be less than the total number of attachments %d.", type, function_name, attachment, attachment_count); } return skip; } enum AttachmentType { ATTACHMENT_COLOR = 1, ATTACHMENT_DEPTH = 2, ATTACHMENT_INPUT = 4, ATTACHMENT_PRESERVE = 8, ATTACHMENT_RESOLVE = 16, }; char const *StringAttachmentType(uint8_t type) { switch (type) { case ATTACHMENT_COLOR: return "color"; case ATTACHMENT_DEPTH: return "depth"; case ATTACHMENT_INPUT: return "input"; case ATTACHMENT_PRESERVE: return "preserve"; case ATTACHMENT_RESOLVE: return "resolve"; default: return "(multiple)"; } } bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses, std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use, VkImageLayout new_layout) { if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */ bool skip = false; auto &uses = attachment_uses[attachment]; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()"; if (uses & new_use) { if (attachment_layouts[attachment] != new_layout) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-layout-02528" : "VUID-VkSubpassDescription-layout-02519"; log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).", function_name, subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]), string_VkImageLayout(new_layout)); } } else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) { /* Note: input attachments are assumed to be done first. */ vuid = use_rp2 ? 
"VUID-VkSubpassDescription2KHR-pPreserveAttachments-03074" : "VUID-VkSubpassDescription-pPreserveAttachments-00854"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass, attachment, StringAttachmentType(uses), StringAttachmentType(new_use)); } else { attachment_layouts[attachment] = new_layout; uses |= new_use; } return skip; } bool CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo) { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()"; for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount); std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount); if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pipelineBindPoint-03062" : "VUID-VkSubpassDescription-pipelineBindPoint-00844"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", function_name, i); } for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { auto const &attachment_ref = subpass.pInputAttachments[j]; if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Input"); if (attachment_ref.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) { vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkInputAttachmentAspectReference-aspectMask-01964"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.", function_name, i, j); } if (attachment_ref.attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment, ATTACHMENT_INPUT, attachment_ref.layout); vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pNext-01963"; skip |= ValidateImageAspectMask(VK_NULL_HANDLE, pCreateInfo->pAttachments[attachment_ref.attachment].format, attachment_ref.aspectMask, function_name, vuid); } } if (rp_version == RENDER_PASS_VERSION_2) { // These are validated automatically as part of parameter validation for create renderpass 1 // as they are in a struct that only applies to input attachments - not so for v2. 
                // Check for 0
                if (attachment_ref.aspectMask == 0) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                    "VUID-VkSubpassDescription2KHR-aspectMask-03176",
                                    "%s: Input attachment (%d) aspect mask must not be 0.", function_name, j);
                } else {
                    const VkImageAspectFlags valid_bits =
                        (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
                         VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT |
                         VK_IMAGE_ASPECT_PLANE_2_BIT);

                    // Check for valid aspect mask bits
                    if (attachment_ref.aspectMask & ~valid_bits) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkSubpassDescription2KHR-aspectMask-03175",
                                        "%s: Input attachment (%d) aspect mask (0x%" PRIx32 ") is invalid.", function_name, j,
                                        attachment_ref.aspectMask);
                    }
                }
            }
        }

        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853";
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                                "%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j);
            } else {
                skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, "Preserve");
                if (attachment < pCreateInfo->attachmentCount) {
                    skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE,
                                             VkImageLayout(0) /* preserve doesn't have any layout */);
                }
            }
        }

        bool subpass_performs_resolve = false;

        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            if (subpass.pResolveAttachments) {
                auto const &attachment_ref = subpass.pResolveAttachments[j];
                if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
                    skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Resolve");

                    if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
                        skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
                                                 ATTACHMENT_RESOLVE, attachment_ref.layout);

                        subpass_performs_resolve = true;

                        if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
                            vuid = use_rp2 ?
"VUID-VkSubpassDescription2KHR-pResolveAttachments-03067" : "VUID-VkSubpassDescription-pResolveAttachments-00849"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u requests multisample resolve into attachment %u, which must " "have VK_SAMPLE_COUNT_1_BIT but has %s.", function_name, i, attachment_ref.attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples)); } } } } } if (subpass.pDepthStencilAttachment) { if (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentIndex(rp_version, subpass.pDepthStencilAttachment->attachment, pCreateInfo->attachmentCount, "Depth"); if (subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, subpass.pDepthStencilAttachment->attachment, ATTACHMENT_DEPTH, subpass.pDepthStencilAttachment->layout); } } } uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED; for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { auto const &attachment_ref = subpass.pColorAttachments[j]; skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Color"); if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED && attachment_ref.attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment, ATTACHMENT_COLOR, attachment_ref.layout); VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_ref.attachment].samples; if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) { VkSampleCountFlagBits last_sample_count = pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples; if (current_sample_count != last_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pColorAttachments-03069" : "VUID-VkSubpassDescription-pColorAttachments-01417"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u attempts to render to color attachments with inconsistent sample counts." "Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has " "sample count %s.", function_name, i, j, string_VkSampleCountFlagBits(current_sample_count), last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count)); } } last_sample_count_attachment = j; if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03066" : "VUID-VkSubpassDescription-pResolveAttachments-00848"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u requests multisample resolve from attachment %u which has " "VK_SAMPLE_COUNT_1_BIT.", function_name, i, attachment_ref.attachment); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) { const auto depth_stencil_sample_count = pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples; if (device_extensions.vk_amd_mixed_attachment_samples) { if (pCreateInfo->pAttachments[attachment_ref.attachment].samples > depth_stencil_sample_count) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2KHR-pColorAttachments-03070" : "VUID-VkSubpassDescription-pColorAttachments-01506"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u pColorAttachments[%u] has %s which is larger than " "depth/stencil attachment %s.", function_name, i, j, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples), string_VkSampleCountFlagBits(depth_stencil_sample_count)); break; } } if (!device_extensions.vk_amd_mixed_attachment_samples && !device_extensions.vk_nv_framebuffer_mixed_samples && current_sample_count != depth_stencil_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pDepthStencilAttachment-03071" : "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"; skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u attempts to render to use a depth/stencil attachment with sample count that differs " "from color attachment %u." "The depth attachment ref has sample count %s, whereas color attachment ref %u has sample count %s.", function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j, string_VkSampleCountFlagBits(current_sample_count)); break; } } } if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED && subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) { if (attachment_ref.attachment == VK_ATTACHMENT_UNUSED) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03065" : "VUID-VkSubpassDescription-pResolveAttachments-00847"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u requests multisample resolve from attachment %u which has " "attachment=VK_ATTACHMENT_UNUSED.", function_name, i, attachment_ref.attachment); } else { const auto &color_desc = pCreateInfo->pAttachments[attachment_ref.attachment]; const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment]; if (color_desc.format != resolve_desc.format) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03068" : "VUID-VkSubpassDescription-pResolveAttachments-00850"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: Subpass %u pColorAttachments[%u] resolves to an attachment with a " "different format. color format: %u, resolve format: %u.", function_name, i, j, color_desc.format, resolve_desc.format); } } } } } return skip; } static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) { if (index == VK_ATTACHMENT_UNUSED) return; if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read; } bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()"; // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with // ValidateLayouts. 
skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo); render_pass->renderPass = VK_NULL_HANDLE; skip |= ValidateRenderPassDAG(rp_version, pCreateInfo, render_pass); // Validate multiview correlation and view masks bool viewMaskZero = false; bool viewMaskNonZero = false; for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i]; if (subpass.viewMask != 0) { viewMaskNonZero = true; } else { viewMaskZero = true; } if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 && (subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-flags-03076" : "VUID-VkSubpassDescription-flags-00856"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: The flags parameter of subpass description %u includes " "VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include " "VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.", function_name, i); } } if (rp_version == RENDER_PASS_VERSION_2) { if (viewMaskNonZero && viewMaskZero) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo2KHR-viewMask-03058", "%s: Some view masks are non-zero whilst others are zero.", function_name); } if (viewMaskZero && pCreateInfo->correlatedViewMaskCount != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo2KHR-viewMask-03057", "%s: Multiview is not enabled but correlation masks are still provided", function_name); } } uint32_t aggregated_cvms = 0; for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) { if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) { vuid = use_rp2 ? 
"VUID-VkRenderPassCreateInfo2KHR-pCorrelatedViewMasks-03056" : "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i); } aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i]; } for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { auto const &dependency = pCreateInfo->pDependencies[i]; if (rp_version == RENDER_PASS_VERSION_2) { skip |= ValidateStageMaskGsTsEnables( dependency.srcStageMask, function_name, "VUID-VkSubpassDependency2KHR-srcStageMask-03080", "VUID-VkSubpassDependency2KHR-srcStageMask-03082", "VUID-VkSubpassDependency2KHR-srcStageMask-02103", "VUID-VkSubpassDependency2KHR-srcStageMask-02104"); skip |= ValidateStageMaskGsTsEnables( dependency.dstStageMask, function_name, "VUID-VkSubpassDependency2KHR-dstStageMask-03081", "VUID-VkSubpassDependency2KHR-dstStageMask-03083", "VUID-VkSubpassDependency2KHR-dstStageMask-02105", "VUID-VkSubpassDependency2KHR-dstStageMask-02106"); } else { skip |= ValidateStageMaskGsTsEnables( dependency.srcStageMask, function_name, "VUID-VkSubpassDependency-srcStageMask-00860", "VUID-VkSubpassDependency-srcStageMask-00862", "VUID-VkSubpassDependency-srcStageMask-02099", "VUID-VkSubpassDependency-srcStageMask-02100"); skip |= ValidateStageMaskGsTsEnables( dependency.dstStageMask, function_name, "VUID-VkSubpassDependency-dstStageMask-00861", "VUID-VkSubpassDependency-dstStageMask-00863", "VUID-VkSubpassDependency-dstStageMask-02101", "VUID-VkSubpassDependency-dstStageMask-02102"); } if (!ValidateAccessMaskPipelineStage(device_extensions, dependency.srcAccessMask, dependency.srcStageMask)) { vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcAccessMask-03088" : "VUID-VkSubpassDependency-srcAccessMask-00868"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: pDependencies[%u].srcAccessMask (0x%" PRIx32 ") is not supported by srcStageMask (0x%" PRIx32 ").", function_name, i, dependency.srcAccessMask, dependency.srcStageMask); } if (!ValidateAccessMaskPipelineStage(device_extensions, dependency.dstAccessMask, dependency.dstStageMask)) { vuid = use_rp2 ? 
"VUID-VkSubpassDependency2KHR-dstAccessMask-03089" : "VUID-VkSubpassDependency-dstAccessMask-00869"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid, "%s: pDependencies[%u].dstAccessMask (0x%" PRIx32 ") is not supported by dstStageMask (0x%" PRIx32 ").", function_name, i, dependency.dstAccessMask, dependency.dstStageMask); } } if (!skip) { skip |= ValidateLayouts(rp_version, device, pCreateInfo); } return skip; } bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { bool skip = false; // Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds) const VkRenderPassMultiviewCreateInfo *pMultiviewInfo = lvl_find_in_chain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext); if (pMultiviewInfo) { if (pMultiviewInfo->subpassCount && pMultiviewInfo->subpassCount != pCreateInfo->subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo-pNext-01928", "Subpass count is %u but multiview info has a subpass count of %u.", pCreateInfo->subpassCount, pMultiviewInfo->subpassCount); } else if (pMultiviewInfo->dependencyCount && pMultiviewInfo->dependencyCount != pCreateInfo->dependencyCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo-pNext-01929", "Dependency count is %u but multiview info has a dependency count of %u.", pCreateInfo->dependencyCount, pMultiviewInfo->dependencyCount); } } const VkRenderPassInputAttachmentAspectCreateInfo *pInputAttachmentAspectInfo = lvl_find_in_chain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext); if (pInputAttachmentAspectInfo) { for (uint32_t i = 0; i < pInputAttachmentAspectInfo->aspectReferenceCount; ++i) { uint32_t subpass = pInputAttachmentAspectInfo->pAspectReferences[i].subpass; uint32_t attachment = pInputAttachmentAspectInfo->pAspectReferences[i].inputAttachmentIndex; if (subpass >= pCreateInfo->subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo-pNext-01926", "Subpass index %u specified by input attachment aspect info %u is greater than the subpass " "count of %u for this render pass.", subpass, i, pCreateInfo->subpassCount); } else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkRenderPassCreateInfo-pNext-01927", "Input attachment index %u specified by input attachment aspect info %u is greater than the " "input attachment count of %u for this subpass.", attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount); } } } if (!skip) { auto render_pass = std::unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo)); skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, render_pass->createInfo.ptr(), render_pass.get()); } return skip; } void CoreChecks::RecordCreateRenderPassState(RenderPassCreateVersion rp_version, std::shared_ptr<RENDER_PASS_STATE> &render_pass, VkRenderPass *pRenderPass) { render_pass->renderPass = *pRenderPass; auto create_info = render_pass->createInfo.ptr(); RecordRenderPassDAG(RENDER_PASS_VERSION_1, 
                       create_info, render_pass.get());
    for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
        const VkSubpassDescription2KHR &subpass = create_info->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);

            // resolve attachments are considered to be written
            if (subpass.pResolveAttachments) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
            }
        }
        if (subpass.pDepthStencilAttachment) {
            MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
        }
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
        }
    }

    // Even though render_pass is passed by reference, it must still be moved so that move assignment is invoked
    // rather than a shared_ptr copy.
    renderPassMap[*pRenderPass] = std::move(render_pass);
}

// Style note:
// Use of a mutable shared_ptr reference here exceeds the recommended usage of such parameters in the Google style guide, but
// intentionally forces the caller to move or copy. This is clearer than passing a pointer to shared_ptr and avoids the atomic
// increment/decrement of shared_ptr copy construction or assignment.
void CoreChecks::PostCallRecordCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
                                                VkResult result) {
    if (VK_SUCCESS != result) return;
    auto render_pass_state = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
    RecordCreateRenderPassState(RENDER_PASS_VERSION_1, render_pass_state, pRenderPass);
}

void CoreChecks::PostCallRecordCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
                                                    VkResult result) {
    if (VK_SUCCESS != result) return;
    auto render_pass_state = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
    RecordCreateRenderPassState(RENDER_PASS_VERSION_2, render_pass_state, pRenderPass);
}

static bool ValidateDepthStencilResolve(const debug_report_data *report_data,
                                        const VkPhysicalDeviceDepthStencilResolvePropertiesKHR &depth_stencil_resolve_props,
                                        const VkRenderPassCreateInfo2KHR *pCreateInfo) {
    bool skip = false;

    // If the pNext list of VkSubpassDescription2KHR includes a VkSubpassDescriptionDepthStencilResolveKHR structure,
    // then that structure describes depth/stencil resolve operations for the subpass.
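    // Each check below maps to a VUID from VK_KHR_depth_stencil_resolve: the resolve attachment must be
    // single-sampled, its depth/stencil component sizes and numerical types must match the multisampled source,
    // and the requested resolve modes must be supported by (and consistent with) the device's resolve properties.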
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
        const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
        const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(subpass.pNext);
        if (resolve == nullptr) {
            continue;
        }

        // Guard every attachment dereference below: either attachment pointer may be null, and VK_ATTACHMENT_UNUSED
        // must never be used to index pAttachments.
        const bool resolve_attachment_used = (resolve->pDepthStencilResolveAttachment != nullptr &&
                                              resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED);
        const bool ds_attachment_used = (subpass.pDepthStencilAttachment != nullptr &&
                                         subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED);

        if (resolve_attachment_used) {
            if (!ds_attachment_used) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03177",
                                "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                                "structure with resolve attachment %u, but pDepthStencilAttachment=VK_ATTACHMENT_UNUSED.",
                                i, resolve->pDepthStencilResolveAttachment->attachment);
            }
            if (resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR && resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03178",
                                "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                                "structure with resolve attachment %u, but both depth and stencil resolve modes are "
                                "VK_RESOLVE_MODE_NONE_KHR.",
                                i, resolve->pDepthStencilResolveAttachment->attachment);
            }
        }

        if (resolve_attachment_used && ds_attachment_used &&
            pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
            skip |= log_msg(
                report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03179",
                "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                "structure with resolve attachment %u. However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.",
                i, resolve->pDepthStencilResolveAttachment->attachment);
        }

        if (resolve_attachment_used &&
            pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03180",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.",
                            i, resolve->pDepthStencilResolveAttachment->attachment);
        }

        // Fall back to VK_FORMAT_UNDEFINED when either attachment is absent so the format queries stay safe; the
        // size/type comparisons are additionally gated on both attachments being used.
        VkFormat pDepthStencilAttachmentFormat =
            ds_attachment_used ? pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format
                               : VK_FORMAT_UNDEFINED;
        VkFormat pDepthStencilResolveAttachmentFormat =
            resolve_attachment_used ? pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format
                                    : VK_FORMAT_UNDEFINED;

        if (resolve_attachment_used && ds_attachment_used &&
            ((FormatDepthSize(pDepthStencilAttachmentFormat) != FormatDepthSize(pDepthStencilResolveAttachmentFormat)) ||
             (FormatDepthNumericalType(pDepthStencilAttachmentFormat) !=
              FormatDepthNumericalType(pDepthStencilResolveAttachmentFormat)))) {
            skip |=
                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03181",
                        "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                        "structure with resolve attachment %u which has a depth component (size %u). The depth component "
                        "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
                        i, resolve->pDepthStencilResolveAttachment->attachment,
                        FormatDepthSize(pDepthStencilResolveAttachmentFormat), FormatDepthSize(pDepthStencilAttachmentFormat));
        }

        if (resolve_attachment_used && ds_attachment_used &&
            ((FormatStencilSize(pDepthStencilAttachmentFormat) != FormatStencilSize(pDepthStencilResolveAttachmentFormat)) ||
             (FormatStencilNumericalType(pDepthStencilAttachmentFormat) !=
              FormatStencilNumericalType(pDepthStencilResolveAttachmentFormat)))) {
            skip |=
                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03182",
                        "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                        "structure with resolve attachment %u which has a stencil component (size %u). The stencil component "
                        "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
                        i, resolve->pDepthStencilResolveAttachment->attachment,
                        FormatStencilSize(pDepthStencilResolveAttachmentFormat), FormatStencilSize(pDepthStencilAttachmentFormat));
        }

        if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
              resolve->depthResolveMode & depth_stencil_resolve_props.supportedDepthResolveModes)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-depthResolveMode-03183",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure with invalid depthResolveMode=%u.",
                            i, resolve->depthResolveMode);
        }

        if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
              resolve->stencilResolveMode & depth_stencil_resolve_props.supportedStencilResolveModes)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-stencilResolveMode-03184",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure with invalid stencilResolveMode=%u.",
                            i, resolve->stencilResolveMode);
        }

        if (FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) &&
            depth_stencil_resolve_props.independentResolve == VK_FALSE &&
            depth_stencil_resolve_props.independentResolveNone == VK_FALSE &&
            !(resolve->depthResolveMode == resolve->stencilResolveMode)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03185",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure. 
The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.",
                            i, resolve->depthResolveMode, resolve->stencilResolveMode);
        }

        if (FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) &&
            depth_stencil_resolve_props.independentResolve == VK_FALSE &&
            depth_stencil_resolve_props.independentResolveNone == VK_TRUE &&
            !(resolve->depthResolveMode == resolve->stencilResolveMode ||
              resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
              resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03186",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or "
                            "one of them must be %u.",
                            i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE_KHR);
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    bool skip = false;

    if (device_extensions.vk_khr_depth_stencil_resolve) {
        skip |= ValidateDepthStencilResolve(report_data, phys_dev_ext_props.depth_stencil_resolve_props, pCreateInfo);
    }

    auto render_pass = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
    skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, render_pass->createInfo.ptr(), render_pass.get());

    return skip;
}

bool CoreChecks::ValidatePrimaryCommandBuffer(const CMD_BUFFER_STATE *pCB, char const *cmd_name, const char *error_code) {
    bool skip = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.",
                        cmd_name);
    }
    return skip;
}

bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip = false;
    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip |= static_cast<bool>(log_msg(
            report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
            kVUID_Core_DrawState_InvalidRenderArea,
            "Cannot execute a render pass with renderArea not within the bounds of the framebuffer. RenderArea: x %d, y %d, width "
            "%d, height %d. Framebuffer: width %d, height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip;
}

// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
// [load|store]Op flag must be checked
// TODO: The memory valid flag in DEVICE_MEMORY_STATE should probably be split to track the validity of stencil memory separately.
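// Returns true when 'op' is the effective load/store op for some aspect of the attachment: the color and depth aspects
// are governed by color_depth_op, while the stencil aspect (stencil-only or combined depth/stencil formats) is governed
// by stencil_op.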
template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) { if (color_depth_op != op && stencil_op != op) { return false; } bool check_color_depth_load_op = !FormatIsStencilOnly(format); bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op; return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op))); } bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version, const VkRenderPassBeginInfo *pRenderPassBegin) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr; auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr; bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCmdBeginRenderPass2KHR()" : "vkCmdBeginRenderPass()"; if (render_pass_state) { uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR // Handle extension struct from EXT_sample_locations const VkRenderPassSampleLocationsBeginInfoEXT *pSampleLocationsBeginInfo = lvl_find_in_chain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext); if (pSampleLocationsBeginInfo) { for (uint32_t i = 0; i < pSampleLocationsBeginInfo->attachmentInitialSampleLocationsCount; ++i) { if (pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i].attachmentIndex >= render_pass_state->createInfo.attachmentCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531", "Attachment index %u specified by attachment sample locations %u is greater than the " "attachment count of %u for the render pass being begun.", pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i].attachmentIndex, i, render_pass_state->createInfo.attachmentCount); } } for (uint32_t i = 0; i < pSampleLocationsBeginInfo->postSubpassSampleLocationsCount; ++i) { if (pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i].subpassIndex >= render_pass_state->createInfo.subpassCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532", "Subpass index %u specified by subpass sample locations %u is greater than the subpass count " "of %u for the render pass being begun.", pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i].subpassIndex, i, render_pass_state->createInfo.subpassCount); } } } for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) { auto pAttachment = &render_pass_state->createInfo.pAttachments[i]; if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_CLEAR)) { clear_op_size = static_cast<uint32_t>(i) + 1; } } if (clear_op_size > pRenderPassBegin->clearValueCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, HandleToUint64(render_pass_state->renderPass), "VUID-VkRenderPassBeginInfo-clearValueCount-00902", "In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u but there " "must be at least %u entries in pClearValues array to account for the highest index 
attachment in "
                            "%s that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which is %u. Note that the pClearValues array is indexed by "
                            "attachment number, so even if some pClearValues entries between 0 and %u correspond to attachments "
                            "that aren't cleared, they will be ignored.",
                            function_name, pRenderPassBegin->clearValueCount, clear_op_size,
                            report_data->FormatHandle(render_pass_state->renderPass).c_str(), clear_op_size, clear_op_size - 1);
        }
        skip |= VerifyRenderAreaBounds(pRenderPassBegin);
        skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state, pRenderPassBegin,
                                                      GetFramebufferState(pRenderPassBegin->framebuffer));
        if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
            skip |= ValidateRenderPassCompatibility("render pass", render_pass_state, "framebuffer", framebuffer->rp_state.get(),
                                                    function_name, "VUID-VkRenderPassBeginInfo-renderPass-00904");
        }

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-renderpass" : "VUID-vkCmdBeginRenderPass-renderpass";
        skip |= InsideRenderPass(cb_state, function_name, vuid);
        skip |= ValidateDependencies(framebuffer, render_pass_state);

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-bufferlevel" : "VUID-vkCmdBeginRenderPass-bufferlevel";
        skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdBeginRenderPass-commandBuffer-cmdpool";
        skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);

        const CMD_TYPE cmd_type = use_rp2 ? CMD_BEGINRENDERPASS2KHR : CMD_BEGINRENDERPASS;
        skip |= ValidateCmd(cb_state, cmd_type, function_name);
    }

    auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext);
    if (chained_device_group_struct) {
        skip |= ValidateDeviceMaskToPhysicalDeviceCount(
            chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
            HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
        skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                                         HandleToUint64(pRenderPassBegin->renderPass),
                                         "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
        skip |= ValidateDeviceMaskToCommandBuffer(
            cb_state, chained_device_group_struct->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
            HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");

        if (chained_device_group_struct->deviceRenderAreaCount != 0 &&
            chained_device_group_struct->deviceRenderAreaCount != physical_device_count) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            HandleToUint64(pRenderPassBegin->renderPass),
                            "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908",
                            "deviceRenderAreaCount[%" PRIu32 "] is invalid. 
Physical device count is %" PRIu32 ".", chained_device_group_struct->deviceRenderAreaCount, physical_device_count); } } return skip; } bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) { bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin); return skip; } bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfoKHR *pSubpassBeginInfo) { bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin); return skip; } void CoreChecks::RecordCmdBeginRenderPassState(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassContents contents) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr; auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr; if (render_pass_state) { cb_state->activeFramebuffer = pRenderPassBegin->framebuffer; cb_state->activeRenderPass = render_pass_state; // This is a shallow copy as that is all that is needed for now cb_state->activeRenderPassBeginInfo = *pRenderPassBegin; cb_state->activeSubpass = 0; cb_state->activeSubpassContents = contents; cb_state->framebuffers.insert(pRenderPassBegin->framebuffer); // Connect this framebuffer and its children to this cmdBuffer AddFramebufferBinding(cb_state, framebuffer); // Connect this RP to cmdBuffer AddCommandBufferBinding(&render_pass_state->cb_bindings, VulkanTypedHandle(render_pass_state->renderPass, kVulkanObjectTypeRenderPass), cb_state); // transition attachments to the correct layouts for beginning of renderPass and first subpass TransitionBeginRenderPassLayouts(cb_state, render_pass_state, framebuffer); auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext); if (chained_device_group_struct) { cb_state->active_render_pass_device_mask = chained_device_group_struct->deviceMask; } else { cb_state->active_render_pass_device_mask = cb_state->initial_device_mask; } } } void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) { RecordCmdBeginRenderPassState(commandBuffer, pRenderPassBegin, contents); } void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfoKHR *pSubpassBeginInfo) { RecordCmdBeginRenderPassState(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents); } bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCmdNextSubpass2KHR()" : "vkCmdNextSubpass()"; vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-bufferlevel" : "VUID-vkCmdNextSubpass-bufferlevel"; skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid); vuid = use_rp2 ? 
"VUID-vkCmdNextSubpass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdNextSubpass-commandBuffer-cmdpool"; skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid); const CMD_TYPE cmd_type = use_rp2 ? CMD_NEXTSUBPASS2KHR : CMD_NEXTSUBPASS; skip |= ValidateCmd(cb_state, cmd_type, function_name); vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-renderpass" : "VUID-vkCmdNextSubpass-renderpass"; skip |= OutsideRenderPass(cb_state, function_name, vuid); auto subpassCount = cb_state->activeRenderPass->createInfo.subpassCount; if (cb_state->activeSubpass == subpassCount - 1) { vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-None-03102" : "VUID-vkCmdNextSubpass-None-00909"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), vuid, "%s: Attempted to advance beyond final subpass.", function_name); } return skip; } bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer); } bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo, const VkSubpassEndInfoKHR *pSubpassEndInfo) { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer); } void CoreChecks::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); cb_state->activeSubpass++; cb_state->activeSubpassContents = contents; TransitionSubpassLayouts(cb_state, cb_state->activeRenderPass, cb_state->activeSubpass, GetFramebufferState(cb_state->activeRenderPassBeginInfo.framebuffer)); } void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { RecordCmdNextSubpass(commandBuffer, contents); } void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo, const VkSubpassEndInfoKHR *pSubpassEndInfo) { RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents); } bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCmdEndRenderPass2KHR()" : "vkCmdEndRenderPass()"; RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass; if (rp_state) { if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) { vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-None-03103" : "VUID-vkCmdEndRenderPass-None-00910"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), vuid, "%s: Called before reaching final subpass.", function_name); } } vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-renderpass" : "VUID-vkCmdEndRenderPass-renderpass"; skip |= OutsideRenderPass(cb_state, function_name, vuid); vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-bufferlevel" : "VUID-vkCmdEndRenderPass-bufferlevel"; skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid); vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdEndRenderPass-commandBuffer-cmdpool"; skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid); const CMD_TYPE cmd_type = use_rp2 ? 
CMD_ENDRENDERPASS2KHR : CMD_ENDRENDERPASS; skip |= ValidateCmd(cb_state, cmd_type, function_name); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer); return skip; } void CoreChecks::RecordCmdEndRenderPassState(VkCommandBuffer commandBuffer) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); FRAMEBUFFER_STATE *framebuffer = GetFramebufferState(cb_state->activeFramebuffer); TransitionFinalSubpassLayouts(cb_state, &cb_state->activeRenderPassBeginInfo, framebuffer); cb_state->activeRenderPass = nullptr; cb_state->activeSubpass = 0; cb_state->activeFramebuffer = VK_NULL_HANDLE; } void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) { RecordCmdEndRenderPassState(commandBuffer); } void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) { RecordCmdEndRenderPassState(commandBuffer); } bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const CMD_BUFFER_STATE *pCB, VkCommandBuffer secondaryBuffer, const CMD_BUFFER_STATE *pSubCB, const char *caller) { bool skip = false; if (!pSubCB->beginInfo.pInheritanceInfo) { return skip; } VkFramebuffer primary_fb = pCB->activeFramebuffer; VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer; if (secondary_fb != VK_NULL_HANDLE) { if (primary_fb != secondary_fb) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(primaryBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00099", "vkCmdExecuteCommands() called w/ invalid secondary %s which has a %s" " that is not the same as the primary command buffer's current active %s.", report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(), report_data->FormatHandle(primary_fb).c_str()); } auto fb = GetFramebufferState(secondary_fb); if (!fb) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(primaryBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer, "vkCmdExecuteCommands() called w/ invalid %s which has invalid %s.", report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str()); return skip; } } return skip; } bool CoreChecks::ValidateSecondaryCommandBufferState(CMD_BUFFER_STATE *pCB, CMD_BUFFER_STATE *pSubCB) { bool skip = false; unordered_set<int> activeTypes; if (!disabled.query_validation) { for (auto queryObject : pCB->activeQueries) { auto query_pool_state = GetQueryPoolState(queryObject.pool); if (query_pool_state) { if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS && pSubCB->beginInfo.pInheritanceInfo) { VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics; if ((cmdBufStatistics & query_pool_state->createInfo.pipelineStatistics) != cmdBufStatistics) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), "VUID-vkCmdExecuteCommands-commandBuffer-00104", "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s" ". 
Pipeline statistics are being queried, so the command buffer must have all bits set on the queryPool.",
                            report_data->FormatHandle(pCB->commandBuffer).c_str(),
                            report_data->FormatHandle(queryObject.pool).c_str());
                    }
                }
                activeTypes.insert(query_pool_state->createInfo.queryType);
            }
        }
        for (auto queryObject : pSubCB->startedQueries) {
            auto query_pool_state = GetQueryPoolState(queryObject.pool);
            if (query_pool_state && activeTypes.count(query_pool_state->createInfo.queryType)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
                                "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
                                " of type %d but a query of that type has been started on secondary %s.",
                                report_data->FormatHandle(pCB->commandBuffer).c_str(),
                                report_data->FormatHandle(queryObject.pool).c_str(), query_pool_state->createInfo.queryType,
                                report_data->FormatHandle(pSubCB->commandBuffer).c_str());
            }
        }
    }
    auto primary_pool = GetCommandPoolState(pCB->createInfo.commandPool);
    auto secondary_pool = GetCommandPoolState(pSubCB->createInfo.commandPool);
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pSubCB->commandBuffer), kVUID_Core_DrawState_InvalidQueueFamily,
                        "vkCmdExecuteCommands(): Primary %s created in queue family %d has secondary "
                        "%s created in queue family %d.",
                        report_data->FormatHandle(pCB->commandBuffer).c_str(), primary_pool->queueFamilyIndex,
                        report_data->FormatHandle(pSubCB->commandBuffer).c_str(), secondary_pool->queueFamilyIndex);
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                                   const VkCommandBuffer *pCommandBuffers) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    CMD_BUFFER_STATE *sub_cb_state = nullptr;
    std::unordered_set<CMD_BUFFER_STATE *> linked_command_buffers = cb_state->linkedCommandBuffers;

    for (uint32_t i = 0; i < commandBuffersCount; i++) {
        sub_cb_state = GetCBState(pCommandBuffers[i]);
        assert(sub_cb_state);
        if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00088",
                            "vkCmdExecuteCommands() called w/ Primary %s in element %u of pCommandBuffers array. 
All " "cmd buffers in pCommandBuffers array must be secondary.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), i); } else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) { if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) { auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass); if (cb_state->activeRenderPass && !(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00096", "vkCmdExecuteCommands(): Secondary %s is executed within a %s " "instance scope, but the Secondary Command Buffer does not have the " "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when " "the vkBeginCommandBuffer() was called.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass).c_str()); } else if (!cb_state->activeRenderPass && (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00100", "vkCmdExecuteCommands(): Secondary %s is executed outside a render pass " "instance scope, but the Secondary Command Buffer does have the " "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when " "the vkBeginCommandBuffer() was called.", report_data->FormatHandle(pCommandBuffers[i]).c_str()); } else if (cb_state->activeRenderPass && (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { // Make sure render pass is compatible with parent command buffer pass if has continue if (cb_state->activeRenderPass->renderPass != secondary_rp_state->renderPass) { skip |= ValidateRenderPassCompatibility( "primary command buffer", cb_state->activeRenderPass, "secondary command buffer", secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098"); } // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB skip |= ValidateFramebuffer(commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state, "vkCmdExecuteCommands()"); if (!sub_cb_state->cmd_execute_commands_functions.empty()) { // Inherit primary's activeFramebuffer and while running validate functions for (auto &function : sub_cb_state->cmd_execute_commands_functions) { skip |= function(cb_state, cb_state->activeFramebuffer); } } } } } // TODO(mlentine): Move more logic into this method skip |= ValidateSecondaryCommandBufferState(cb_state, sub_cb_state); skip |= ValidateCommandBufferState(sub_cb_state, "vkCmdExecuteCommands()", 0, "VUID-vkCmdExecuteCommands-pCommandBuffers-00089"); if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { if (sub_cb_state->in_use.load() || linked_command_buffers.count(sub_cb_state)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00090", "Attempt to simultaneously execute %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!", report_data->FormatHandle(cb_state->commandBuffer).c_str()); } if (cb_state->beginInfo.flags & 
VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                // Warn that the non-simultaneous secondary cmd buffer will cause the primary to be treated as
                // non-simultaneous
                skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                HandleToUint64(pCommandBuffers[i]), kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
                                "vkCmdExecuteCommands(): Secondary %s does not have "
                                "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
                                "%s to be treated as if it does not have "
                                "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.",
                                report_data->FormatHandle(pCommandBuffers[i]).c_str(),
                                report_data->FormatHandle(cb_state->commandBuffer).c_str());
            }
        }
        if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-commandBuffer-00101",
                            "vkCmdExecuteCommands(): Secondary %s cannot be submitted with a query in flight and "
                            "inherited queries not supported on this device.",
                            report_data->FormatHandle(pCommandBuffers[i]).c_str());
        }
        // Validate initial layout uses vs. the primary cmd buffer state
        // Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
        // initial layout usage of the secondary command buffer's resources must match the parent command buffer
        const auto *const_cb_state = static_cast<const CMD_BUFFER_STATE *>(cb_state);
        for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
            const auto image = sub_layout_map_entry.first;
            const auto *image_state = GetImageState(image);
            if (!image_state) continue;  // Can't set layouts of a dead image

            const auto *cb_subres_map = GetImageSubresourceLayoutMap(const_cb_state, image);
            // Const getter can be null in which case we have nothing to check against for this image...
            if (!cb_subres_map) continue;

            const auto &sub_cb_subres_map = sub_layout_map_entry.second;
            // Validate the initial_uses, that they match the current state of the primary cb, or absent a current state,
            // that they match any initial_layout.
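            // kInvalidLayout is the "no entry recorded" sentinel of the subresource layout maps; if the primary has
            // neither a current nor an initial layout recorded for a subresource, it places no constraint on the secondary.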
            for (auto it_init = sub_cb_subres_map->BeginInitialUse(); !it_init.AtEnd(); ++it_init) {
                const auto &sub_layout = (*it_init).layout;
                if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue;  // secondary doesn't care about current or initial
                const auto &subresource = (*it_init).subresource;
                // Look up the current layout (if any)
                VkImageLayout cb_layout = cb_subres_map->GetSubresourceLayout(subresource);
                const char *layout_type = "current";
                if (cb_layout == kInvalidLayout) {
                    // Find initial layout (if any)
                    cb_layout = cb_subres_map->GetSubresourceInitialLayout(subresource);
                    layout_type = "initial";
                }
                if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) {
                    // Accumulate into skip; discarding the log_msg result would lose the skip-call decision.
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                    HandleToUint64(pCommandBuffers[i]), "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
                                    "%s: Executed secondary command buffer using %s (subresource: aspectMask 0x%X, array layer %u, "
                                    "mip level %u) which expects layout %s--instead, image %s layout is %s.",
                                    "vkCmdExecuteCommands():", report_data->FormatHandle(image).c_str(), subresource.aspectMask,
                                    subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type,
                                    string_VkImageLayout(cb_layout));
                }
            }
        }
        linked_command_buffers.insert(sub_cb_state);
    }
    skip |= ValidatePrimaryCommandBuffer(cb_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-bufferlevel");
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdExecuteCommands()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdExecuteCommands-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    return skip;
}

void CoreChecks::PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                                 const VkCommandBuffer *pCommandBuffers) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    CMD_BUFFER_STATE *sub_cb_state = nullptr;
    for (uint32_t i = 0; i < commandBuffersCount; i++) {
        sub_cb_state = GetCBState(pCommandBuffers[i]);
        assert(sub_cb_state);
        if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                // TODO: Because this is a state change, clearing the VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT needs to be
                // moved from the validation step to the recording step
                cb_state->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
            }
        }

        // Propagate initial layout and current layout state to the primary cmd buffer
        for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
            const auto image = sub_layout_map_entry.first;
            const auto *image_state = GetImageState(image);
            if (!image_state) continue;  // Can't set layouts of a dead image

            auto *cb_subres_map = GetImageSubresourceLayoutMap(cb_state, *image_state);
            const auto *sub_cb_subres_map = sub_layout_map_entry.second.get();
            assert(cb_subres_map && sub_cb_subres_map);  // Non const get and map traversal should never be null
            cb_subres_map->UpdateFrom(*sub_cb_subres_map);
        }

        sub_cb_state->primaryCommandBuffer = cb_state->commandBuffer;
        cb_state->linkedCommandBuffers.insert(sub_cb_state);
        sub_cb_state->linkedCommandBuffers.insert(cb_state);
        for (auto &function : sub_cb_state->queryUpdates) {
            cb_state->queryUpdates.push_back(function);
        }
        for (auto &function : sub_cb_state->queue_submit_functions) {
            cb_state->queue_submit_functions.push_back(function);
        }
    }
}

bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem,
VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) { bool skip = false; DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem); if (mem_info) { if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), "VUID-vkMapMemory-memory-00682", "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: %s.", report_data->FormatHandle(mem).c_str()); } } skip |= ValidateMapMemRange(mem, offset, size); return skip; } void CoreChecks::PostCallRecordMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData, VkResult result) { if (VK_SUCCESS != result) return; // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this StoreMemRanges(mem, offset, size); InitializeAndTrackMemory(mem, offset, size, ppData); } bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) { bool skip = false; auto mem_info = GetDevMemState(mem); if (mem_info && !mem_info->mem_range.size) { // Valid Usage: memory must currently be mapped skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), "VUID-vkUnmapMemory-memory-00689", "Unmapping Memory without memory being mapped: %s.", report_data->FormatHandle(mem).c_str()); } return skip; } void CoreChecks::PreCallRecordUnmapMemory(VkDevice device, VkDeviceMemory mem) { auto mem_info = GetDevMemState(mem); mem_info->mem_range.size = 0; if (mem_info->shadow_copy) { free(mem_info->shadow_copy_base); mem_info->shadow_copy_base = 0; mem_info->shadow_copy = 0; } } bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) { bool skip = false; for (uint32_t i = 0; i < memRangeCount; ++i) { auto mem_info = GetDevMemState(pMemRanges[i].memory); if (mem_info) { if (pMemRanges[i].size == VK_WHOLE_SIZE) { if (mem_info->mem_range.offset > pMemRanges[i].offset) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00686", "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").", funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset)); } } else { const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE) ? 
mem_info->alloc_info.allocationSize : (mem_info->mem_range.offset + mem_info->mem_range.size); if ((mem_info->mem_range.offset > pMemRanges[i].offset) || (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00685", "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER ") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").", funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size), static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end)); } } } } return skip; } bool CoreChecks::ValidateAndCopyNoncoherentMemoryToDriver(uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) { bool skip = false; for (uint32_t i = 0; i < mem_range_count; ++i) { auto mem_info = GetDevMemState(mem_ranges[i].memory); if (mem_info) { if (mem_info->shadow_copy) { VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE) ? mem_info->mem_range.size : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset); char *data = static_cast<char *>(mem_info->shadow_copy); for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) { if (data[j] != NoncoherentMemoryFillValue) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory), kVUID_Core_MemTrack_InvalidMap, "Memory underflow was detected on %s.", report_data->FormatHandle(mem_ranges[i].memory).c_str()); } } for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) { if (data[j] != NoncoherentMemoryFillValue) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory), kVUID_Core_MemTrack_InvalidMap, "Memory overflow was detected on %s.", report_data->FormatHandle(mem_ranges[i].memory).c_str()); } } memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size)); } } } return skip; } void CoreChecks::CopyNoncoherentMemoryFromDriver(uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) { for (uint32_t i = 0; i < mem_range_count; ++i) { auto mem_info = GetDevMemState(mem_ranges[i].memory); if (mem_info && mem_info->shadow_copy) { VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE) ? 
mem_info->mem_range.size : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset); char *data = static_cast<char *>(mem_info->shadow_copy); memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size)); } } } bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) { bool skip = false; for (uint32_t i = 0; i < mem_range_count; ++i) { uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize; if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges->memory), "VUID-VkMappedMemoryRange-offset-00687", "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").", func_name, i, mem_ranges[i].offset, atom_size); } auto mem_info = GetDevMemState(mem_ranges[i].memory); if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) && (SafeModulo(mem_ranges[i].size, atom_size) != 0)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges->memory), "VUID-VkMappedMemoryRange-size-01390", "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").", func_name, i, mem_ranges[i].size, atom_size); } } return skip; } bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) { bool skip = false; skip |= ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); skip |= ValidateAndCopyNoncoherentMemoryToDriver(memRangeCount, pMemRanges); skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); return skip; } bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) { bool skip = false; skip |= ValidateMappedMemoryRangeDeviceLimits("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); skip |= ValidateMemoryIsMapped("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); return skip; } void CoreChecks::PostCallRecordInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges, VkResult result) { if (VK_SUCCESS == result) { // Update our shadow copy with modified driver data CopyNoncoherentMemoryFromDriver(memRangeCount, pMemRanges); } } bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) { bool skip = false; auto mem_info = GetDevMemState(mem); if (mem_info) { if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem), "VUID-vkGetDeviceMemoryCommitment-memory-00690", "Querying commitment for memory without VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: %s.", report_data->FormatHandle(mem).c_str()); } } return skip; } bool CoreChecks::ValidateBindImageMemory(VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset, const char *api_name) { bool skip = false; IMAGE_STATE *image_state = 
GetImageState(image); if (image_state) { // Track objects tied to memory uint64_t image_handle = HandleToUint64(image); skip = ValidateSetMemBinding(mem, VulkanTypedHandle(image, kVulkanObjectTypeImage), api_name); #ifdef VK_USE_PLATFORM_ANDROID_KHR if (image_state->external_format_android) { if (image_state->memory_requirements_checked) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle, kVUID_Core_DrawState_InvalidImage, "%s: Must not call vkGetImageMemoryRequirements on %s that will be bound to an external " "Android hardware buffer.", api_name, report_data->FormatHandle(image).c_str()); } return skip; } #endif // VK_USE_PLATFORM_ANDROID_KHR if (!image_state->memory_requirements_checked) { // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling // BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from // vkGetImageMemoryRequirements() skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle, kVUID_Core_DrawState_InvalidImage, "%s: Binding memory to %s but vkGetImageMemoryRequirements() has not been called on that image.", api_name, report_data->FormatHandle(image).c_str()); // Make the call for them so we can verify the state DispatchGetImageMemoryRequirements(device, image, &image_state->requirements); } // Validate bound memory range information auto mem_info = GetDevMemState(mem); if (mem_info) { skip |= ValidateInsertImageMemoryRange(image, mem_info, memoryOffset, image_state->requirements, image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name); skip |= ValidateMemoryTypes(mem_info, image_state->requirements.memoryTypeBits, api_name, "VUID-vkBindImageMemory-memory-01047"); } // Validate memory requirements alignment if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle, "VUID-vkBindImageMemory-memoryOffset-01048", "%s: memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64 ", returned from a call to vkGetImageMemoryRequirements with image.", api_name, memoryOffset, image_state->requirements.alignment); } if (mem_info) { // Validate memory requirements size if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle, "VUID-vkBindImageMemory-size-01049", "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64 ", returned from a call to vkGetImageMemoryRequirements with image.", api_name, mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size); } // Validate dedicated allocation if (mem_info->is_dedicated && ((mem_info->dedicated_image != image) || (memoryOffset != 0))) { // TODO: Add vkBindImageMemory2KHR error message when added to spec. 
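                // Only vkBindImageMemory() currently has a spec-assigned VUID for a dedicated-allocation mismatch; the
                // vkBindImageMemory2[KHR] paths fall back to kVUIDUndefined (see TODO above).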
auto validation_error = kVUIDUndefined; if (strcmp(api_name, "vkBindImageMemory()") == 0) { validation_error = "VUID-vkBindImageMemory-memory-01509"; } skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, image_handle, validation_error, "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfoKHR::%s must be equal " "to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(mem_info->dedicated_image).c_str(), report_data->FormatHandle(image).c_str(), memoryOffset); } } } return skip; } bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) { return ValidateBindImageMemory(image, mem, memoryOffset, "vkBindImageMemory()"); } void CoreChecks::UpdateBindImageMemoryState(VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) { IMAGE_STATE *image_state = GetImageState(image); if (image_state) { // Track bound memory range information auto mem_info = GetDevMemState(mem); if (mem_info) { InsertImageMemoryRange(image, mem_info, memoryOffset, image_state->requirements, image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR); } // Track objects tied to memory SetMemBinding(mem, image_state, memoryOffset, VulkanTypedHandle(image, kVulkanObjectTypeImage)); } } void CoreChecks::PostCallRecordBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset, VkResult result) { if (VK_SUCCESS != result) return; UpdateBindImageMemoryState(image, mem, memoryOffset); } bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) { bool skip = false; char api_name[128]; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindImageMemory2() pBindInfos[%u]", i); skip |= ValidateBindImageMemory(pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) { bool skip = false; char api_name[128]; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindImageMemory2KHR() pBindInfos[%u]", i); skip |= ValidateBindImageMemory(pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } void CoreChecks::PostCallRecordBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos, VkResult result) { if (VK_SUCCESS != result) return; for (uint32_t i = 0; i < bindInfoCount; i++) { UpdateBindImageMemoryState(pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset); } } void CoreChecks::PostCallRecordBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos, VkResult result) { if (VK_SUCCESS != result) return; for (uint32_t i = 0; i < bindInfoCount; i++) { UpdateBindImageMemoryState(pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset); } } bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) { bool skip = false; auto event_state = GetEventState(event); if (event_state) { if (event_state->write_in_use) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, HandleToUint64(event), kVUID_Core_DrawState_QueueForwardProgress, "Cannot call vkSetEvent() on %s that is already in use by a command 
buffer.", report_data->FormatHandle(event).c_str()); } } return skip; } void CoreChecks::PreCallRecordSetEvent(VkDevice device, VkEvent event) { auto event_state = GetEventState(event); if (event_state) { event_state->needsSignaled = false; event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT; } // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297) for (auto queue_data : queueMap) { auto event_entry = queue_data.second.eventToStageMap.find(event); if (event_entry != queue_data.second.eventToStageMap.end()) { event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT; } } } bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) { auto pFence = GetFenceState(fence); bool skip = ValidateFenceForSubmit(pFence); if (skip) { return true; } unordered_set<VkSemaphore> signaled_semaphores; unordered_set<VkSemaphore> unsignaled_semaphores; unordered_set<VkSemaphore> internal_semaphores; for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) { const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx]; std::vector<SEMAPHORE_WAIT> semaphore_waits; std::vector<VkSemaphore> semaphore_signals; for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) { VkSemaphore semaphore = bindInfo.pWaitSemaphores[i]; auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (unsignaled_semaphores.count(semaphore) || (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "%s is waiting on %s that has no way to be signaled.", report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str()); } else { signaled_semaphores.erase(semaphore); unsignaled_semaphores.insert(semaphore); } } if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) { internal_semaphores.insert(semaphore); } } for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) { VkSemaphore semaphore = bindInfo.pSignalSemaphores[i]; auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) { if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "%s is signaling %s that was previously signaled by %s but has not since " "been waited on by any queue.", report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(pSemaphore->signaler.first).c_str()); } else { unsignaled_semaphores.erase(semaphore); signaled_semaphores.insert(semaphore); } } } // Store sparse binding image_state and after binding is complete make sure that any requiring metadata have it bound std::unordered_set<IMAGE_STATE *> sparse_images; // If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) { const auto 
&image_bind = bindInfo.pImageBinds[i]; auto image_state = GetImageState(image_bind.image); if (!image_state) continue; // Param/Object validation should report image_bind.image handles being invalid, so just skip here. sparse_images.insert(image_state); if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) { if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) { // For now just warning if sparse image binding occurs without calling to get reqs first return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding sparse memory to %s without first calling " "vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.", report_data->FormatHandle(image_state->image).c_str()); } } if (!image_state->memory_requirements_checked) { // For now just warning if sparse image binding occurs without calling to get reqs first return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding sparse memory to %s without first calling " "vkGetImageMemoryRequirements() to retrieve requirements.", report_data->FormatHandle(image_state->image).c_str()); } } for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) { const auto &image_opaque_bind = bindInfo.pImageOpaqueBinds[i]; auto image_state = GetImageState(bindInfo.pImageOpaqueBinds[i].image); if (!image_state) continue; // Param/Object validation should report image_bind.image handles being invalid, so just skip here. sparse_images.insert(image_state); if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) { if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) { // For now just warning if sparse image binding occurs without calling to get reqs first return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding opaque sparse memory to %s without first calling " "vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.", report_data->FormatHandle(image_state->image).c_str()); } } if (!image_state->memory_requirements_checked) { // For now just warning if sparse image binding occurs without calling to get reqs first return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding opaque sparse memory to %s without first calling " "vkGetImageMemoryRequirements() to retrieve requirements.", report_data->FormatHandle(image_state->image).c_str()); } for (uint32_t j = 0; j < image_opaque_bind.bindCount; ++j) { if (image_opaque_bind.pBinds[j].flags & VK_SPARSE_MEMORY_BIND_METADATA_BIT) { image_state->sparse_metadata_bound = true; } } } for (const auto &sparse_image_state : sparse_images) { if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) { // Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound return log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(sparse_image_state->image), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding sparse memory 
to %s which requires a metadata aspect but no " "binding with VK_SPARSE_MEMORY_BIND_METADATA_BIT set was made.", report_data->FormatHandle(sparse_image_state->image).c_str()); } } } return skip; } void CoreChecks::PostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence, VkResult result) { if (result != VK_SUCCESS) return; uint64_t early_retire_seq = 0; auto pFence = GetFenceState(fence); auto pQueue = GetQueueState(queue); if (pFence) { if (pFence->scope == kSyncScopeInternal) { SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount)); if (!bindInfoCount) { // No work to do, just dropping a fence in the queue by itself. pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence); } } else { // Retire work up until this fence early, we will not see the wait that corresponds to this signal early_retire_seq = pQueue->seq + pQueue->submissions.size(); if (!external_sync_warning) { external_sync_warning = true; log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence), kVUID_Core_DrawState_QueueForwardProgress, "vkQueueBindSparse(): Signaling external %s on %s will disable validation of preceding command " "buffer lifecycle states and the in-use status of associated objects.", report_data->FormatHandle(fence).c_str(), report_data->FormatHandle(queue).c_str()); } } } for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) { const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx]; // Track objects tied to memory for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) { for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) { auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k]; SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size}, VulkanTypedHandle(bindInfo.pBufferBinds[j].buffer, kVulkanObjectTypeBuffer)); } } for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) { for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) { auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k]; SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size}, VulkanTypedHandle(bindInfo.pImageOpaqueBinds[j].image, kVulkanObjectTypeImage)); } } for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) { for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) { auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k]; // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4; SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, size}, VulkanTypedHandle(bindInfo.pImageBinds[j].image, kVulkanObjectTypeImage)); } } std::vector<SEMAPHORE_WAIT> semaphore_waits; std::vector<VkSemaphore> semaphore_signals; std::vector<VkSemaphore> semaphore_externals; for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) { VkSemaphore semaphore = bindInfo.pWaitSemaphores[i]; auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore) { if (pSemaphore->scope == kSyncScopeInternal) { if (pSemaphore->signaler.first != VK_NULL_HANDLE) { semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second}); pSemaphore->in_use.fetch_add(1); } pSemaphore->signaler.first = VK_NULL_HANDLE; 
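// Waiting on an internally-scoped binary semaphore consumes its pending signal:
// clearing signaler/signaled here lets a later wait with no corresponding signal
// be flagged as a forward-progress hazard. A minimal app-side sketch (hypothetical
// handles, not part of the layer) of the pattern being tracked:
//     VkBindSparseInfo bind = {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO};
//     bind.waitSemaphoreCount = 1;
//     bind.pWaitSemaphores = &sem;  // sem must carry a pending signal from earlier work
//     vkQueueBindSparse(queue, 1, &bind, VK_NULL_HANDLE);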
pSemaphore->signaled = false; } else { semaphore_externals.push_back(semaphore); pSemaphore->in_use.fetch_add(1); if (pSemaphore->scope == kSyncScopeExternalTemporary) { pSemaphore->scope = kSyncScopeInternal; } } } } for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) { VkSemaphore semaphore = bindInfo.pSignalSemaphores[i]; auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore) { if (pSemaphore->scope == kSyncScopeInternal) { pSemaphore->signaler.first = queue; pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1; pSemaphore->signaled = true; pSemaphore->in_use.fetch_add(1); semaphore_signals.push_back(semaphore); } else { // Retire work up until this submit early, we will not see the wait that corresponds to this signal early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1); if (!external_sync_warning) { external_sync_warning = true; log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress, "vkQueueBindSparse(): Signaling external %s on %s will disable validation of " "preceding command buffer lifecycle states and the in-use status of associated objects.", report_data->FormatHandle(semaphore).c_str(), report_data->FormatHandle(queue).c_str()); } } } } pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals, bindIdx == bindInfoCount - 1 ? fence : (VkFence)VK_NULL_HANDLE); } if (early_retire_seq) { RetireWorkOnQueue(pQueue, early_retire_seq); } } void CoreChecks::PostCallRecordCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore, VkResult result) { if (VK_SUCCESS != result) return; std::unique_ptr<SEMAPHORE_STATE> semaphore_state(new SEMAPHORE_STATE{}); semaphore_state->signaler.first = VK_NULL_HANDLE; semaphore_state->signaler.second = 0; semaphore_state->signaled = false; semaphore_state->scope = kSyncScopeInternal; semaphoreMap[*pSemaphore] = std::move(semaphore_state); } bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) { bool skip = false; SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore); if (sema_node) { const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore); skip |= ValidateObjectNotInUse(sema_node, obj_struct, caller_name, kVUIDUndefined); } return skip; } void CoreChecks::RecordImportSemaphoreState(VkSemaphore semaphore, VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) { SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore); if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) { if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) && sema_node->scope == kSyncScopeInternal) { sema_node->scope = kSyncScopeExternalTemporary; } else { sema_node->scope = kSyncScopeExternalPermanent; } } } #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR( VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) { return ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR"); } void CoreChecks::PostCallRecordImportSemaphoreWin32HandleKHR( VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo, VkResult result) { if 
(VK_SUCCESS != result) return; RecordImportSemaphoreState(pImportSemaphoreWin32HandleInfo->semaphore, pImportSemaphoreWin32HandleInfo->handleType, pImportSemaphoreWin32HandleInfo->flags); } #endif // VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) { return ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR"); } void CoreChecks::PostCallRecordImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo, VkResult result) { if (VK_SUCCESS != result) return; RecordImportSemaphoreState(pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType, pImportSemaphoreFdInfo->flags); } void CoreChecks::RecordGetExternalSemaphoreState(VkSemaphore semaphore, VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) { SEMAPHORE_STATE *semaphore_state = GetSemaphoreState(semaphore); if (semaphore_state && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) { // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference semaphore_state->scope = kSyncScopeExternalPermanent; } } #ifdef VK_USE_PLATFORM_WIN32_KHR void CoreChecks::PostCallRecordGetSemaphoreWin32HandleKHR(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo, HANDLE *pHandle, VkResult result) { if (VK_SUCCESS != result) return; RecordGetExternalSemaphoreState(pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType); } #endif void CoreChecks::PostCallRecordGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd, VkResult result) { if (VK_SUCCESS != result) return; RecordGetExternalSemaphoreState(pGetFdInfo->semaphore, pGetFdInfo->handleType); } bool CoreChecks::ValidateImportFence(VkFence fence, const char *caller_name) { FENCE_STATE *fence_node = GetFenceState(fence); bool skip = false; if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, HandleToUint64(fence), kVUIDUndefined, "Cannot call %s on %s that is currently in use.", caller_name, report_data->FormatHandle(fence).c_str()); } return skip; } void CoreChecks::RecordImportFenceState(VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type, VkFenceImportFlagsKHR flags) { FENCE_STATE *fence_node = GetFenceState(fence); if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) { if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) && fence_node->scope == kSyncScopeInternal) { fence_node->scope = kSyncScopeExternalTemporary; } else { fence_node->scope = kSyncScopeExternalPermanent; } } } #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) { return ValidateImportFence(pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR"); } void CoreChecks::PostCallRecordImportFenceWin32HandleKHR(VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo, VkResult result) { if (VK_SUCCESS != result) return; RecordImportFenceState(pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType, pImportFenceWin32HandleInfo->flags); } #endif // VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice 
device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) { return ValidateImportFence(pImportFenceFdInfo->fence, "vkImportFenceFdKHR"); } void CoreChecks::PostCallRecordImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo, VkResult result) { if (VK_SUCCESS != result) return; RecordImportFenceState(pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags); } void CoreChecks::RecordGetExternalFenceState(VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) { FENCE_STATE *fence_state = GetFenceState(fence); if (fence_state) { if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) { // Export with reference transference becomes external fence_state->scope = kSyncScopeExternalPermanent; } else if (fence_state->scope == kSyncScopeInternal) { // Export with copy transference has a side effect of resetting the fence fence_state->state = FENCE_UNSIGNALED; } } } #ifdef VK_USE_PLATFORM_WIN32_KHR void CoreChecks::PostCallRecordGetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo, HANDLE *pHandle, VkResult result) { if (VK_SUCCESS != result) return; RecordGetExternalFenceState(pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType); } #endif void CoreChecks::PostCallRecordGetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd, VkResult result) { if (VK_SUCCESS != result) return; RecordGetExternalFenceState(pGetFdInfo->fence, pGetFdInfo->handleType); } void CoreChecks::PostCallRecordCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent, VkResult result) { if (VK_SUCCESS != result) return; eventMap[*pEvent].needsSignaled = false; eventMap[*pEvent].write_in_use = 0; eventMap[*pEvent].stageMask = VkPipelineStageFlags(0); } bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state, SWAPCHAIN_NODE *old_swapchain_state) { // All physical devices and queue families are required to be able to present to any native window on Android; require the // application to have established support on any other platform. if (!instance_extensions.vk_khr_android_surface) { auto support_predicate = [this](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool { // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device return (qs.first.gpu == physical_device) && qs.second; }; const auto &support = surface_state->gpu_queue_support; bool is_supported = std::any_of(support.begin(), support.end(), support_predicate); if (!is_supported) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-surface-01270", "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. 
The " "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with " "this surface for at least one queue family of this device.", func_name)) return true; } } if (old_swapchain_state) { if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pCreateInfo->oldSwapchain), "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name)) return true; } if (old_swapchain_state->retired) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pCreateInfo->oldSwapchain), "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933", "%s: pCreateInfo->oldSwapchain is retired", func_name)) return true; } } if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689", "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height)) return true; } auto physical_device_state = GetPhysicalDeviceState(); if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physical_device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery, "%s: surface capabilities not retrieved for this physical device", func_name)) return true; } else { // have valid capabilities auto &capabilities = physical_device_state->surfaceCapabilities; // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount: if (pCreateInfo->minImageCount < capabilities.minImageCount) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271", "%s called with minImageCount = %d, which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).", func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) return true; } if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272", "%s called with minImageCount = %d, which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. 
minImageCount = %d, maxImageCount = %d).", func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) return true; } // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent: if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) || (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) || (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) || (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274", "%s called with imageExtent = (%d,%d), which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), " "maxImageExtent = (%d,%d).", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height)) return true; } // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in // VkSurfaceCapabilitiesKHR::supportedTransforms. if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) || !(pCreateInfo->preTransform & capabilities.supportedTransforms)) { // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build // it up a little at a time, and then log it: std::string errorString = ""; char str[1024]; // Here's the first part of the message: sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform)); errorString += str; for (int i = 0; i < 32; i++) { // Build up the rest of the message: if ((1 << i) & capabilities.supportedTransforms) { const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i)); sprintf(str, " %s\n", newStr); errorString += str; } } // Log the message that we've built up: if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", errorString.c_str())) return true; } // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) || !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) { // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build // it up a little at a time, and then log it: std::string errorString = ""; char str[1024]; // Here's the first part of the message: sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). 
Supported values are:\n", func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha)); errorString += str; for (int i = 0; i < 32; i++) { // Build up the rest of the message: if ((1 << i) & capabilities.supportedCompositeAlpha) { const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i)); sprintf(str, " %s\n", newStr); errorString += str; } } // Log the message that we've built up: if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", errorString.c_str())) return true; } // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers: if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275", "%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers)) return true; } // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags: if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276", "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.", func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags)) return true; } if (device_extensions.vk_khr_surface_protected_capabilities && (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) { VkPhysicalDeviceSurfaceInfo2KHR surfaceInfo = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR}; surfaceInfo.surface = pCreateInfo->surface; VkSurfaceProtectedCapabilitiesKHR surfaceProtectedCapabilities = {VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR}; VkSurfaceCapabilities2KHR surfaceCapabilities = {VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR}; surfaceCapabilities.pNext = &surfaceProtectedCapabilities; DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->phys_device, &surfaceInfo, &surfaceCapabilities); if (!surfaceProtectedCapabilities.supportsProtected) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03187", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface " "capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.", func_name)) return true; } } } // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR(): if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery, "%s called before getting format(s) from vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name)) return true; } else { // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format: bool foundFormat = false; bool foundColorSpace = false; bool foundMatch = false; for (auto const &format : physical_device_state->surface_formats) { if (pCreateInfo->imageFormat == 
format.format) { // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace: foundFormat = true; if (pCreateInfo->imageColorSpace == format.colorSpace) { foundMatch = true; break; } } else { if (pCreateInfo->imageColorSpace == format.colorSpace) { foundColorSpace = true; } } } if (!foundMatch) { if (!foundFormat) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273", "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name, pCreateInfo->imageFormat)) return true; } if (!foundColorSpace) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273", "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name, pCreateInfo->imageColorSpace)) return true; } } } // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR(): if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) { // FIFO is required to always be supported if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery, "%s called before getting present mode(s) from vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name)) return true; } } else { // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR(): bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(), pCreateInfo->presentMode) != physical_device_state->present_modes.end(); if (!foundMatch) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-presentMode-01281", "%s called with a non-supported presentMode (i.e. %s).", func_name, string_VkPresentModeKHR(pCreateInfo->presentMode))) return true; } } // Validate state for shared presentable case if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode || VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) { if (!device_extensions.vk_khr_shared_presentable_image) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_ExtensionNotEnabled, "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not " "been enabled.", func_name, string_VkPresentModeKHR(pCreateInfo->presentMode))) return true; } else if (pCreateInfo->minImageCount != 1) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383", "%s called with presentMode %s, but minImageCount value is %d. 
For shared presentable image, minImageCount " "must be 1.", func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount)) return true; } } if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) { if (!device_extensions.vk_khr_swapchain_mutable_format) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_DrawState_ExtensionNotEnabled, "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR which requires the " "VK_KHR_swapchain_mutable_format extension, which has not been enabled.", func_name)) return true; } else { const auto *image_format_list = lvl_find_in_chain<VkImageFormatListCreateInfoKHR>(pCreateInfo->pNext); if (image_format_list == nullptr) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the pNext chain of " "pCreateInfo does not contain an instance of VkImageFormatListCreateInfoKHR.", func_name)) return true; } else if (image_format_list->viewFormatCount == 0) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the viewFormatCount " "member of VkImageFormatListCreateInfoKHR in the pNext chain is zero.", func_name)) return true; } else { bool found_base_format = false; for (uint32_t i = 0; i < image_format_list->viewFormatCount; ++i) { if (image_format_list->pViewFormats[i] == pCreateInfo->imageFormat) { found_base_format = true; break; } } if (!found_base_format) { if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but none of the " "elements of the pViewFormats member of VkImageFormatListCreateInfoKHR match " "pCreateInfo->imageFormat.", func_name)) return true; } } } } if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) { bool skip = ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, func_name, "pCreateInfo->pQueueFamilyIndices", "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428", "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428", false); if (skip) return true; } return false; } bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) { auto surface_state = GetSurfaceState(pCreateInfo->surface); auto old_swapchain_state = GetSwapchainState(pCreateInfo->oldSwapchain); return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state); } void CoreChecks::RecordCreateSwapchainState(VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo, VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state, SWAPCHAIN_NODE *old_swapchain_state) { if (VK_SUCCESS == result) { auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain)); if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode || VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR ==
pCreateInfo->presentMode) { swapchain_state->shared_presentable = true; } surface_state->swapchain = swapchain_state.get(); swapchainMap[*pSwapchain] = std::move(swapchain_state); } else { surface_state->swapchain = nullptr; } // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain is retired if (old_swapchain_state) { old_swapchain_state->retired = true; } return; } void CoreChecks::PostCallRecordCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain, VkResult result) { auto surface_state = GetSurfaceState(pCreateInfo->surface); auto old_swapchain_state = GetSwapchainState(pCreateInfo->oldSwapchain); RecordCreateSwapchainState(result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state); } void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) { if (!swapchain) return; auto swapchain_data = GetSwapchainState(swapchain); if (swapchain_data) { if (swapchain_data->images.size() > 0) { for (auto swapchain_image : swapchain_data->images) { auto image_sub = imageSubresourceMap.find(swapchain_image); if (image_sub != imageSubresourceMap.end()) { for (auto imgsubpair : image_sub->second) { auto image_item = imageLayoutMap.find(imgsubpair); if (image_item != imageLayoutMap.end()) { imageLayoutMap.erase(image_item); } } imageSubresourceMap.erase(image_sub); } ClearMemoryObjectBindings(VulkanTypedHandle(swapchain_image, kVulkanObjectTypeImage)); EraseQFOImageRelaseBarriers(swapchain_image); imageMap.erase(swapchain_image); } } auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface); if (surface_state) { if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr; } swapchainMap.erase(swapchain); } } bool CoreChecks::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) { auto swapchain_state = GetSwapchainState(swapchain); bool skip = false; if (swapchain_state && pSwapchainImages) { // Compare the preliminary value of *pSwapchainImageCount with the value this time: if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_Swapchain_PriorCount, "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount; but no prior positive value has " "been seen for pSwapchainImages."); } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), kVUID_Core_Swapchain_InvalidCount, "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with pSwapchainImages set to a " "value (%d) that is greater than the value (%d) that was returned when pSwapchainImageCount was NULL.", *pSwapchainImageCount, swapchain_state->get_swapchain_image_count); } } return skip; } void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages, VkResult result) { if ((result != VK_SUCCESS) && (result != VK_INCOMPLETE)) return; auto swapchain_state = GetSwapchainState(swapchain); if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount); if (pSwapchainImages) { if 
(swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) { swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS; } for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) { if (swapchain_state->images[i] != VK_NULL_HANDLE) continue; // Already retrieved this. IMAGE_LAYOUT_STATE image_layout_node; image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED; image_layout_node.format = swapchain_state->createInfo.imageFormat; // Add imageMap entries for each swapchain image VkImageCreateInfo image_ci = {}; image_ci.flags = 0; image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.format = swapchain_state->createInfo.imageFormat; image_ci.extent.width = swapchain_state->createInfo.imageExtent.width; image_ci.extent.height = swapchain_state->createInfo.imageExtent.height; image_ci.extent.depth = 1; image_ci.mipLevels = 1; image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers; image_ci.samples = VK_SAMPLE_COUNT_1_BIT; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = swapchain_state->createInfo.imageUsage; image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode; imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci)); auto &image_state = imageMap[pSwapchainImages[i]]; image_state->valid = false; image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY; swapchain_state->images[i] = pSwapchainImages[i]; ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()}; imageSubresourceMap[pSwapchainImages[i]].push_back(subpair); imageLayoutMap[subpair] = image_layout_node; } } if (*pSwapchainImageCount) { if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) { swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT; } swapchain_state->get_swapchain_image_count = *pSwapchainImageCount; } } bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) { bool skip = false; auto queue_state = GetQueueState(queue); for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) { auto pSemaphore = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]); if (pSemaphore && !pSemaphore->signaled) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, kVUID_Core_DrawState_QueueForwardProgress, "%s is waiting on %s that has no way to be signaled.", report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str()); } } for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) { auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]); if (swapchain_data) { if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainInvalidImage, "vkQueuePresentKHR: Swapchain image index too large (%u). 
There are only %u images in this swapchain.", pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size()); } else { auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]]; auto image_state = GetImageState(image); if (image_state->shared_presentable) { image_state->layout_locked = true; } if (!image_state->acquired) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainImageNotAcquired, "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]); } vector<VkImageLayout> layouts; if (FindLayouts(image, layouts)) { for (auto layout : layouts) { if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!device_extensions.vk_khr_shared_presentable_image || (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue), "VUID-VkPresentInfoKHR-pImageIndices-01296", "Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.", string_VkImageLayout(layout)); } } } } // All physical devices and queue families are required to be able to present to any native window on Android; require // the application to have established support on any other platform. if (!instance_extensions.vk_khr_android_surface) { auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface); auto support_it = surface_state->gpu_queue_support.find({physical_device, queue_state->queueFamilyIndex}); if (support_it == surface_state->gpu_queue_support.end()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainUnsupportedQueue, "vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR"); } else if (!support_it->second) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-vkQueuePresentKHR-pSwapchains-01292", "vkQueuePresentKHR: Presenting image on queue that cannot present to this surface."); } } } } if (pPresentInfo && pPresentInfo->pNext) { // Verify ext struct const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext); if (present_regions) { for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) { auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]); assert(swapchain_data); VkPresentRegionKHR region = present_regions->pRegions[i]; for (uint32_t j = 0; j < region.rectangleCount; ++j) { VkRectLayerKHR rect = region.pRectangles[j]; if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-offset-01261", "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, " "pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater " "than the corresponding swapchain's imageExtent.width (%i).", i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width); } if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) { skip |= log_msg(report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-offset-01261", "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, " "pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater " "than the corresponding swapchain's imageExtent.height (%i).", i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height); } if (rect.layer > swapchain_data->createInfo.imageArrayLayers) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-layer-01262", "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer " "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).", i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers); } } } } const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext); if (present_times_info) { if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[0]), "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247", "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount " "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, " "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.", present_times_info->swapchainCount, pPresentInfo->swapchainCount); } } } return skip; } void CoreChecks::PostCallRecordQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo, VkResult result) { // Semaphore waits occur before error generation, if the call reached the ICD. (Confirm?) for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) { auto pSemaphore = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]); if (pSemaphore) { pSemaphore->signaler.first = VK_NULL_HANDLE; pSemaphore->signaled = false; } } for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) { // Note: this is imperfect, in that we can get confused about what did or didn't succeed-- but if the app does that, it's // confused itself just as much. auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result; if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue; // this present didn't actually happen. // Mark the image as having been released to the WSI auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]); if (swapchain_data && (swapchain_data->images.size() > pPresentInfo->pImageIndices[i])) { auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]]; auto image_state = GetImageState(image); if (image_state) { image_state->acquired = false; } } } // Note: even though presentation is directed to a queue, there is no direct ordering between QP and subsequent work, so QP (and // its semaphore waits) /never/ participate in any completion proof. 
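// The record step above clears the wait semaphores and releases each successfully
// presented image back to the WSI. A minimal app-side sketch (hypothetical handles,
// not part of the layer) of the acquire/submit/present ordering this state tracking models:
//     uint32_t idx = 0;
//     vkAcquireNextImageKHR(dev, swapchain, UINT64_MAX, acquire_sem, VK_NULL_HANDLE, &idx);
//     // ... submit rendering that waits on acquire_sem and signals render_sem ...
//     VkPresentInfoKHR present = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &render_sem;
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &idx;
//     vkQueuePresentKHR(queue, &present);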
} bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) { bool skip = false; if (pCreateInfos) { for (uint32_t i = 0; i < swapchainCount; i++) { auto surface_state = GetSurfaceState(pCreateInfos[i].surface); auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain); std::stringstream func_name; func_name << "vkCreateSharedSwapchainsKHR[" << i << "]()"; skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state); } } return skip; } void CoreChecks::PostCallRecordCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains, VkResult result) { if (pCreateInfos) { for (uint32_t i = 0; i < swapchainCount; i++) { auto surface_state = GetSurfaceState(pCreateInfos[i].surface); auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain); RecordCreateSwapchainState(result, &pCreateInfos[i], &pSwapchains[i], surface_state, old_swapchain_state); } } } bool CoreChecks::ValidateAcquireNextImage(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, const char *func_name) { bool skip = false; if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-vkAcquireNextImageKHR-semaphore-01780", "%s: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to " "determine the completion of this operation.", func_name); } auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore), "VUID-vkAcquireNextImageKHR-semaphore-01286", "%s: Semaphore must not be currently signaled or in a wait state.", func_name); } auto pFence = GetFenceState(fence); if (pFence) { skip |= ValidateFenceForSubmit(pFence); } auto swapchain_data = GetSwapchainState(swapchain); if (swapchain_data && swapchain_data->retired) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(swapchain), "VUID-vkAcquireNextImageKHR-swapchain-01285", "%s: This swapchain has been retired. 
The application can still present any images it " "has acquired, but cannot acquire any more.", func_name); } auto physical_device_state = GetPhysicalDeviceState(); if (swapchain_data && (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED)) { uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(), [=](VkImage image) { return GetImageState(image)->acquired; }); if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainTooManyImages, "%s: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")", func_name, acquired_images); } } if (swapchain_data && swapchain_data->images.size() == 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainImagesNotFound, "%s: No images found to acquire from. Application probably did not call " "vkGetSwapchainImagesKHR after swapchain creation.", func_name); } return skip; } bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) { return ValidateAcquireNextImage(device, swapchain, timeout, semaphore, fence, pImageIndex, "vkAcquireNextImageKHR"); } bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex) { bool skip = false; skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pAcquireInfo->swapchain), "VUID-VkAcquireNextImageInfoKHR-deviceMask-01290"); skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pAcquireInfo->swapchain), "VUID-VkAcquireNextImageInfoKHR-deviceMask-01291"); skip |= ValidateAcquireNextImage(device, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore, pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR"); return skip; } void CoreChecks::RecordAcquireNextImageState(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) { auto pFence = GetFenceState(fence); if (pFence && pFence->scope == kSyncScopeInternal) { // Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary // import pFence->state = FENCE_INFLIGHT; pFence->signaler.first = VK_NULL_HANDLE; // ANI isn't on a queue, so this can't participate in a completion proof. } auto pSemaphore = GetSemaphoreState(semaphore); if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) { // Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a // temporary import pSemaphore->signaled = true; pSemaphore->signaler.first = VK_NULL_HANDLE; } // Mark the image as acquired. 
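// This "acquired" flag is what PreCallValidateQueuePresentKHR checks above: presenting
// an image index that was never returned by an acquire call is reported as
// kVUID_Core_DrawState_SwapchainImageNotAcquired.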
auto swapchain_data = GetSwapchainState(swapchain); if (swapchain_data && (swapchain_data->images.size() > *pImageIndex)) { auto image = swapchain_data->images[*pImageIndex]; auto image_state = GetImageState(image); if (image_state) { image_state->acquired = true; image_state->shared_presentable = swapchain_data->shared_presentable; } } } void CoreChecks::PostCallRecordAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, VkResult result) { if ((VK_SUCCESS != result) && (VK_SUBOPTIMAL_KHR != result)) return; RecordAcquireNextImageState(device, swapchain, timeout, semaphore, fence, pImageIndex); } void CoreChecks::PostCallRecordAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex, VkResult result) { if ((VK_SUCCESS != result) && (VK_SUBOPTIMAL_KHR != result)) return; RecordAcquireNextImageState(device, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore, pAcquireInfo->fence, pImageIndex); } void CoreChecks::PostCallRecordEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices, VkResult result) { if ((NULL != pPhysicalDevices) && ((result == VK_SUCCESS || result == VK_INCOMPLETE))) { for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) { auto &phys_device_state = physical_device_map[pPhysicalDevices[i]]; phys_device_state.phys_device = pPhysicalDevices[i]; // Init actual features for each physical device DispatchGetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features2.features); } } } // Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(debug_report_data *report_data, PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family_property_count, bool qfp_null, const char *caller_name) { bool skip = false; if (!qfp_null) { // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) { skip |= log_msg( report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_MissingQueryCount, "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended " "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.", caller_name, caller_name); // Then verify that pCount that is passed in on second call matches what was returned } else if (pd_state->queue_family_known_count != requested_queue_family_property_count) { skip |= log_msg( report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_CountMismatch, "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32 ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32 ". 
It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was " "previously obtained by calling %s with NULL pQueueFamilyProperties.", caller_name, requested_queue_family_property_count, pd_state->queue_family_known_count, caller_name, caller_name); } pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS; } return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(report_data, physical_device_state, *pQueueFamilyPropertyCount, (nullptr == pQueueFamilyProperties), "vkGetPhysicalDeviceQueueFamilyProperties()"); } bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(report_data, physical_device_state, *pQueueFamilyPropertyCount, (nullptr == pQueueFamilyProperties), "vkGetPhysicalDeviceQueueFamilyProperties2()"); } bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(report_data, physical_device_state, *pQueueFamilyPropertyCount, (nullptr == pQueueFamilyProperties), "vkGetPhysicalDeviceQueueFamilyProperties2KHR()"); } // Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { pd_state->queue_family_known_count = std::max(pd_state->queue_family_known_count, count); if (!pQueueFamilyProperties) { if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT; } else { // Save queue family properties pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS; pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count)); for (uint32_t i = 0; i < count; ++i) { pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties; } } } void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); VkQueueFamilyProperties2KHR *pqfp = nullptr; std::vector<VkQueueFamilyProperties2KHR> qfp; qfp.resize(*pQueueFamilyPropertyCount); if (pQueueFamilyProperties) { for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; ++i) { qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR; qfp[i].pNext = nullptr; qfp[i].queueFamilyProperties = pQueueFamilyProperties[i]; } pqfp = qfp.data(); } 
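// The core 1.0 results are wrapped in VkQueueFamilyProperties2KHR structs above so
// one state-update helper can serve the 1.0, 2, and 2KHR entry points alike.
// App-side two-call idiom this tracking expects (hypothetical variables):
//     uint32_t count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);       // query the count
//     std::vector<VkQueueFamilyProperties> props(count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());  // fetch details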
StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pqfp); } void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties); } void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties); } bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) { auto surface_state = GetSurfaceState(surface); bool skip = false; if ((surface_state) && (surface_state->swapchain)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, HandleToUint64(instance), "VUID-vkDestroySurfaceKHR-surface-01266", "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed."); } return skip; } void CoreChecks::PreCallRecordValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) { surface_map.erase(surface); } void CoreChecks::RecordVulkanSurface(VkSurfaceKHR *pSurface) { surface_map[*pSurface] = std::unique_ptr<SURFACE_STATE>(new SURFACE_STATE{*pSurface}); } void CoreChecks::PostCallRecordCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } #ifdef VK_USE_PLATFORM_ANDROID_KHR void CoreChecks::PostCallRecordCreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } #endif // VK_USE_PLATFORM_ANDROID_KHR #ifdef VK_USE_PLATFORM_IOS_MVK void CoreChecks::PostCallRecordCreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } #endif // VK_USE_PLATFORM_IOS_MVK #ifdef VK_USE_PLATFORM_MACOS_MVK void CoreChecks::PostCallRecordCreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } #endif // VK_USE_PLATFORM_MACOS_MVK #ifdef VK_USE_PLATFORM_WAYLAND_KHR void CoreChecks::PostCallRecordCreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice 
physicalDevice, uint32_t queueFamilyIndex, struct wl_display *display) { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306", "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_WAYLAND_KHR #ifdef VK_USE_PLATFORM_WIN32_KHR void CoreChecks::PostCallRecordCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309", "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_WIN32_KHR #ifdef VK_USE_PLATFORM_XCB_KHR void CoreChecks::PostCallRecordCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t *connection, xcb_visualid_t visual_id) { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312", "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_XCB_KHR #ifdef VK_USE_PLATFORM_XLIB_KHR void CoreChecks::PostCallRecordCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) { if (VK_SUCCESS != result) return; RecordVulkanSurface(pSurface); } bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display *dpy, VisualID visualID) { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315", "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_XLIB_KHR void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR *pSurfaceCapabilities, VkResult result) { if (VK_SUCCESS != result) return; auto physical_device_state = GetPhysicalDeviceState(physicalDevice); physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS; physical_device_state->surfaceCapabilities = *pSurfaceCapabilities; } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, VkSurfaceCapabilities2KHR *pSurfaceCapabilities, VkResult result) { if (VK_SUCCESS != result) return; auto physical_device_state = GetPhysicalDeviceState(physicalDevice); 
physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS; physical_device_state->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities; } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT *pSurfaceCapabilities, VkResult result) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS; physical_device_state->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount; physical_device_state->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount; physical_device_state->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent; physical_device_state->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent; physical_device_state->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent; physical_device_state->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers; physical_device_state->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms; physical_device_state->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform; physical_device_state->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha; physical_device_state->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags; } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32 *pSupported) { const auto physical_device_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(physical_device_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269", "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex"); } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32 *pSupported, VkResult result) { if (VK_SUCCESS != result) return; auto surface_state = GetSurfaceState(surface); surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE); } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; // TODO: This isn't quite right -- available modes may differ by surface AND physical device. 
auto physical_device_state = GetPhysicalDeviceState(physicalDevice); auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState; if (*pPresentModeCount) { if (call_state < QUERY_COUNT) call_state = QUERY_COUNT; if (*pPresentModeCount > physical_device_state->present_modes.size()) physical_device_state->present_modes.resize(*pPresentModeCount); } if (pPresentModes) { if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS; for (uint32_t i = 0; i < *pPresentModeCount; i++) { physical_device_state->present_modes[i] = pPresentModes[i]; } } } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats) { if (!pSurfaceFormats) return false; auto physical_device_state = GetPhysicalDeviceState(physicalDevice); auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState; bool skip = false; switch (call_state) { case UNCALLED: // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application didn't // previously call this function with a NULL value of pSurfaceFormats: skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), kVUID_Core_DevLimit_MustQueryCount, "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior " "positive value has been seen for pSurfaceFormats."); break; default: auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size(); if (prev_format_count != *pSurfaceFormatCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), kVUID_Core_DevLimit_CountMismatch, "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with " "pSurfaceFormats set to a value (%u) that is greater than the value (%u) that was returned " "when pSurfaceFormatCount was NULL.", *pSurfaceFormatCount, prev_format_count); } break; } return skip; } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; auto physical_device_state = GetPhysicalDeviceState(physicalDevice); auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState; if (*pSurfaceFormatCount) { if (call_state < QUERY_COUNT) call_state = QUERY_COUNT; if (*pSurfaceFormatCount > physical_device_state->surface_formats.size()) physical_device_state->surface_formats.resize(*pSurfaceFormatCount); } if (pSurfaceFormats) { if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS; for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) { physical_device_state->surface_formats[i] = pSurfaceFormats[i]; } } } void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; auto physicalDeviceState = GetPhysicalDeviceState(physicalDevice); if (*pSurfaceFormatCount) { if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) { 
physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT; } if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size()) physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount); } if (pSurfaceFormats) { if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) { physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS; } for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) { physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat; } } } void CoreChecks::PreCallRecordCmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) { BeginCmdDebugUtilsLabel(report_data, commandBuffer, pLabelInfo); } void CoreChecks::PostCallRecordCmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) { EndCmdDebugUtilsLabel(report_data, commandBuffer); } void CoreChecks::PreCallRecordCmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) { InsertCmdDebugUtilsLabel(report_data, commandBuffer, pLabelInfo); // Squirrel away an easily accessible copy. CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); cb_state->debug_label = LoggingLabel(pLabelInfo); } void CoreChecks::PostRecordEnumeratePhysicalDeviceGroupsState(uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) { if (NULL != pPhysicalDeviceGroupProperties) { for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) { for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) { VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j]; auto &phys_device_state = physical_device_map[cur_phys_dev]; phys_device_state.phys_device = cur_phys_dev; // Init actual features for each physical device DispatchGetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features2.features); } } } } void CoreChecks::PostCallRecordEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; PostRecordEnumeratePhysicalDeviceGroupsState(pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties); } void CoreChecks::PostCallRecordEnumeratePhysicalDeviceGroupsKHR(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; PostRecordEnumeratePhysicalDeviceGroupsState(pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties); } bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo) { bool skip = false; const auto layout = GetDescriptorSetLayout(this, pCreateInfo->descriptorSetLayout); if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) { const VulkanTypedHandle ds_typed(pCreateInfo->descriptorSetLayout, kVulkanObjectTypeDescriptorSetLayout); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, ds_typed.handle, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350", "%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name, report_data->FormatHandle(ds_typed).c_str()); } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) { auto bind_point = 
pCreateInfo->pipelineBindPoint; bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE); if (!valid_bp) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351", "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point)); } const auto pipeline_layout = GetPipelineLayout(pCreateInfo->pipelineLayout); if (!pipeline_layout) { const VulkanTypedHandle pl_typed(pCreateInfo->pipelineLayout, kVulkanObjectTypePipelineLayout); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_typed.handle, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352", "%s: Invalid pCreateInfo->pipelineLayout (%s)", func_name, report_data->FormatHandle(pl_typed).c_str()); } else { const uint32_t pd_set = pCreateInfo->set; if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] || !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) { const VulkanTypedHandle pl_typed(pCreateInfo->pipelineLayout, kVulkanObjectTypePipelineLayout); skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_typed.handle, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353", "%s: pCreateInfo->set (%" PRIu32 ") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).", func_name, pd_set, report_data->FormatHandle(pl_typed).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) { bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo); return skip; } bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) { bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo); return skip; } void CoreChecks::PreCallRecordDestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const VkAllocationCallbacks *pAllocator) { if (!descriptorUpdateTemplate) return; desc_template_map.erase(descriptorUpdateTemplate); } void CoreChecks::PreCallRecordDestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const VkAllocationCallbacks *pAllocator) { if (!descriptorUpdateTemplate) return; desc_template_map.erase(descriptorUpdateTemplate); } void CoreChecks::RecordCreateDescriptorUpdateTemplateState(const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) { safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo); std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info)); desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state); } void CoreChecks::PostCallRecordCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks 
*pAllocator, VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate, VkResult result) { if (VK_SUCCESS != result) return; RecordCreateDescriptorUpdateTemplateState(pCreateInfo, pDescriptorUpdateTemplate); } void CoreChecks::PostCallRecordCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate, VkResult result) { if (VK_SUCCESS != result) return; RecordCreateDescriptorUpdateTemplateState(pCreateInfo, pDescriptorUpdateTemplate); } bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) { bool skip = false; auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate); if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) { // Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds // but retaining the assert as template support is new enough to want to investigate these in debug builds. assert(0); } else { const TEMPLATE_STATE *template_state = template_map_entry->second.get(); // TODO: Validate template push descriptor updates if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) { skip = ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData); } } return skip; } bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) { return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData); } bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) { return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData); } void CoreChecks::RecordUpdateDescriptorSetWithTemplateState(VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) { auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate); if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) { assert(0); } else { const TEMPLATE_STATE *template_state = template_map_entry->second.get(); // TODO: Record template push descriptor updates if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) { PerformUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData); } } } void CoreChecks::PreCallRecordUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) { RecordUpdateDescriptorSetWithTemplateState(descriptorSet, descriptorUpdateTemplate, pData); } void CoreChecks::PreCallRecordUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) { RecordUpdateDescriptorSetWithTemplateState(descriptorSet, descriptorUpdateTemplate, pData); } static std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> GetDslFromPipelineLayout( PIPELINE_LAYOUT_STATE const *layout_data, uint32_t set) { 
std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> dsl = nullptr; if (layout_data && (set < layout_data->set_layouts.size())) { dsl = layout_data->set_layouts[set]; } return dsl; } bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void *pData) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); const char *const func_name = "vkPushDescriptorSetWithTemplateKHR()"; bool skip = false; skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, func_name); auto layout_data = GetPipelineLayout(layout); auto dsl = GetDslFromPipelineLayout(layout_data, set); const VulkanTypedHandle layout_typed(layout, kVulkanObjectTypePipelineLayout); // Validate the set index points to a push descriptor set and is in range if (dsl) { if (!dsl->IsPushDescriptor()) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_typed.handle, "VUID-vkCmdPushDescriptorSetKHR-set-00365", "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set, report_data->FormatHandle(layout_typed).c_str()); } } else if (layout_data && (set >= layout_data->set_layouts.size())) { skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_typed.handle, "VUID-vkCmdPushDescriptorSetKHR-set-00364", "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set, report_data->FormatHandle(layout_typed).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size())); } const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate); if (template_state) { const auto &template_ci = template_state->create_info; static const std::map<VkPipelineBindPoint, std::string> bind_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")}; skip |= ValidatePipelineBindPoint(cb_state, template_ci.pipelineBindPoint, func_name, bind_errors); if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_TemplateType, "%s: descriptorUpdateTemplate %s was not created with flag " "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.", func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str()); } if (template_ci.set != set) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_Template_SetMismatched, "%s: descriptorUpdateTemplate %s created with set %" PRIu32 " does not match command parameter set %" PRIu32 ".", func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set); } if (!CompatForSet(set, layout_data, GetPipelineLayout(template_ci.pipelineLayout))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 
HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched, "%s: descriptorUpdateTemplate %s created with %s is incompatible with command parameter " "%s for set %" PRIu32, func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), report_data->FormatHandle(template_ci.pipelineLayout).c_str(), report_data->FormatHandle(layout).c_str(), set); } } if (dsl && template_state) { // Create an empty proxy in order to use the existing descriptor set update validation cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, VK_NULL_HANDLE, dsl, 0, this); // Decode the template into a set of write updates cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData, dsl->GetDescriptorSetLayout()); // Validate the decoded update against the proxy_ds skip |= ValidatePushDescriptorsUpdate(&proxy_ds, static_cast<uint32_t>(decoded_template.desc_writes.size()), decoded_template.desc_writes.data(), func_name); } return skip; } void CoreChecks::PreCallRecordCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void *pData) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate); if (template_state) { auto layout_data = GetPipelineLayout(layout); auto dsl = GetDslFromPipelineLayout(layout_data, set); const auto &template_ci = template_state->create_info; if (dsl && !dsl->IsDestroyed()) { // Decode the template into a set of write updates cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData, dsl->GetDescriptorSetLayout()); RecordCmdPushDescriptorSetState(cb_state, template_ci.pipelineBindPoint, layout, set, static_cast<uint32_t>(decoded_template.desc_writes.size()), decoded_template.desc_writes.data()); } } } void CoreChecks::RecordGetPhysicalDeviceDisplayPlanePropertiesState(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, void *pProperties) { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); if (*pPropertyCount) { if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) { physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT; } physical_device_state->display_plane_property_count = *pPropertyCount; } if (pProperties) { if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) { physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS; } } } void CoreChecks::PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, VkDisplayPlanePropertiesKHR *pProperties, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; RecordGetPhysicalDeviceDisplayPlanePropertiesState(physicalDevice, pPropertyCount, pProperties); } void CoreChecks::PostCallRecordGetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, VkDisplayPlaneProperties2KHR *pProperties, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return; RecordGetPhysicalDeviceDisplayPlanePropertiesState(physicalDevice, pPropertyCount, pProperties); } bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex, const char *api_name) { bool skip = false; auto 
physical_device_state = GetPhysicalDeviceState(physicalDevice); if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) { skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), kVUID_Core_Swapchain_GetSupportedDisplaysWithoutQuery, "Potential problem with calling %s() without first retrieving properties from " "vkGetPhysicalDeviceDisplayPlanePropertiesKHR or vkGetPhysicalDeviceDisplayPlaneProperties2KHR.", api_name); } else { if (planeIndex >= physical_device_state->display_plane_property_count) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249", "%s(): planeIndex must be in the range [0, %d] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR " "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?", api_name, physical_device_state->display_plane_property_count - 1); } } return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneSupportedDisplaysKHR"); return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneCapabilitiesKHR"); return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR *pCapabilities) { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex, "vkGetDisplayPlaneCapabilities2KHR"); return skip; } bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); return ValidateCmd(cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()"); } bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); return ValidateCmd(cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()"); } bool CoreChecks::PreCallValidateCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index) { if (disabled.query_validation) return false; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); QueryObject query_obj(queryPool, query, index); const char *cmd_name = "vkCmdBeginQueryIndexedEXT()"; bool skip = ValidateBeginQuery( cb_state, query_obj, flags, CMD_BEGINQUERYINDEXEDEXT, cmd_name, "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-cmdpool", "VUID-vkCmdBeginQueryIndexedEXT-queryType-02338", "VUID-vkCmdBeginQueryIndexedEXT-queryType-02333", "VUID-vkCmdBeginQueryIndexedEXT-queryType-02331", "VUID-vkCmdBeginQueryIndexedEXT-query-02332"); // Extension specific VU's const auto &query_pool_ci = 
GetQueryPoolState(query_obj.pool)->createInfo; if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) { if (device_extensions.vk_ext_transform_feedback && (index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02339", "%s: index %" PRIu32 " must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".", cmd_name, index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams); } } else if (index != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02340", "%s: index %" PRIu32 " must be zero if %s was not created with type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.", cmd_name, index, report_data->FormatHandle(queryPool).c_str()); } return skip; } void CoreChecks::PostCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index) { QueryObject query_obj = {queryPool, query, index}; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordBeginQuery(cb_state, query_obj); } bool CoreChecks::PreCallValidateCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index) { if (disabled.query_validation) return false; QueryObject query_obj = {queryPool, query, index}; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); return ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERYINDEXEDEXT, "vkCmdEndQueryIndexedEXT()", "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-cmdpool", "VUID-vkCmdEndQueryIndexedEXT-None-02342"); } void CoreChecks::PostCallRecordCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index) { QueryObject query_obj = {queryPool, query, index}; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordCmdEndQuery(cb_state, query_obj); } bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); // Minimal validation for command buffer state return ValidateCmd(cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()"); } bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT *pSampleLocationsInfo) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); // Minimal validation for command buffer state return ValidateCmd(cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()"); } bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name, const VkSamplerYcbcrConversionCreateInfo *create_info) { bool skip = false; if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateCreateSamplerYcbcrConversionANDROID(create_info); } else { // Not android hardware buffer if (VK_FORMAT_UNDEFINED == create_info->format) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01649", "%s: CreateInfo format type is 
VK_FORMAT_UNDEFINED.", func_name); } } return skip; } bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) { return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversion()", pCreateInfo); } bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) { return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversionKHR()", pCreateInfo); } void CoreChecks::RecordCreateSamplerYcbcrConversionState(const VkSamplerYcbcrConversionCreateInfo *create_info, VkSamplerYcbcrConversion ycbcr_conversion) { if (device_extensions.vk_android_external_memory_android_hardware_buffer) { RecordCreateSamplerYcbcrConversionANDROID(create_info, ycbcr_conversion); } } void CoreChecks::PostCallRecordCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion, VkResult result) { if (VK_SUCCESS != result) return; RecordCreateSamplerYcbcrConversionState(pCreateInfo, *pYcbcrConversion); } void CoreChecks::PostCallRecordCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion, VkResult result) { if (VK_SUCCESS != result) return; RecordCreateSamplerYcbcrConversionState(pCreateInfo, *pYcbcrConversion); } void CoreChecks::PostCallRecordDestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks *pAllocator) { if (!ycbcrConversion) return; if (device_extensions.vk_android_external_memory_android_hardware_buffer) { RecordDestroySamplerYcbcrConversionANDROID(ycbcrConversion); } } void CoreChecks::PostCallRecordDestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks *pAllocator) { if (!ycbcrConversion) return; if (device_extensions.vk_android_external_memory_android_hardware_buffer) { RecordDestroySamplerYcbcrConversionANDROID(ycbcrConversion); } } bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT *pInfo) { bool skip = false; if (!enabled_features.buffer_address.bufferDeviceAddress) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(pInfo->buffer), "VUID-vkGetBufferDeviceAddressEXT-None-02598", "The bufferDeviceAddress feature must: be enabled."); } if (physical_device_count > 1 && !enabled_features.buffer_address.bufferDeviceAddressMultiDevice) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(pInfo->buffer), "VUID-vkGetBufferDeviceAddressEXT-device-02599", "If device was created with multiple physical devices, then the " "bufferDeviceAddressMultiDevice feature must: be enabled."); } auto buffer_state = GetBufferState(pInfo->buffer); if (buffer_state) { if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT)) { skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkGetBufferDeviceAddressEXT()", "VUID-VkBufferDeviceAddressInfoEXT-buffer-02600"); } skip |= 
ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT, true, "VUID-VkBufferDeviceAddressInfoEXT-buffer-02601", "vkGetBufferDeviceAddressEXT()", "VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT"); } return skip; } bool CoreChecks::ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery, uint32_t queryCount, const char *vuid_badfirst, const char *vuid_badrange) { bool skip = false; if (firstQuery >= totalCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), vuid_badfirst, "firstQuery (%" PRIu32 ") greater than or equal to query pool count (%" PRIu32 ") for %s", firstQuery, totalCount, report_data->FormatHandle(queryPool).c_str()); } if ((firstQuery + queryCount) > totalCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), vuid_badrange, "Query range [%" PRIu32 ", %" PRIu32 ") goes beyond query pool count (%" PRIu32 ") for %s", firstQuery, firstQuery + queryCount, totalCount, report_data->FormatHandle(queryPool).c_str()); } return skip; } bool CoreChecks::PreCallValidateResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { if (disabled.query_validation) return false; bool skip = false; if (!enabled_features.host_query_reset_features.hostQueryReset) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), "VUID-vkResetQueryPoolEXT-None-02665", "Host query reset not enabled for device"); } auto query_pool_it = queryPoolMap.find(queryPool); if (query_pool_it != queryPoolMap.end()) { skip |= ValidateQueryRange(device, queryPool, query_pool_it->second->createInfo.queryCount, firstQuery, queryCount, "VUID-vkResetQueryPoolEXT-firstQuery-02666", "VUID-vkResetQueryPoolEXT-firstQuery-02667"); } return skip; } void CoreChecks::PostCallRecordResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { // Do nothing if the feature is not enabled. if (!enabled_features.host_query_reset_features.hostQueryReset) return; // Do nothing if the query pool has been destroyed. auto query_pool_it = queryPoolMap.find(queryPool); if (query_pool_it == queryPoolMap.end()) return; // Reset the state of existing entries. 
QueryObject query_obj{queryPool, 0}; const uint32_t max_query_count = std::min(queryCount, query_pool_it->second->createInfo.queryCount - firstQuery); for (uint32_t i = 0; i < max_query_count; ++i) { query_obj.query = firstQuery + i; auto query_it = queryToStateMap.find(query_obj); if (query_it != queryToStateMap.end()) query_it->second = QUERYSTATE_RESET; } } void CoreChecks::PreCallRecordGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pPhysicalDeviceProperties) { // There is an implicit layer that can cause this call to return 0 for maxBoundDescriptorSets - Ignore such calls if (enabled.gpu_validation && enabled.gpu_validation_reserve_binding_slot && pPhysicalDeviceProperties->limits.maxBoundDescriptorSets > 0) { if (pPhysicalDeviceProperties->limits.maxBoundDescriptorSets > 1) { pPhysicalDeviceProperties->limits.maxBoundDescriptorSets -= 1; } else { log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), "UNASSIGNED-GPU-Assisted Validation Setup Error.", "Unable to reserve descriptor binding slot on a device with only one slot."); } } } VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkValidationCacheEXT *pValidationCache) { *pValidationCache = ValidationCache::Create(pCreateInfo); return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED; } void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks *pAllocator) { delete CastFromHandle<ValidationCache *>(validationCache); } VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize, void *pData) { size_t inSize = *pDataSize; CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData); return (pData && *pDataSize != inSize) ? 
VK_INCOMPLETE : VK_SUCCESS; } VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT *pSrcCaches) { bool skip = false; auto dst = CastFromHandle<ValidationCache *>(dstCache); VkResult result = VK_SUCCESS; for (uint32_t i = 0; i < srcCacheCount; i++) { auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]); if (src == dst) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT, 0, "VUID-vkMergeValidationCachesEXT-dstCache-01536", "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.", HandleToUint64(dstCache)); result = VK_ERROR_VALIDATION_FAILED_EXT; } if (!skip) { dst->Merge(src); } } return result; } bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) { bool skip = false; CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00108"); skip |= ValidateDeviceMaskToZero(deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00109"); skip |= ValidateDeviceMaskToCommandBuffer(cb_state, deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00110"); if (cb_state->activeRenderPass) { skip |= ValidateDeviceMaskToRenderPass(cb_state, deviceMask, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), "VUID-vkCmdSetDeviceMask-deviceMask-00111"); } return skip; } bool CoreChecks::ValidateQueryPoolStride(const std::string &vuid_not_64, const std::string &vuid_64, const VkDeviceSize stride, const char *parameter_name, const uint64_t parameter_value, const VkQueryResultFlags flags) { bool skip = false; if (flags & VK_QUERY_RESULT_64_BIT) { static const int condition_multiples = 0b0111; if ((stride & condition_multiples) || (parameter_value & condition_multiples)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value); } } else { static const int condition_multiples = 0b0011; if ((stride & condition_multiples) || (parameter_value & condition_multiples)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid_not_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value); } } return skip; } bool CoreChecks::ValidateCmdDrawStrideWithStruct(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride, const char *struct_name, const uint32_t struct_size) { bool skip = false; static const int condition_multiples = 0b0011; if ((stride & condition_multiples) || (stride < struct_size)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), vuid, "stride %d is invalid or less than sizeof(%s) %d.", stride, struct_name, struct_size); } return skip; } bool CoreChecks::ValidateCmdDrawStrideWithBuffer(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride, const char *struct_name, const uint32_t struct_size, const uint32_t drawCount, const VkDeviceSize offset, const 
BUFFER_STATE *buffer_state) { bool skip = false; uint64_t validation_value = stride * (drawCount - 1) + offset + struct_size; if (validation_value > buffer_state->createInfo.size) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), vuid, "stride[%d] * (drawCount[%d] - 1) + offset[%" PRIx64 "] + sizeof(%s)[%d] = %" PRIx64 " is greater than the size[%" PRIx64 "] of %s.", stride, drawCount, offset, struct_name, struct_size, validation_value, buffer_state->createInfo.size, report_data->FormatHandle(buffer_state->buffer).c_str()); } return skip; }
1
11109
Good call; none of these are technically invalid.
KhronosGroup-Vulkan-ValidationLayers
cpp
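The validation code in this row tracks each queue-family query through a small state machine (UNCALLED, QUERY_COUNT, QUERY_DETAILS) so it can warn when an application asks for property data without first querying the count. The following is a minimal Python sketch of that tracking idea only; the class and method names are hypothetical and are not part of the Vulkan-ValidationLayers API.

from enum import IntEnum

class CallState(IntEnum):
    # Mirrors the layer's UNCALLED -> QUERY_COUNT -> QUERY_DETAILS progression.
    UNCALLED = 0
    QUERY_COUNT = 1
    QUERY_DETAILS = 2

class QueueFamilyQueryTracker:
    # Hypothetical tracker for the two-call enumeration idiom: call once with a
    # NULL array to learn the count, then call again to fetch the data.
    def __init__(self):
        self.state = CallState.UNCALLED
        self.known_count = 0

    def record_query(self, requested_count, properties_is_null):
        warnings = []
        if properties_is_null:
            # Count-only call: remember how many entries were reported.
            self.state = max(self.state, CallState.QUERY_COUNT)
            self.known_count = max(self.known_count, requested_count)
        else:
            # Data call: warn if the count was never queried first, the same
            # condition ValidateCommonGetPhysicalDeviceQueueFamilyProperties checks.
            if self.state == CallState.UNCALLED:
                warnings.append("query the count with NULL pQueueFamilyProperties first")
            elif requested_count > self.known_count:
                warnings.append("requested %d entries but only %d were previously reported"
                                % (requested_count, self.known_count))
            self.state = CallState.QUERY_DETAILS
        return warnings

tracker = QueueFamilyQueryTracker()
print(tracker.record_query(4, properties_is_null=False))  # warns: count never queried
print(tracker.record_query(4, properties_is_null=True))   # count-only call
print(tracker.record_query(4, properties_is_null=False))  # clean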
@@ -20,7 +20,7 @@ partial class Build .OnlyWhenStatic(() => IsWin) .Executes(() => { - var project = ProfilerDirectory.GlobFiles("**/Datadog.AutoInstrumentation.Profiler.Native.Windows.vcxproj").Single(); + var project = ProfilerDirectory.GlobFiles("**/Datadog.Profiler.Native.Windows.vcxproj").Single(); // run: msbuild /property:Configuration=${{matrix.configuration}} /property:Platform=${{matrix.platform}} dd-continuous-profiler-dotnet\src\ProfilerEngine\Datadog.AutoInstrumentation.Profiler.Native.Windows\Datadog.AutoInstrumentation.Profiler.Native.Windows.WithTests.proj // If we're building for x64, build for x86 too
1
using Nuke.Common; using Nuke.Common.Tools.DotNet; using Nuke.Common.IO; using System.Linq; using Nuke.Common.Tooling; using Nuke.Common.Tools.MSBuild; using static Nuke.Common.EnvironmentInfo; using static Nuke.Common.IO.FileSystemTasks; using static Nuke.Common.Tools.MSBuild.MSBuildTasks; partial class Build { Target CompileNativeLoader => _ => _ .Unlisted() .Description("Compiles the native loader") .DependsOn(CompileNativeLoaderWindows); Target CompileNativeLoaderWindows => _ => _ .Unlisted() .OnlyWhenStatic(() => IsWin) .Executes(() => { var project = ProfilerDirectory.GlobFiles("**/Datadog.AutoInstrumentation.Profiler.Native.Windows.vcxproj").Single(); // run: msbuild /property:Configuration=${{matrix.configuration}} /property:Platform=${{matrix.platform}} dd-continuous-profiler-dotnet\src\ProfilerEngine\Datadog.AutoInstrumentation.Profiler.Native.Windows\Datadog.AutoInstrumentation.Profiler.Native.Windows.WithTests.proj // If we're building for x64, build for x86 too var platforms = Equals(TargetPlatform, MSBuildTargetPlatform.x64) ? new[] { MSBuildTargetPlatform.x64, MSBuildTargetPlatform.x86 } : new[] { MSBuildTargetPlatform.x86 }; // Can't use dotnet msbuild, as needs to use the VS version of MSBuild // Build native profiler assets MSBuild(s => s .SetTargetPath(NativeLoaderProject) .SetConfiguration(BuildConfiguration) .SetMSBuildPath() .DisableRestore() .SetMaxCpuCount(null) .CombineWith(platforms, (m, platform) => m .SetTargetPlatform(platform))); }); Target PublishNativeLoader => _ => _ .Unlisted() .DependsOn(PublishNativeLoaderWindows); Target PublishNativeLoaderWindows => _ => _ .Unlisted() .OnlyWhenStatic(() => IsWin) .After(CompileNativeLoader) .Executes(() => { foreach (var architecture in ArchitecturesForPlatform) { // Copy native tracer assets var source = NativeProfilerProject.Directory / "bin" / BuildConfiguration / architecture.ToString() / $"{NativeProfilerProject.Name}.dll"; var dest = TracerHomeDirectory / $"win-{architecture}"; Logger.Info($"Copying '{source}' to '{dest}'"); CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite); // Copy native loader assets source = NativeLoaderProject.Directory / "bin" / BuildConfiguration / architecture.ToString() / "loader.conf"; dest = MonitoringHomeDirectory; Logger.Info($"Copying '{source}' to '{dest}'"); CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite); source = NativeLoaderProject.Directory / "bin" / BuildConfiguration / architecture.ToString() / $"{NativeLoaderProject.Name}.dll"; var destFile = MonitoringHomeDirectory / $"{NativeLoaderProject.Name}.{architecture.ToString()}.dll"; Logger.Info($"Copying file '{source}' to 'file {destFile}'"); CopyFile(source, destFile, FileExistsPolicy.Overwrite); source = NativeLoaderProject.Directory / "bin" / BuildConfiguration / architecture.ToString() / $"{NativeLoaderProject.Name}.pdb"; destFile = MonitoringHomeDirectory / $"{NativeLoaderProject.Name}.{architecture.ToString()}.pdb"; Logger.Info($"Copying '{source}' to '{destFile}'"); CopyFile(source, destFile, FileExistsPolicy.Overwrite); } }); }
1
25233
this one should be updated too
DataDog-dd-trace-dotnet
.cs
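The review message points out that the msbuild comment inside Executes still references the old Datadog.AutoInstrumentation.Profiler.Native.Windows name after the GlobFiles pattern was renamed to Datadog.Profiler.Native.Windows. Renames like this are easy to leave half-done; a quick tree scan for the stale identifier catches the leftovers. This is a generic, hypothetical helper (the stale name is taken from the row; the script and its file-extension filter are assumptions, not part of the Nuke build).

from pathlib import Path

STALE_NAME = "Datadog.AutoInstrumentation.Profiler.Native.Windows"

def find_stale_references(root):
    # Yield (file, line_no, line) for every remaining mention of the old
    # project name, so a rename does not leave stale comments behind.
    for path in Path(root).rglob("*"):
        if path.suffix.lower() not in {".cs", ".proj", ".vcxproj", ".yml"}:
            continue
        try:
            text = path.read_text(encoding="utf-8", errors="ignore")
        except OSError:
            continue
        for line_no, line in enumerate(text.splitlines(), start=1):
            if STALE_NAME in line:
                yield path, line_no, line.strip()

if __name__ == "__main__":
    for hit in find_stale_references("."):
        print(*hit, sep=":")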
@@ -39,6 +39,7 @@ public final class InvocationFactory { operationMeta, swaggerArguments); invocation.addContext(Const.SRC_MICROSERVICE, getMicroserviceName()); + invocation.addContext(Const.SRC_INSTANCE, getInstanceId()); return invocation; }
1
/* * Copyright 2017 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.servicecomb.core.invocation; import io.servicecomb.core.Const; import io.servicecomb.core.Endpoint; import io.servicecomb.core.Invocation; import io.servicecomb.core.definition.MicroserviceMeta; import io.servicecomb.core.definition.OperationMeta; import io.servicecomb.core.definition.SchemaMeta; import io.servicecomb.core.provider.consumer.ReferenceConfig; import io.servicecomb.serviceregistry.RegistryUtils; public final class InvocationFactory { private InvocationFactory() { } private static String getMicroserviceName() { return RegistryUtils.getMicroservice().getServiceName(); } public static Invocation forConsumer(ReferenceConfig referenceConfig, OperationMeta operationMeta, Object[] swaggerArguments) { Invocation invocation = new Invocation(referenceConfig, operationMeta, swaggerArguments); invocation.addContext(Const.SRC_MICROSERVICE, getMicroserviceName()); return invocation; } /** * Used on the consumer side; cached at the schemaMeta level, each call is dispatched by operationName. */ public static Invocation forConsumer(ReferenceConfig referenceConfig, SchemaMeta schemaMeta, String operationName, Object[] swaggerArguments) { OperationMeta operationMeta = schemaMeta.ensureFindOperation(operationName); return forConsumer(referenceConfig, operationMeta, swaggerArguments); } /** * Shortcut for the TCC scenario; used on the consumer side. */ public static Invocation forConsumer(ReferenceConfig referenceConfig, String operationQualifiedName, Object[] swaggerArguments) { MicroserviceMeta microserviceMeta = referenceConfig.getMicroserviceMeta(); OperationMeta operationMeta = microserviceMeta.ensureFindOperation(operationQualifiedName); return forConsumer(referenceConfig, operationMeta, swaggerArguments); } /** * Creates an invocation when the transport server receives a request. */ public static Invocation forProvider(Endpoint endpoint, OperationMeta operationMeta, Object[] swaggerArguments) { return new Invocation(endpoint, operationMeta, swaggerArguments); } }
1
7550
Maybe we need to add microserviceId too.
apache-servicecomb-java-chassis
java
@@ -0,0 +1,6 @@ +"""Tests for missing-param-doc and missing-type-doc for Numpy style docstrings +with accept-no-param-doc = no +""" +# pylint: disable=invalid-name, unused-argument, undefined-variable +# pylint: disable=line-too-long, too-few-public-methods, missing-class-docstring +# pylint: disable=missing-function-docstring, function-redefined, inconsistent-return-statements
1
1
18964
I'm curious: do you create this commit at the very end by rebasing and taking the value you had after moving everything?
PyCQA-pylint
py
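The test file added in this patch exercises pylint's missing-param-doc and missing-type-doc checkers against NumPy-style docstrings with accept-no-param-doc = no. For reference, this is the docstring shape those checkers accept: every parameter listed under a Parameters section with a "name : type" line followed by an indented description. The function itself is a made-up example, not taken from the patch.

def clamp(value, lower, upper):
    """Clamp a value into a closed interval.

    Parameters
    ----------
    value : float
        The number to clamp.
    lower : float
        Inclusive lower bound.
    upper : float
        Inclusive upper bound.

    Returns
    -------
    float
        ``value`` limited to ``[lower, upper]``.
    """
    return max(lower, min(value, upper))

assert clamp(5.0, 0.0, 2.0) == 2.0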
@@ -939,12 +939,10 @@ describe('Core_alter', () => { expect(spec().$container.find('tr:eq(6) td:eq(0)').html()).toEqual('b1'); }); - it('should not add more source rows than defined in maxRows when trimming rows using the modifyRow hook', () => { + it('should not add more source rows than defined in maxRows when trimming rows using the TrimRows plugin', () => { const hot = handsontable({ data: Handsontable.helper.createSpreadsheetData(10, 4), - modifyRow(row) { - return [8, 9].indexOf(row) > -1 ? null : row; - }, + trimRows: [8, 9], maxRows: 10 });
1
describe('Core_alter', () => { const id = 'testContainer'; beforeEach(function() { this.$container = $(`<div id="${id}"></div>`).appendTo('body'); }); afterEach(function() { if (this.$container) { destroy(); this.$container.remove(); } }); const arrayOfNestedObjects = function() { return [ { id: 1, name: { first: 'Ted', last: 'Right' }, address: 'Street Name', zip: '80410', city: 'City Name' }, { id: 2, name: { first: 'Frank', last: 'Honest' }, address: 'Street Name', zip: '80410', city: 'City Name' }, { id: 3, name: { first: 'Joan', last: 'Well' }, address: 'Street Name', zip: '80410', city: 'City Name' } ]; }; const arrayOfArrays = function() { return [ ['', 'Kia', 'Nissan', 'Toyota', 'Honda'], ['2008', 10, 11, 12, 13], ['2009', 20, 11, 14, 13], ['2010', 30, 15, 12, 13] ]; }; describe('remove row', () => { describe('multiple items at once', () => { it('should remove rows when index groups are passed in ascending order', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(15, 5), }); // [[rowVisualIndex, amountRowsToRemove] ...] alter('remove_row', [[1, 3], [5, 1], [7, 3], [11, 2]]); // It remove rows as follow: // 1--------3 5-1 7---------3 11-----2 // A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15 // // Result: A1, A5, A7, A11, A14, A15 expect(getDataAtCol(0)).toEqual(['A1', 'A5', 'A7', 'A11', 'A14', 'A15']); expect(getData().length).toBe(6); }); it('should remove rows when index groups are passed in descending order', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(15, 5), }); // [[rowVisualIndex, amountRowsToRemove] ...] alter('remove_row', [[11, 2], [7, 3], [5, 1], [1, 3]]); // It remove rows as follow: // 1--------3 5-1 7---------3 11-----2 // A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15 // // Result: A1, A5, A7, A11, A14, A15 expect(getDataAtCol(0)).toEqual(['A1', 'A5', 'A7', 'A11', 'A14', 'A15']); expect(getData().length).toBe(6); }); it('should remove rows when index groups are passed as intersecting values', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(15, 5), }); // [[rowVisualIndex, amountRowsToRemove] ...] alter('remove_row', [[1, 3], [4, 2], [5, 5], [11, 1]]); // It remove rows as follow: // 1---------------------------------9 11-1 // A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15 // // Result: A1, A11, A13, A14, A15 expect(getDataAtCol(0)).toEqual(['A1', 'A11', 'A13', 'A14', 'A15']); expect(getData().length).toBe(5); }); it('should remove rows when index groups are passed as intersecting values (the second scenario)', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(15, 5), }); // [[rowVisualIndex, amountRowsToRemove] ...] alter('remove_row', [[1, 3], [2, 1], [5, 2]]); // It remove columns as follow: // 1--------3 5----2 // A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15 // // Result: A1, A5, A8, A9, A10, A11, A12, A13, A14, A15 expect(getDataAtCol(0)).toEqual(['A1', 'A5', 'A8', 'A9', 'A10', 'A11', 'A12', 'A13', 'A14', 'A15']); expect(getData().length).toBe(10); }); it('should remove rows when index groups are passed as intersecting values (placed randomly)', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(15, 5), }); // [[rowVisualIndex, amountRowsToRemove] ...] 
alter('remove_row', [[4, 2], [11, 1], [5, 5], [1, 3]]); // It remove rows as follow: // 1---------------------------------9 11-1 // A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15 // // Result: A1, A11, A13, A14, A15 expect(getDataAtCol(0)).toEqual(['A1', 'A11', 'A13', 'A14', 'A15']); expect(getData().length).toBe(5); }); }); it('should remove row', () => { handsontable({ minRows: 5, data: arrayOfNestedObjects(), columns: [ { data: 'id' }, { data: 'name.first' } ] }); alter('remove_row', 1); expect(getDataAtCell(1, 1)).toEqual('Joan'); // Joan should be moved up expect(getData().length).toEqual(5); // new row should be added by keepEmptyRows }); it('should not remove row if amount is zero', () => { handsontable({ data: arrayOfNestedObjects(), columns: [ { data: 'id' }, { data: 'name.first' } ], }); const countedRows = countRows(); alter('remove_row', 1, 0); expect(countRows()).toBe(countedRows); }); it('should fire beforeRemoveRow event before removing row', () => { const onBeforeRemoveRow = jasmine.createSpy('onBeforeRemoveRow'); handsontable({ data: arrayOfNestedObjects(), columns: [ { data: 'id' }, { data: 'name.first' } ], beforeRemoveRow: onBeforeRemoveRow, }); alter('remove_row', 2, 1, 'customSource'); expect(onBeforeRemoveRow).toHaveBeenCalledWith(countRows(), 1, [2], 'customSource', undefined, undefined); }); it('should not remove row if removing has been canceled by beforeRemoveRow event handler', () => { const onBeforeRemoveRow = jasmine.createSpy('onBeforeRemoveRow'); onBeforeRemoveRow.and.callFake(() => false); handsontable({ data: arrayOfNestedObjects(), columns: [ { data: 'id' }, { data: 'name.first' } ], beforeRemoveRow: onBeforeRemoveRow }); expect(countRows()).toEqual(3); alter('remove_row'); expect(countRows()).toEqual(3); }); it('should not remove rows below minRows', () => { handsontable({ startRows: 5, minRows: 4 }); alter('remove_row', 1); alter('remove_row', 1); alter('remove_row', 1); expect(countRows()).toEqual(4); }); it('should not remove cols below minCols', () => { handsontable({ startCols: 5, minCols: 4 }); alter('remove_col', 1); alter('remove_col', 1); alter('remove_col', 1); expect(countCols()).toEqual(4); }); it('should remove one row if amount parameter is empty', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3'], ['e1', 'e2', 'e3'] ] }); alter('remove_row', 1); expect(countRows()).toEqual(4); expect(spec().$container.find('tr:eq(0) td:eq(0)').html()).toEqual('a1'); expect(spec().$container.find('tr:eq(1) td:eq(1)').html()).toEqual('c2'); }); it('should remove as many rows as given in the amount parameter', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3'], ['e1', 'e2', 'e3'] ] }); alter('remove_row', 1, 3); expect(countRows()).toEqual(2); expect(spec().$container.find('tr:eq(0) td:eq(0)').html()).toEqual('a1'); expect(spec().$container.find('tr:eq(1) td:eq(1)').html()).toEqual('e2'); }); it('should not remove more rows that exist', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3'], ['e1', 'e2', 'e3'] ] }); alter('remove_row', 1, 10); expect(countRows()).toEqual(1); expect(getHtCore().find('tr:last td:last').html()).toEqual('a3'); }); it('should remove one row from end if no parameters are given', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3'], ['e1', 'e2', 'e3'] ] }); alter('remove_row'); 
expect(countRows()).toEqual(4); expect(getHtCore().find('tr:last td:eq(0)').html()).toEqual('d1'); }); it('should remove amount of rows from end if index parameter is not given', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3'], ['e1', 'e2', 'e3'] ] }); alter('remove_row', null, 3); expect(countRows()).toEqual(2); expect(getHtCore().find('tr:last td:eq(0)').html()).toEqual('b1'); }); it('should remove rows from table with fixedRows', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'] ], fixedRowsTop: 1, minSpareRows: 0 }); alter('remove_row', 1); expect(countRows()).toEqual(1); }); it('should remove all rows from table with fixedRows', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'] ], fixedRowsTop: 1, minSpareRows: 0 }); alter('remove_row', 1); alter('remove_row', 1); expect(countRows()).toEqual(0); }); it('should remove row\'s cellProperties', () => { handsontable({ startCols: 1, startRows: 3 }); getCellMeta(0, 0).someValue = [0, 0]; getCellMeta(1, 0).someValue = [1, 0]; getCellMeta(2, 0).someValue = [2, 0]; alter('remove_row', 0); expect(getCellMeta(0, 0).someValue).toEqual([1, 0]); expect(getCellMeta(1, 0).someValue).toEqual([2, 0]); }); it('should fire callback on remove row', () => { let outputBefore; let outputAfter; handsontable({ minRows: 5, data: arrayOfNestedObjects(), columns: [ { data: 'id' }, { data: 'name.first' } ], beforeRemoveRow(index, amount, removedRows, source) { outputBefore = [index, amount, removedRows, source]; }, afterRemoveRow(index, amount, removedRows, source) { outputAfter = [index, amount, removedRows, source]; } }); alter('remove_row', 1, 2, 'customSource'); expect(outputBefore).toEqual([1, 2, [1, 2], 'customSource']); expect(outputAfter).toEqual([1, 2, [1, 2], 'customSource']); }); it('should decrement the number of fixed rows, if a fix row is removed', () => { const hot = handsontable({ startCols: 1, startRows: 3, fixedRowsTop: 4 }); alter('remove_row', 1, 1); expect(hot.getSettings().fixedRowsTop).toEqual(3); alter('remove_row', 1, 2); expect(hot.getSettings().fixedRowsTop).toEqual(1); }); it('should shift the cell meta according to the new row layout', () => { handsontable({ startCols: 3, startRows: 4 }); setCellMeta(2, 1, 'className', 'test'); alter('remove_row', 1, 1); expect(getCellMeta(1, 1).className).toEqual('test'); }); it('should shift the cell meta according to the new rows (>1) layout', () => { handsontable({ startCols: 3, startRows: 4 }); setCellMeta(2, 1, 'className', 'test'); alter('remove_row', 0, 2); expect(getCellMeta(0, 1).className).toEqual('test'); }); }); describe('remove column', () => { describe('multiple items at once', () => { it('should remove columns when index groups are passed in ascending order', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(5, 15), }); // [[columnVisualIndex, amountColumnsToRemove] ...] alter('remove_col', [[1, 3], [5, 1], [7, 3], [11, 2]]); // It remove columns as follow: // 1--------3 5-1 7--------3 11---2 // A1, B1, C1, D1, E1, F1, G1, H1, I1, J1, K1, L1, M1, N1, O1 // // Result: A1, E1, G1, K1, N1, O1 expect(getDataAtRow(0)).toEqual(['A1', 'E1', 'G1', 'K1', 'N1', 'O1']); expect(getData()[0].length).toBe(6); }); it('should remove columns when index groups are passed in descending order', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(5, 15), }); // [[columnVisualIndex, amountColumnsToRemove] ...] 
alter('remove_col', [[11, 2], [7, 3], [5, 1], [1, 3]]); // It remove columns as follow: // 1--------3 5-1 7--------3 11---2 // A1, B1, C1, D1, E1, F1, G1, H1, I1, J1, K1, L1, M1, N1, O1 // // Result: A1, E1, G1, K1, N1, O1 expect(getDataAtRow(0)).toEqual(['A1', 'E1', 'G1', 'K1', 'N1', 'O1']); expect(getData()[0].length).toBe(6); }); it('should remove columns when index groups are passed as intersecting values', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(5, 15), }); // [[columnVisualIndex, amountColumnsToRemove] ...] alter('remove_col', [[1, 3], [4, 2], [5, 5], [11, 1]]); // It remove columns as follow: // 1--------------------------------9 11-1 // A1, B1, C1, D1, E1, F1, G1, H1, I1, J1, K1, L1, M1, N1, O1 // // Result: A1, K1, M1, N1, O1 expect(getDataAtRow(0)).toEqual(['A1', 'K1', 'M1', 'N1', 'O1']); expect(getData()[0].length).toBe(5); }); it('should remove columns when index groups are passed as intersecting values (the second scenario)', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(5, 15), }); // [[columnVisualIndex, amountColumnsToRemove] ...] alter('remove_col', [[1, 3], [2, 1], [5, 2]]); // It remove columns as follow: // 1--------3 5----2 // A1, B1, C1, D1, E1, F1, G1, H1, I1, J1, K1, L1, M1, N1, O1 // // Result: A1, E1, H1 expect(getDataAtRow(0)).toEqual(['A1', 'E1', 'H1', 'I1', 'J1', 'K1', 'L1', 'M1', 'N1', 'O1']); expect(getData()[0].length).toBe(10); }); it('should remove columns when index groups are passed as intersecting values (placed randomly)', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(5, 15), }); // [[columnVisualIndex, amountColumnsToRemove] ...] alter('remove_col', [[4, 2], [11, 1], [5, 5], [1, 3]]); // It remove columns as follow: // 1--------------------------------9 11-1 // A1, B1, C1, D1, E1, F1, G1, H1, I1, J1, K1, L1, M1, N1, O1 // // Result: A1, K1, M1, N1, O1 expect(getDataAtRow(0)).toEqual(['A1', 'K1', 'M1', 'N1', 'O1']); expect(getData()[0].length).toBe(5); }); }); it('should not remove column if amount is zero', () => { handsontable({ data: arrayOfArrays(), }); const countedColumns = countCols(); alter('remove_col', 1, 0); expect(countCols()).toBe(countedColumns); }); it('should remove one column if amount parameter is empty', () => { handsontable({ data: [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ] }); alter('remove_col', 1); expect(countCols()).toEqual(7); expect(spec().$container.find('tr:eq(0) td:eq(0)').html()).toEqual('a'); expect(spec().$container.find('tr:eq(1) td:eq(1)').html()).toEqual('c'); }); it('should remove as many columns as given in the amount parameter', () => { handsontable({ data: [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ] }); alter('remove_col', 1, 3); expect(countCols()).toEqual(5); expect(spec().$container.find('tr:eq(0) td:eq(0)').html()).toEqual('a'); expect(spec().$container.find('tr:eq(1) td:eq(1)').html()).toEqual('e'); }); it('should not remove more columns that exist', () => { handsontable({ data: [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ] }); alter('remove_col', 6, 3); expect(countCols()).toEqual(6); expect(spec().$container.find('tr:eq(1) td:last').html()).toEqual('f'); }); it('should remove one column from end if no parameters are given', () => { handsontable({ data: [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ] }); alter('remove_col'); expect(countCols()).toEqual(7); 
expect(spec().$container.find('tr:eq(1) td:last').html()).toEqual('g'); }); it('should remove amount of columns from end if index parameter is not given', () => { handsontable({ data: [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ] }); alter('remove_col', null, 3); expect(countCols()).toEqual(5); expect(spec().$container.find('tr:eq(1) td:last').html()).toEqual('e'); }); it('should fire beforeRemoveCol event before removing col', () => { const onBeforeRemoveCol = jasmine.createSpy('onBeforeRemoveCol'); handsontable({ beforeRemoveCol: onBeforeRemoveCol }); alter('remove_col'); expect(onBeforeRemoveCol).toHaveBeenCalledWith(countCols(), 1, [4], undefined, undefined, undefined); }); it('should not remove column if removing has been canceled by beforeRemoveCol event handler', () => { const onBeforeRemoveCol = jasmine.createSpy('onBeforeRemoveCol'); onBeforeRemoveCol.and.callFake(() => false); handsontable({ beforeRemoveCol: onBeforeRemoveCol }); expect(countCols()).toEqual(5); alter('remove_col'); expect(countCols()).toEqual(5); }); it('should fire callback on remove col', () => { let outputBefore; let outputAfter; handsontable({ minRows: 5, data: arrayOfArrays(), beforeRemoveCol(index, amount, removedCols, source) { outputBefore = [index, amount, removedCols, source]; }, afterRemoveCol(index, amount, removedCols, source) { outputAfter = [index, amount, removedCols, source]; } }); alter('remove_col', 1, 2, 'customSource'); expect(outputBefore).toEqual([1, 2, [1, 2], 'customSource']); expect(outputAfter).toEqual([1, 2, [1, 2], 'customSource']); }); it('should remove column\'s properties', () => { handsontable({ startCols: 3, startRows: 1 }); getCellMeta(0, 0).someValue = [0, 0]; getCellMeta(0, 1).someValue = [0, 1]; getCellMeta(0, 2).someValue = [0, 2]; alter('remove_col', 0); expect(getCellMeta(0, 0).someValue).toEqual([0, 1]); expect(getCellMeta(0, 1).someValue).toEqual([0, 2]); }); it('should remove column when not all rows are visible in the viewport', () => { spec().$container.css({ height: '100', overflow: 'auto' }); handsontable({ startCols: 3, startRows: 20 }); expect(getHtCore().find('tbody tr').length).toBeLessThan(20); expect(countCols()).toEqual(3); alter('remove_col', 0); expect(countCols()).toEqual(2); }); it('should not remove column header together with the column, if headers were NOT specified explicitly', () => { handsontable({ startCols: 3, startRows: 2, colHeaders: true }); expect(getColHeader()).toEqual(['A', 'B', 'C']); expect(countCols()).toEqual(3); alter('remove_col', 1); expect(countCols()).toEqual(2); expect(getColHeader()).toEqual(['A', 'B']); }); it('should remove column header together with the column, if headers were specified explicitly', () => { handsontable({ startCols: 3, startRows: 2, colHeaders: ['Header0', 'Header1', 'Header2'] }); expect(getColHeader()).toEqual(['Header0', 'Header1', 'Header2']); expect(countCols()).toEqual(3); alter('remove_col', 1); expect(countCols()).toEqual(2); expect(getColHeader()).toEqual(['Header0', 'Header2']); }); it('should decrement the number of fixed columns, if a fix column is removed', () => { const hot = handsontable({ startCols: 1, startRows: 3, fixedColumnsLeft: 4 }); alter('remove_col', 1, 1); expect(hot.getSettings().fixedColumnsLeft).toEqual(3); alter('remove_col', 1, 2); expect(hot.getSettings().fixedColumnsLeft).toEqual(1); }); it('should shift the cell meta according to the new column layout', () => { handsontable({ startCols: 4, startRows: 3 }); setCellMeta(1, 2, 
'className', 'test'); alter('remove_col', 1, 1); expect(getCellMeta(1, 1).className).toEqual('test'); }); it('should shift the cell meta according to the new columns (>1) layout', () => { handsontable({ startCols: 4, startRows: 3 }); setCellMeta(1, 2, 'className', 'test'); alter('remove_col', 0, 2); expect(getCellMeta(1, 0).className).toEqual('test'); }); }); describe('insert row', () => { it('should insert row at given index', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3'], ['e1', 'e2', 'e3'] ] }); alter('insert_row', 1); expect(countRows()).toEqual(6); expect(spec().$container.find('tr:eq(2) td:eq(0)').html()).toEqual('b1'); }); it('should fire the beforeCreateRow hook before creating a row', () => { const onBeforeCreateRow = jasmine.createSpy('beforeCreateRow'); handsontable({ data: arrayOfNestedObjects(), columns: [ { data: 'id' }, { data: 'name.first' } ], beforeCreateRow: onBeforeCreateRow, }); alter('insert_row', 2, 1, 'customSource'); expect(onBeforeCreateRow).toHaveBeenCalledWith(2, 1, 'customSource', void 0, void 0, void 0); }); it('should not create row if removing has been canceled by beforeCreateRow hook handler', () => { const beforeCreateRow = jasmine.createSpy('beforeCreateRow'); beforeCreateRow.and.callFake(() => false); handsontable({ data: arrayOfNestedObjects(), columns: [ { data: 'id' }, { data: 'name.first' } ], beforeCreateRow }); expect(countRows()).toEqual(3); alter('insert_row'); expect(countRows()).toEqual(3); }); it('should insert row at the end if index is not given', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3'], ['e1', 'e2', 'e3'] ] }); alter('insert_row'); expect(countRows()).toEqual(6); expect(getHtCore().find('tr:eq(4) td:eq(0)').html()).toEqual('e1'); expect(getHtCore().find('tr:last td:eq(0)').html()).toEqual(''); }); it('should not change cellMeta after executing `insert row` without parameters (#3581, #3989, #2114)', () => { const greenRenderer = function(instance, td, ...args) { Handsontable.renderers.TextRenderer.apply(this, [instance, td, ...args]); td.style.backgroundColor = 'green'; }; handsontable({ data: [ [0, 'a', true], [1, 'b', false], [2, 'c', true], [3, 'd', true] ], cell: [ { row: 0, col: 0, renderer: greenRenderer, type: 'text', readOnly: true } ], columns: [ { type: 'numeric' }, { type: 'text' }, { type: 'checkbox' } ] }); alter('insert_row'); expect(getCellMeta(1, 0).renderer).not.toBe(greenRenderer); expect(getCellMeta(1, 0).readOnly).toBe(false); expect(getCellMeta(4, 0).renderer).not.toBe(greenRenderer); expect(getCellMeta(4, 0).readOnly).toBe(false); }); it('should add new row which respect defined type of cells after executing `insert_row`', () => { handsontable({ data: [ [0, 'a', true], [1, 'b', false], [2, 'c', true], [3, 'd', true] ], cell: [ { row: 0, col: 0, type: 'text' } ], columns: [ { type: 'numeric' }, { type: 'text' }, { type: 'checkbox' } ] }); alter('insert_row'); // added row expect(getCellMeta(4, 0).type).toEqual('numeric'); expect(getDataAtCell(4, 0)).toEqual(null); expect(getCellMeta(4, 2).type).toEqual('checkbox'); expect(getDataAtCell(4, 2)).toEqual(null); }); it('should insert the amount of rows at given index', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3'], ['e1', 'e2', 'e3'] ] }); alter('insert_row', 1, 3); expect(countRows()).toEqual(8); expect(spec().$container.find('tr:eq(1) td:eq(0)').html()).toEqual(''); 
expect(spec().$container.find('tr:eq(4) td:eq(0)').html()).toEqual('b1'); }); it('should insert the amount of rows at the end if index is not given', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3'], ['e1', 'e2', 'e3'] ] }); alter('insert_row', null, 3); expect(countRows()).toEqual(8); expect(getHtCore().find('tr:eq(4) td:eq(0)').html()).toEqual('e1'); expect(getHtCore().find('tr:eq(5) td:eq(0)').html()).toEqual(''); expect(getHtCore().find('tr:eq(6) td:eq(0)').html()).toEqual(''); expect(getHtCore().find('tr:eq(7) td:eq(0)').html()).toEqual(''); }); it('should insert not more rows than maxRows', () => { handsontable({ startRows: 5, maxRows: 7 }); alter('insert_row', 1); alter('insert_row', 1); alter('insert_row', 1); expect(countRows()).toEqual(7); }); it('when amount parameter is used, should not insert more rows than allowed by maxRows', () => { handsontable({ data: [ ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['d1', 'd2', 'd3'], ['e1', 'e2', 'e3'] ], maxRows: 10 }); alter('insert_row', 1, 10); expect(countRows()).toEqual(10); expect(spec().$container.find('tr:eq(6) td:eq(0)').html()).toEqual('b1'); }); it('should not add more source rows than defined in maxRows when trimming rows using the modifyRow hook', () => { const hot = handsontable({ data: Handsontable.helper.createSpreadsheetData(10, 4), modifyRow(row) { return [8, 9].indexOf(row) > -1 ? null : row; }, maxRows: 10 }); expect(hot.countRows()).toEqual(8); hot.populateFromArray(7, 0, [['a'], ['b'], ['c']]); expect(hot.countSourceRows()).toEqual(10); expect(hot.getDataAtCell(7, 0)).toEqual('a'); }); it('should fire callback on create row', () => { let outputBefore; let outputAfter; handsontable({ minRows: 5, data: arrayOfNestedObjects(), columns: [ { data: 'id' }, { data: 'name.first' } ], beforeCreateRow(index, amount, source) { outputBefore = [index, amount, source]; }, afterCreateRow(index, amount, source) { outputAfter = [index, amount, source]; }, }); alter('insert_row', 3, 1, 'customSource'); expect(outputBefore).toEqual([3, 1, 'customSource']); expect(outputAfter).toEqual([3, 1, 'customSource']); }); it('should keep the single-cell selection in the same position as before inserting the row', () => { handsontable({ minRows: 5, data: [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ] }); selectCell(2, 2); alter('insert_row', 2); const selected = getSelected(); expect(selected[0][0]).toBe(3); expect(selected[0][2]).toBe(3); expect(selected.length).toBe(1); }); it('should shift the cell meta according to the new row layout', () => { handsontable({ startCols: 4, startRows: 3 }); setCellMeta(2, 1, 'className', 'test'); alter('insert_row', 1, 1); expect(getCellMeta(3, 1).className).toEqual('test'); }); it('should shift the cell meta according to the new rows (>1) layout', () => { handsontable({ startCols: 4, startRows: 3 }); setCellMeta(2, 1, 'className', 'test'); alter('insert_row', 0, 3); expect(getCellMeta(5, 1).className).toEqual('test'); }); }); describe('insert column', () => { it('should insert column at given index', () => { handsontable({ data: [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ] }); alter('insert_col', 1); expect(countCols()).toEqual(9); expect(spec().$container.find('tr:eq(1) td:eq(2)').html()).toEqual('b'); }); it('should insert column at the end if index is not given', () => { handsontable({ data: [ ['a', 'b', 'c', 
'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ] }); alter('insert_col'); expect(countCols()).toEqual(9); expect(spec().$container.find('tr:eq(1) td:eq(7)').html()).toEqual('h'); }); it('should insert the amount of columns at given index', () => { handsontable({ data: [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ] }); alter('insert_col', 1, 3); expect(countCols()).toEqual(11); expect(spec().$container.find('tr:eq(1) td:eq(4)').html()).toEqual('b'); }); it('should insert the amount of columns at the end if index is not given', () => { handsontable({ data: [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ] }); alter('insert_col', null, 3); expect(countCols()).toEqual(11); expect(spec().$container.find('tr:eq(1) td:eq(7)').html()).toEqual('h'); expect(spec().$container.find('tr:eq(1) td:eq(8)').html()).toEqual(''); expect(spec().$container.find('tr:eq(1) td:eq(9)').html()).toEqual(''); expect(spec().$container.find('tr:eq(1) td:eq(10)').html()).toEqual(''); }); it('should insert not more cols than maxCols', () => { handsontable({ startCols: 5, maxCols: 7 }); alter('insert_col', 1); alter('insert_col', 1); alter('insert_col', 1); expect(countCols()).toEqual(7); }); it('should not insert more columns than allowed by maxCols, when amount parameter is used', () => { handsontable({ data: [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] ], maxCols: 10 }); alter('insert_col', 1, 10); expect(countCols()).toEqual(10); expect(spec().$container.find('tr:eq(1) td:eq(1)').html()).toEqual(''); expect(spec().$container.find('tr:eq(1) td:eq(2)').html()).toEqual(''); expect(spec().$container.find('tr:eq(1) td:eq(3)').html()).toEqual('b'); }); it('should fire callback on create col', () => { let outputBefore; let outputAfter; handsontable({ minRows: 5, data: arrayOfArrays(), beforeCreateCol(index, amount, source) { outputBefore = [index, amount, source]; }, afterCreateCol(index, amount, source) { outputAfter = [index, amount, source]; }, }); alter('insert_col', 2, 1, 'customSource'); expect(outputBefore).toEqual([2, 1, 'customSource']); expect(outputAfter).toEqual([2, 1, 'customSource']); }); it('should not create column header together with the column, if headers were NOT specified explicitly', () => { handsontable({ startCols: 3, startRows: 2, colHeaders: true }); expect(getColHeader()).toEqual(['A', 'B', 'C']); expect(countCols()).toEqual(3); alter('insert_col', 1); expect(countCols()).toEqual(4); expect(getColHeader()).toEqual(['A', 'B', 'C', 'D']); }); it('should create column header together with the column, if headers were specified explicitly', () => { handsontable({ startCols: 3, startRows: 2, colHeaders: ['Header0', 'Header1', 'Header2'] }); expect(getColHeader()).toEqual(['Header0', 'Header1', 'Header2']); expect(countCols()).toEqual(3); alter('insert_col', 1); expect(countCols()).toEqual(4); expect(getColHeader()).toEqual(['Header0', 'B', 'Header1', 'Header2']); }); it('should stretch the table after adding another column (if stretching is set to \'all\')', () => { spec().$container.css({ width: 500, }); const hot = handsontable({ startCols: 5, startRows: 10, stretchH: 'all' }); expect(Handsontable.dom.outerWidth(hot.view.TBODY)).toEqual(500); alter('insert_col', null, 1); expect(Handsontable.dom.outerWidth(hot.view.TBODY)).toEqual(500); alter('insert_col', null, 1); expect(Handsontable.dom.outerWidth(hot.view.TBODY)).toEqual(500); }); it('should shift the cell meta 
according to the new column layout', () => { handsontable({ startCols: 4, startRows: 3 }); setCellMeta(1, 2, 'className', 'test'); alter('insert_col', 1, 1); expect(getCellMeta(1, 3).className).toEqual('test'); }); it('should shift the cell meta according to the new columns (>1) layout', () => { handsontable({ startCols: 4, startRows: 3 }); setCellMeta(1, 2, 'className', 'test'); alter('insert_col', 0, 3); expect(getCellMeta(1, 5).className).toEqual('test'); }); }); });
1
15,203
It's no longer possible to use `modifyRow` to trim rows, and it's fine to use TrimRows instead. But I think we should move this test to the TrimRows plugin tests, as the `trimRows` setting is not part of the `core`.
handsontable-handsontable
js
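As a reference for the review comment above, here is a minimal sketch of the rewritten spec, assuming the same test helpers (`handsontable`, `expect`, `it`) that the rest of the suite uses; the configuration and assertions are copied from the patch:

```js
// Rows 8 and 9 are trimmed via the TrimRows plugin instead of
// returning null from the (no longer supported) modifyRow hook.
it('should not add more source rows than defined in maxRows when trimming rows using the TrimRows plugin', () => {
  const hot = handsontable({
    data: Handsontable.helper.createSpreadsheetData(10, 4),
    trimRows: [8, 9], // enables the TrimRows plugin for these physical rows
    maxRows: 10
  });

  // Two of the ten source rows are trimmed, so eight rows are rendered.
  expect(hot.countRows()).toEqual(8);

  // Writing past the visible rows must not grow the source data beyond maxRows.
  hot.populateFromArray(7, 0, [['a'], ['b'], ['c']]);

  expect(hot.countSourceRows()).toEqual(10);
  expect(hot.getDataAtCell(7, 0)).toEqual('a');
});
```

If the reviewer's suggestion is followed, this block would move as-is into the TrimRows plugin's own spec file, since `trimRows` is plugin configuration rather than core behavior.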
@@ -688,6 +688,17 @@ static int on_config_mime_setdefaulttype(h2o_configurator_command_t *cmd, h2o_co return 0; } +static const char *get_ext(h2o_configurator_command_t *cmd, yoml_t *node) +{ + if (strcmp(node->data.scalar, "default") == 0) { + return node->data.scalar; + } else if (assert_is_extension(cmd, node) == 0) { + return node->data.scalar + 1; + } else { + return NULL; + } +} + static int on_config_custom_handler(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { static const char *ignore_commands[] = {"extension", NULL};
1
/* * Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Fastly, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <inttypes.h> #include "h2o.h" #include "h2o/configurator.h" struct st_core_config_vars_t { struct { unsigned reprioritize_blocking_assets : 1; unsigned push_preload : 1; h2o_casper_conf_t casper; } http2; struct { unsigned emit_request_errors : 1; } error_log; }; struct st_core_configurator_t { h2o_configurator_t super; struct st_core_config_vars_t *vars, _vars_stack[H2O_CONFIGURATOR_NUM_LEVELS + 1]; }; static h2o_configurator_context_t *create_context(h2o_configurator_context_t *parent, int is_custom_handler) { h2o_configurator_context_t *ctx = h2o_mem_alloc(sizeof(*ctx)); if (parent == NULL) { *ctx = (h2o_configurator_context_t){NULL}; return ctx; } *ctx = *parent; if (ctx->env != NULL) h2o_mem_addref_shared(ctx->env); ctx->parent = parent; return ctx; } static void destroy_context(h2o_configurator_context_t *ctx) { if (ctx->env != NULL) { if (ctx->pathconf != NULL) ctx->pathconf->env = ctx->env; else h2o_mem_release_shared(ctx->env); } free(ctx); } static int on_core_enter(h2o_configurator_t *_self, h2o_configurator_context_t *ctx, yoml_t *node) { struct st_core_configurator_t *self = (void *)_self; ++self->vars; self->vars[0] = self->vars[-1]; return 0; } static int on_core_exit(h2o_configurator_t *_self, h2o_configurator_context_t *ctx, yoml_t *node) { struct st_core_configurator_t *self = (void *)_self; if (ctx->hostconf != NULL && ctx->pathconf == NULL) { /* exitting from host-level configuration */ ctx->hostconf->http2.reprioritize_blocking_assets = self->vars->http2.reprioritize_blocking_assets; ctx->hostconf->http2.push_preload = self->vars->http2.push_preload; ctx->hostconf->http2.casper = self->vars->http2.casper; } else if (ctx->pathconf != NULL) { /* exitting from path or extension-level configuration */ ctx->pathconf->error_log.emit_request_errors = self->vars->error_log.emit_request_errors; } --self->vars; return 0; } static void destroy_configurator(h2o_configurator_t *configurator) { if (configurator->dispose != NULL) configurator->dispose(configurator); free(configurator->commands.entries); free(configurator); } static int setup_configurators(h2o_configurator_context_t *ctx, int is_enter, yoml_t *node) { h2o_linklist_t *n; for (n = ctx->globalconf->configurators.next; n != &ctx->globalconf->configurators; n = n->next) { h2o_configurator_t *c = H2O_STRUCT_FROM_MEMBER(h2o_configurator_t, _link, n); 
if (is_enter) { if (c->enter != NULL && c->enter(c, ctx, node) != 0) return -1; } else { if (c->exit != NULL && c->exit(c, ctx, node) != 0) return -1; } } return 0; } static int config_timeout(h2o_configurator_command_t *cmd, yoml_t *node, uint64_t *slot) { uint64_t timeout_in_secs; if (h2o_configurator_scanf(cmd, node, "%" SCNu64, &timeout_in_secs) != 0) return -1; *slot = timeout_in_secs * 1000; return 0; } int h2o_configurator_apply_commands(h2o_configurator_context_t *ctx, yoml_t *node, int flags_mask, const char **ignore_commands) { struct st_cmd_value_t { h2o_configurator_command_t *cmd; yoml_t *value; }; H2O_VECTOR(struct st_cmd_value_t) deferred = {NULL}, semi_deferred = {NULL}; int ret = -1; if (node != NULL && node->type != YOML_TYPE_MAPPING) { h2o_configurator_errprintf(NULL, node, "node must be a MAPPING"); goto Exit; } /* call on_enter of every configurator */ if (setup_configurators(ctx, 1, node) != 0) goto Exit; /* handle the configuration commands */ if (node != NULL) { size_t i; for (i = 0; i != node->data.mapping.size; ++i) { yoml_t *key = node->data.mapping.elements[i].key, *value = node->data.mapping.elements[i].value; h2o_configurator_command_t *cmd; /* obtain the target command */ if (key->type != YOML_TYPE_SCALAR) { h2o_configurator_errprintf(NULL, key, "command must be a string"); goto Exit; } if (ignore_commands != NULL) { size_t i; for (i = 0; ignore_commands[i] != NULL; ++i) if (strcmp(ignore_commands[i], key->data.scalar) == 0) goto SkipCommand; } if ((cmd = h2o_configurator_get_command(ctx->globalconf, key->data.scalar)) == NULL) { h2o_configurator_errprintf(NULL, key, "unknown command: %s", key->data.scalar); goto Exit; } if ((cmd->flags & flags_mask) == 0) { h2o_configurator_errprintf(cmd, key, "the command cannot be used at this level"); goto Exit; } /* check value type */ if ((cmd->flags & (H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR | H2O_CONFIGURATOR_FLAG_EXPECT_SEQUENCE | H2O_CONFIGURATOR_FLAG_EXPECT_MAPPING)) != 0) { switch (value->type) { case YOML_TYPE_SCALAR: if ((cmd->flags & H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR) == 0) { h2o_configurator_errprintf(cmd, value, "argument cannot be a scalar"); goto Exit; } break; case YOML_TYPE_SEQUENCE: if ((cmd->flags & H2O_CONFIGURATOR_FLAG_EXPECT_SEQUENCE) == 0) { h2o_configurator_errprintf(cmd, value, "argument cannot be a sequence"); goto Exit; } break; case YOML_TYPE_MAPPING: if ((cmd->flags & H2O_CONFIGURATOR_FLAG_EXPECT_MAPPING) == 0) { h2o_configurator_errprintf(cmd, value, "argument cannot be a mapping"); goto Exit; } break; default: assert(!"unreachable"); break; } } /* handle the command (or keep it for later execution) */ if ((cmd->flags & H2O_CONFIGURATOR_FLAG_SEMI_DEFERRED) != 0) { h2o_vector_reserve(NULL, &semi_deferred, semi_deferred.size + 1); semi_deferred.entries[semi_deferred.size++] = (struct st_cmd_value_t){cmd, value}; } else if ((cmd->flags & H2O_CONFIGURATOR_FLAG_DEFERRED) != 0) { h2o_vector_reserve(NULL, &deferred, deferred.size + 1); deferred.entries[deferred.size++] = (struct st_cmd_value_t){cmd, value}; } else { if (cmd->cb(cmd, ctx, value) != 0) goto Exit; } SkipCommand:; } for (i = 0; i != semi_deferred.size; ++i) { struct st_cmd_value_t *pair = semi_deferred.entries + i; if (pair->cmd->cb(pair->cmd, ctx, pair->value) != 0) goto Exit; } for (i = 0; i != deferred.size; ++i) { struct st_cmd_value_t *pair = deferred.entries + i; if (pair->cmd->cb(pair->cmd, ctx, pair->value) != 0) goto Exit; } } /* call on_exit of every configurator */ if (setup_configurators(ctx, 0, node) != 0) goto Exit; ret = 0; 
Exit: free(deferred.entries); free(semi_deferred.entries); return ret; } static int sort_from_longer_paths(const yoml_mapping_element_t *x, const yoml_mapping_element_t *y) { size_t xlen = strlen(x->key->data.scalar), ylen = strlen(y->key->data.scalar); if (xlen < ylen) return 1; else if (xlen > ylen) return -1; /* apply strcmp for stable sort */ return strcmp(x->key->data.scalar, y->key->data.scalar); } static yoml_t *convert_path_config_node(h2o_configurator_command_t *cmd, yoml_t *node) { size_t i, j; switch (node->type) { case YOML_TYPE_MAPPING: break; case YOML_TYPE_SEQUENCE: { /* convert to mapping */ yoml_t *map = h2o_mem_alloc(sizeof(yoml_t)); *map = (yoml_t){YOML_TYPE_MAPPING}; if (node->filename != NULL) map->filename = h2o_strdup(NULL, node->filename, SIZE_MAX).base; map->line = node->line; map->column = node->column; if (node->anchor != NULL) map->anchor = h2o_strdup(NULL, node->anchor, SIZE_MAX).base; map->_refcnt = 1; for (i = 0; i != node->data.sequence.size; ++i) { yoml_t *elem = node->data.sequence.elements[i]; if (elem->type != YOML_TYPE_MAPPING) { yoml_free(map, NULL); goto Error; } for (j = 0; j != elem->data.mapping.size; ++j) { yoml_t *elemkey = elem->data.mapping.elements[j].key; yoml_t *elemvalue = elem->data.mapping.elements[j].value; map = h2o_mem_realloc(map, offsetof(yoml_t, data.mapping.elements) + sizeof(yoml_mapping_element_t) * (map->data.mapping.size + 1)); map->data.mapping.elements[map->data.mapping.size].key = elemkey; map->data.mapping.elements[map->data.mapping.size].value = elemvalue; ++map->data.mapping.size; ++elemkey->_refcnt; ++elemvalue->_refcnt; } } return map; } break; default: Error: h2o_configurator_errprintf(cmd, node, "value must be a mapping or sequence of mapping"); return NULL; } ++node->_refcnt; return node; } static int config_path(h2o_configurator_context_t *parent_ctx, h2o_pathconf_t *pathconf, yoml_t *node) { h2o_configurator_context_t *path_ctx = create_context(parent_ctx, 0); path_ctx->pathconf = pathconf; path_ctx->mimemap = &pathconf->mimemap; int ret = h2o_configurator_apply_commands(path_ctx, node, H2O_CONFIGURATOR_FLAG_PATH, NULL); destroy_context(path_ctx); return ret; } static int on_config_paths(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { size_t i; /* sort by the length of the path (descending) */ for (i = 0; i != node->data.mapping.size; ++i) { yoml_t *key = node->data.mapping.elements[i].key; if (key->type != YOML_TYPE_SCALAR) { h2o_configurator_errprintf(cmd, key, "key (representing the virtual path) must be a string"); return -1; } } qsort(node->data.mapping.elements, node->data.mapping.size, sizeof(node->data.mapping.elements[0]), (int (*)(const void *, const void *))sort_from_longer_paths); for (i = 0; i != node->data.mapping.size; ++i) { yoml_t *key = node->data.mapping.elements[i].key, *value; if ((value = convert_path_config_node(cmd, node->data.mapping.elements[i].value)) == NULL) return -1; h2o_pathconf_t *pathconf = h2o_config_register_path(ctx->hostconf, key->data.scalar, 0); int cmd_ret = config_path(ctx, pathconf, value); yoml_free(value, NULL); if (cmd_ret != 0) return cmd_ret; } /* configure fallback path along with ordinary paths */ return config_path(ctx, &ctx->hostconf->fallback_path, NULL); } static int on_config_hosts(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { size_t i; if (node->data.mapping.size == 0) { h2o_configurator_errprintf(cmd, node, "the mapping cannot be empty"); return -1; } for (i = 0; i != 
node->data.mapping.size; ++i) { yoml_t *key = node->data.mapping.elements[i].key; yoml_t *value = node->data.mapping.elements[i].value; h2o_iovec_t hostname; uint16_t port; if (key->type != YOML_TYPE_SCALAR) { h2o_configurator_errprintf(cmd, key, "key (representing the hostname) must be a string"); return -1; } if (h2o_url_parse_hostport(key->data.scalar, strlen(key->data.scalar), &hostname, &port) == NULL) { h2o_configurator_errprintf(cmd, key, "invalid key (must be either `host` or `host:port`)"); return -1; } assert(hostname.len != 0); if ((hostname.base[0] == '*' && !(hostname.len == 1 || hostname.base[1] == '.')) || memchr(hostname.base + 1, '*', hostname.len - 1) != NULL) { h2o_configurator_errprintf(cmd, key, "wildcard (*) can only be used at the start of the hostname"); return -1; } h2o_configurator_context_t *host_ctx = create_context(ctx, 0); if ((host_ctx->hostconf = h2o_config_register_host(host_ctx->globalconf, hostname, port)) == NULL) { h2o_configurator_errprintf(cmd, key, "duplicate host entry"); destroy_context(host_ctx); return -1; } host_ctx->mimemap = &host_ctx->hostconf->mimemap; int cmd_ret = h2o_configurator_apply_commands(host_ctx, value, H2O_CONFIGURATOR_FLAG_HOST, NULL); destroy_context(host_ctx); if (cmd_ret != 0) return -1; if (yoml_get(value, "paths") == NULL) { h2o_configurator_errprintf(NULL, value, "mandatory configuration directive `paths` is missing"); return -1; } } return 0; } static int on_config_limit_request_body(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { return h2o_configurator_scanf(cmd, node, "%zu", &ctx->globalconf->max_request_entity_size); } static int on_config_max_delegations(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { return h2o_configurator_scanf(cmd, node, "%u", &ctx->globalconf->max_delegations); } static int on_config_handshake_timeout(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { return config_timeout(cmd, node, &ctx->globalconf->handshake_timeout); } static int on_config_http1_request_timeout(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { return config_timeout(cmd, node, &ctx->globalconf->http1.req_timeout); } static int on_config_http1_upgrade_to_http2(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { ssize_t ret = h2o_configurator_get_one_of(cmd, node, "OFF,ON"); if (ret == -1) return -1; ctx->globalconf->http1.upgrade_to_http2 = (int)ret; return 0; } static int on_config_http2_idle_timeout(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { return config_timeout(cmd, node, &ctx->globalconf->http2.idle_timeout); } static int on_config_http2_graceful_shutdown_timeout(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { return config_timeout(cmd, node, &ctx->globalconf->http2.graceful_shutdown_timeout); } static int on_config_http2_max_concurrent_requests_per_connection(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { return h2o_configurator_scanf(cmd, node, "%zu", &ctx->globalconf->http2.max_concurrent_requests_per_connection); } static int on_config_http2_latency_optimization_min_rtt(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { return h2o_configurator_scanf(cmd, node, "%u", &ctx->globalconf->http2.latency_optimization.min_rtt); } static int on_config_http2_latency_optimization_max_additional_delay(h2o_configurator_command_t *cmd, 
h2o_configurator_context_t *ctx, yoml_t *node) { double ratio; if (h2o_configurator_scanf(cmd, node, "%lf", &ratio) != 0) return -1; if (!(0.0 < ratio)) { h2o_configurator_errprintf(cmd, node, "ratio must be a positive number"); return -1; } ctx->globalconf->http2.latency_optimization.max_additional_delay = 100 * ratio; return 0; } static int on_config_http2_latency_optimization_max_cwnd(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { return h2o_configurator_scanf(cmd, node, "%u", &ctx->globalconf->http2.latency_optimization.max_cwnd); } static int on_config_http2_reprioritize_blocking_assets(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { struct st_core_configurator_t *self = (void *)cmd->configurator; ssize_t on; if ((on = h2o_configurator_get_one_of(cmd, node, "OFF,ON")) == -1) return -1; self->vars->http2.reprioritize_blocking_assets = (int)on; return 0; } static int on_config_http2_push_preload(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { struct st_core_configurator_t *self = (void *)cmd->configurator; ssize_t on; if ((on = h2o_configurator_get_one_of(cmd, node, "OFF,ON")) == -1) return -1; self->vars->http2.push_preload = (int)on; return 0; } static int on_config_http2_casper(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { static const h2o_casper_conf_t defaults = { 13, /* casper_bits: default (2^13 ~= 100 assets * 1/0.01 collision probability) */ 0 /* track blocking assets only */ }; struct st_core_configurator_t *self = (void *)cmd->configurator; switch (node->type) { case YOML_TYPE_SCALAR: if (strcasecmp(node->data.scalar, "OFF") == 0) { self->vars->http2.casper = (h2o_casper_conf_t){0}; } else if (strcasecmp(node->data.scalar, "ON") == 0) { self->vars->http2.casper = defaults; } break; case YOML_TYPE_MAPPING: { /* set to default */ self->vars->http2.casper = defaults; /* override the attributes defined */ yoml_t *t; if ((t = yoml_get(node, "capacity-bits")) != NULL) { if (!(t->type == YOML_TYPE_SCALAR && sscanf(t->data.scalar, "%u", &self->vars->http2.casper.capacity_bits) == 1 && self->vars->http2.casper.capacity_bits < 16)) { h2o_configurator_errprintf(cmd, t, "value of `capacity-bits` must be an integer between 0 to 15"); return -1; } } if ((t = yoml_get(node, "tracking-types")) != NULL) { if (t->type == YOML_TYPE_SCALAR && strcasecmp(t->data.scalar, "blocking-assets") == 0) { self->vars->http2.casper.track_all_types = 0; } else if (t->type == YOML_TYPE_SCALAR && strcasecmp(t->data.scalar, "all") == 0) { self->vars->http2.casper.track_all_types = 1; } else { h2o_configurator_errprintf(cmd, t, "value of `tracking-types` must be either of: `blocking-assets` or `all`"); return -1; } } } break; default: h2o_configurator_errprintf(cmd, node, "value must be `OFF`,`ON` or a mapping containing the necessary attributes"); return -1; } return 0; } static int assert_is_mimetype(h2o_configurator_command_t *cmd, yoml_t *node) { if (node->type != YOML_TYPE_SCALAR) { h2o_configurator_errprintf(cmd, node, "expected a scalar (mime-type)"); return -1; } if (strchr(node->data.scalar, '/') == NULL) { h2o_configurator_errprintf(cmd, node, "the string \"%s\" does not look like a mime-type", node->data.scalar); return -1; } return 0; } static int assert_is_extension(h2o_configurator_command_t *cmd, yoml_t *node) { if (node->type != YOML_TYPE_SCALAR) { h2o_configurator_errprintf(cmd, node, "expected a scalar (extension)"); return -1; } if (node->data.scalar[0] != 
'.') { h2o_configurator_errprintf(cmd, node, "given extension \"%s\" does not start with a \".\"", node->data.scalar); return -1; } return 0; } static int set_mimetypes(h2o_configurator_command_t *cmd, h2o_mimemap_t *mimemap, yoml_t *node) { size_t i, j; assert(node->type == YOML_TYPE_MAPPING); for (i = 0; i != node->data.mapping.size; ++i) { yoml_t *key = node->data.mapping.elements[i].key; yoml_t *value = node->data.mapping.elements[i].value; if (assert_is_mimetype(cmd, key) != 0) return -1; switch (value->type) { case YOML_TYPE_SCALAR: if (assert_is_extension(cmd, value) != 0) return -1; h2o_mimemap_define_mimetype(mimemap, value->data.scalar + 1, key->data.scalar, NULL); break; case YOML_TYPE_SEQUENCE: for (j = 0; j != value->data.sequence.size; ++j) { yoml_t *ext_node = value->data.sequence.elements[j]; if (assert_is_extension(cmd, ext_node) != 0) return -1; h2o_mimemap_define_mimetype(mimemap, ext_node->data.scalar + 1, key->data.scalar, NULL); } break; case YOML_TYPE_MAPPING: { yoml_t *t; h2o_mime_attributes_t attr; h2o_mimemap_get_default_attributes(key->data.scalar, &attr); if ((t = yoml_get(value, "is_compressible")) != NULL) { if (t->type == YOML_TYPE_SCALAR && strcasecmp(t->data.scalar, "YES") == 0) { attr.is_compressible = 1; } else if (t->type == YOML_TYPE_SCALAR && strcasecmp(t->data.scalar, "NO") == 0) { attr.is_compressible = 0; } else { h2o_configurator_errprintf(cmd, t, "`is_compressible` attribute must be either of: `YES` or `NO`"); return -1; } } if ((t = yoml_get(value, "priority")) != NULL) { if (t->type == YOML_TYPE_SCALAR && strcasecmp(t->data.scalar, "normal") == 0) { attr.priority = H2O_MIME_ATTRIBUTE_PRIORITY_NORMAL; } else if (t->type == YOML_TYPE_SCALAR && strcasecmp(t->data.scalar, "highest") == 0) { attr.priority = H2O_MIME_ATTRIBUTE_PRIORITY_HIGHEST; } else { h2o_configurator_errprintf(cmd, t, "`priority` attribute must be either of: `normal` or `highest`"); return -1; } } if ((t = yoml_get(value, "extensions")) == NULL) { h2o_configurator_errprintf(cmd, value, "cannot find mandatory attribute `extensions`"); return -1; } if (t->type != YOML_TYPE_SEQUENCE) { h2o_configurator_errprintf(cmd, t, "`extensions` attribute must be a sequence of scalars"); return -1; } for (j = 0; j != t->data.sequence.size; ++j) { yoml_t *ext_node = t->data.sequence.elements[j]; if (assert_is_extension(cmd, ext_node) != 0) return -1; h2o_mimemap_define_mimetype(mimemap, ext_node->data.scalar + 1, key->data.scalar, &attr); } } break; default: fprintf(stderr, "logic flaw at %s:%d\n", __FILE__, __LINE__); abort(); } } return 0; } static int on_config_mime_settypes(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { h2o_mimemap_t *newmap = h2o_mimemap_create(); h2o_mimemap_set_default_type(newmap, h2o_mimemap_get_default_type(*ctx->mimemap)->data.mimetype.base, NULL); if (set_mimetypes(cmd, newmap, node) != 0) { h2o_mem_release_shared(newmap); return -1; } h2o_mem_release_shared(*ctx->mimemap); *ctx->mimemap = newmap; return 0; } static void clone_mimemap_if_clean(h2o_configurator_context_t *ctx) { if (ctx->parent == NULL) return; if (*ctx->mimemap != *ctx->parent->mimemap) return; h2o_mem_release_shared(*ctx->mimemap); /* even after release, ctx->mimemap is still retained by the parent and therefore we can use it as the argument to clone */ *ctx->mimemap = h2o_mimemap_clone(*ctx->mimemap); } static int on_config_mime_addtypes(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { clone_mimemap_if_clean(ctx); return 
set_mimetypes(cmd, *ctx->mimemap, node); } static int on_config_mime_removetypes(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { size_t i; clone_mimemap_if_clean(ctx); for (i = 0; i != node->data.sequence.size; ++i) { yoml_t *ext_node = node->data.sequence.elements[i]; if (assert_is_extension(cmd, ext_node) != 0) return -1; h2o_mimemap_remove_type(*ctx->mimemap, ext_node->data.scalar + 1); } return 0; } static int on_config_mime_setdefaulttype(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { if (assert_is_mimetype(cmd, node) != 0) return -1; clone_mimemap_if_clean(ctx); h2o_mimemap_set_default_type(*ctx->mimemap, node->data.scalar, NULL); return 0; } static int on_config_custom_handler(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { static const char *ignore_commands[] = {"extension", NULL}; yoml_t *ext_node; const char **exts; h2o_mimemap_type_t *type = NULL; if (node->type != YOML_TYPE_MAPPING) { h2o_configurator_errprintf(cmd, node, "argument must be a MAPPING"); return -1; } if ((ext_node = yoml_get(node, "extension")) == NULL) { h2o_configurator_errprintf(cmd, node, "mandatory key `extension` is missing"); return -1; } /* create dynamic type */ switch (ext_node->type) { case YOML_TYPE_SCALAR: if (assert_is_extension(cmd, ext_node) != 0) return -1; exts = alloca(2 * sizeof(*exts)); exts[0] = ext_node->data.scalar + 1; exts[1] = NULL; break; case YOML_TYPE_SEQUENCE: { exts = alloca((ext_node->data.sequence.size + 1) * sizeof(*exts)); size_t i; for (i = 0; i != ext_node->data.sequence.size; ++i) { yoml_t *n = ext_node->data.sequence.elements[i]; if (assert_is_extension(cmd, n) != 0) return -1; exts[i] = n->data.scalar + 1; } exts[i] = NULL; } break; default: h2o_configurator_errprintf(cmd, ext_node, "`extensions` must be a scalar or sequence of scalar"); return -1; } clone_mimemap_if_clean(ctx); type = h2o_mimemap_define_dynamic(*ctx->mimemap, exts, ctx->globalconf); /* apply the configuration commands */ h2o_configurator_context_t *ext_ctx = create_context(ctx, 1); ext_ctx->pathconf = &type->data.dynamic.pathconf; ext_ctx->mimemap = NULL; int cmd_ret = h2o_configurator_apply_commands(ext_ctx, node, H2O_CONFIGURATOR_FLAG_EXTENSION, ignore_commands); destroy_context(ext_ctx); if (cmd_ret != 0) return cmd_ret; switch (type->data.dynamic.pathconf.handlers.size) { case 1: break; case 0: h2o_configurator_errprintf(cmd, node, "no handler declared for given extension"); return -1; default: h2o_configurator_errprintf(cmd, node, "cannot assign more than one handler for given extension"); return -1; } return 0; } static void inherit_env_if_necessary(h2o_configurator_context_t *ctx) { if (ctx->env == (ctx->parent != NULL ? 
ctx->parent->env : NULL)) ctx->env = h2o_config_create_envconf(ctx->env); } static int on_config_setenv(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { size_t i; inherit_env_if_necessary(ctx); for (i = 0; i != node->data.mapping.size; ++i) { yoml_t *key = node->data.mapping.elements[i].key, *value = node->data.mapping.elements[i].value; if (key->type != YOML_TYPE_SCALAR) { h2o_configurator_errprintf(cmd, key, "key must be a scalar"); return -1; } if (value->type != YOML_TYPE_SCALAR) { h2o_configurator_errprintf(cmd, value, "value must be a scalar"); return -1; } h2o_config_setenv(ctx->env, key->data.scalar, value->data.scalar); } return 0; } static int on_config_unsetenv(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { inherit_env_if_necessary(ctx); switch (node->type) { case YOML_TYPE_SCALAR: h2o_config_unsetenv(ctx->env, node->data.scalar); break; case YOML_TYPE_SEQUENCE: { size_t i; for (i = 0; i != node->data.sequence.size; ++i) { yoml_t *element = node->data.sequence.elements[i]; if (element->type != YOML_TYPE_SCALAR) { h2o_configurator_errprintf(cmd, element, "element of a sequence passed to unsetenv must be a scalar"); return -1; } h2o_config_unsetenv(ctx->env, element->data.scalar); } } break; default: h2o_configurator_errprintf(cmd, node, "argument to unsetenv must be either a scalar or a sequence"); return -1; } return 0; } static int on_config_server_name(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { ctx->globalconf->server_name = h2o_strdup(NULL, node->data.scalar, SIZE_MAX); return 0; } static int on_config_send_server_name(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { switch(h2o_configurator_get_one_of(cmd, node, "OFF,ON,preserve")) { case 0: /* off */ ctx->globalconf->server_name = h2o_iovec_init(H2O_STRLIT("")); break; case 1: /* on */ break; case 2: /* preserve */ ctx->globalconf->server_name = h2o_iovec_init(H2O_STRLIT("")); ctx->globalconf->proxy.preserve_server_header = 1; break; default: return -1; } return 0; } static int on_config_error_log_emit_request_errors(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) { struct st_core_configurator_t *self = (void *)cmd->configurator; ssize_t on; if ((on = h2o_configurator_get_one_of(cmd, node, "OFF,ON")) == -1) return -1; self->vars->error_log.emit_request_errors = (int)on; return 0; } void h2o_configurator__init_core(h2o_globalconf_t *conf) { /* check if already initialized */ if (h2o_configurator_get_command(conf, "files") != NULL) return; { /* `hosts` and `paths` */ h2o_configurator_t *c = h2o_configurator_create(conf, sizeof(*c)); h2o_configurator_define_command(c, "hosts", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_MAPPING | H2O_CONFIGURATOR_FLAG_DEFERRED, on_config_hosts); h2o_configurator_define_command(c, "paths", H2O_CONFIGURATOR_FLAG_HOST | H2O_CONFIGURATOR_FLAG_EXPECT_MAPPING | H2O_CONFIGURATOR_FLAG_DEFERRED, on_config_paths); }; { /* setup global configurators */ struct st_core_configurator_t *c = (void *)h2o_configurator_create(conf, sizeof(*c)); c->super.enter = on_core_enter; c->super.exit = on_core_exit; c->vars = c->_vars_stack; c->vars->http2.reprioritize_blocking_assets = 1; /* defaults to ON */ c->vars->http2.push_preload = 1; /* defaults to ON */ c->vars->error_log.emit_request_errors = 1; /* defaults to ON */ h2o_configurator_define_command(&c->super, "limit-request-body", H2O_CONFIGURATOR_FLAG_GLOBAL | 
H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_limit_request_body); h2o_configurator_define_command(&c->super, "max-delegations", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_max_delegations); h2o_configurator_define_command(&c->super, "handshake-timeout", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_handshake_timeout); h2o_configurator_define_command(&c->super, "http1-request-timeout", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_http1_request_timeout); h2o_configurator_define_command(&c->super, "http1-upgrade-to-http2", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_http1_upgrade_to_http2); h2o_configurator_define_command(&c->super, "http2-idle-timeout", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_http2_idle_timeout); h2o_configurator_define_command(&c->super, "http2-graceful-shutdown-timeout", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_http2_graceful_shutdown_timeout); h2o_configurator_define_command(&c->super, "http2-max-concurrent-requests-per-connection", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_http2_max_concurrent_requests_per_connection); h2o_configurator_define_command(&c->super, "http2-latency-optimization-min-rtt", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_http2_latency_optimization_min_rtt); h2o_configurator_define_command(&c->super, "http2-latency-optimization-max-additional-delay", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_http2_latency_optimization_max_additional_delay); h2o_configurator_define_command(&c->super, "http2-latency-optimization-max-cwnd", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_http2_latency_optimization_max_cwnd); h2o_configurator_define_command(&c->super, "http2-reprioritize-blocking-assets", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_HOST | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_http2_reprioritize_blocking_assets); h2o_configurator_define_command(&c->super, "http2-push-preload", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_HOST | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_http2_push_preload); h2o_configurator_define_command(&c->super, "http2-casper", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_HOST, on_config_http2_casper); h2o_configurator_define_command(&c->super, "file.mime.settypes", (H2O_CONFIGURATOR_FLAG_ALL_LEVELS & ~H2O_CONFIGURATOR_FLAG_EXTENSION) | H2O_CONFIGURATOR_FLAG_EXPECT_MAPPING, on_config_mime_settypes); h2o_configurator_define_command(&c->super, "file.mime.addtypes", (H2O_CONFIGURATOR_FLAG_ALL_LEVELS & ~H2O_CONFIGURATOR_FLAG_EXTENSION) | H2O_CONFIGURATOR_FLAG_EXPECT_MAPPING, on_config_mime_addtypes); h2o_configurator_define_command(&c->super, "file.mime.removetypes", (H2O_CONFIGURATOR_FLAG_ALL_LEVELS & ~H2O_CONFIGURATOR_FLAG_EXTENSION) | H2O_CONFIGURATOR_FLAG_EXPECT_SEQUENCE, on_config_mime_removetypes); h2o_configurator_define_command(&c->super, "file.mime.setdefaulttype", (H2O_CONFIGURATOR_FLAG_ALL_LEVELS & ~H2O_CONFIGURATOR_FLAG_EXTENSION) | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_mime_setdefaulttype); h2o_configurator_define_command(&c->super, "file.custom-handler", (H2O_CONFIGURATOR_FLAG_ALL_LEVELS & ~H2O_CONFIGURATOR_FLAG_EXTENSION) | H2O_CONFIGURATOR_FLAG_SEMI_DEFERRED, on_config_custom_handler); h2o_configurator_define_command(&c->super, "setenv", 
H2O_CONFIGURATOR_FLAG_ALL_LEVELS | H2O_CONFIGURATOR_FLAG_EXPECT_MAPPING, on_config_setenv); h2o_configurator_define_command(&c->super, "unsetenv", H2O_CONFIGURATOR_FLAG_ALL_LEVELS, on_config_unsetenv); h2o_configurator_define_command(&c->super, "server-name", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_server_name); h2o_configurator_define_command(&c->super, "send-server-name", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR | H2O_CONFIGURATOR_FLAG_DEFERRED, on_config_send_server_name); h2o_configurator_define_command(&c->super, "error-log.emit-request-errors", H2O_CONFIGURATOR_FLAG_ALL_LEVELS | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR, on_config_error_log_emit_request_errors); } } void h2o_configurator__dispose_configurators(h2o_globalconf_t *conf) { while (!h2o_linklist_is_empty(&conf->configurators)) { h2o_configurator_t *c = H2O_STRUCT_FROM_MEMBER(h2o_configurator_t, _link, conf->configurators.next); h2o_linklist_unlink(&c->_link); if (c->dispose != NULL) c->dispose(c); destroy_configurator(c); } } h2o_configurator_t *h2o_configurator_create(h2o_globalconf_t *conf, size_t sz) { h2o_configurator_t *c; assert(sz >= sizeof(*c)); c = h2o_mem_alloc(sz); memset(c, 0, sz); h2o_linklist_insert(&conf->configurators, &c->_link); return c; } void h2o_configurator_define_command(h2o_configurator_t *configurator, const char *name, int flags, h2o_configurator_command_cb cb) { h2o_configurator_command_t *cmd; h2o_vector_reserve(NULL, &configurator->commands, configurator->commands.size + 1); cmd = configurator->commands.entries + configurator->commands.size++; cmd->configurator = configurator; cmd->flags = flags; cmd->name = name; cmd->cb = cb; } h2o_configurator_command_t *h2o_configurator_get_command(h2o_globalconf_t *conf, const char *name) { h2o_linklist_t *node; size_t i; for (node = conf->configurators.next; node != &conf->configurators; node = node->next) { h2o_configurator_t *configurator = H2O_STRUCT_FROM_MEMBER(h2o_configurator_t, _link, node); for (i = 0; i != configurator->commands.size; ++i) { h2o_configurator_command_t *cmd = configurator->commands.entries + i; if (strcmp(cmd->name, name) == 0) { return cmd; } } } return NULL; } int h2o_configurator_apply(h2o_globalconf_t *config, yoml_t *node, int dry_run) { h2o_configurator_context_t *ctx = create_context(NULL, 0); ctx->globalconf = config; ctx->mimemap = &ctx->globalconf->mimemap; ctx->dry_run = dry_run; int cmd_ret = h2o_configurator_apply_commands(ctx, node, H2O_CONFIGURATOR_FLAG_GLOBAL, NULL); destroy_context(ctx); if (cmd_ret != 0) return cmd_ret; if (config->hosts[0] == NULL) { h2o_configurator_errprintf(NULL, node, "mandatory configuration directive `hosts` is missing"); return -1; } return 0; } void h2o_configurator_errprintf(h2o_configurator_command_t *cmd, yoml_t *node, const char *reason, ...) { va_list args; fprintf(stderr, "[%s:%zu] ", node->filename ? node->filename : "-", node->line + 1); if (cmd != NULL) fprintf(stderr, "in command %s, ", cmd->name); va_start(args, reason); vfprintf(stderr, reason, args); va_end(args); fputc('\n', stderr); } int h2o_configurator_scanf(h2o_configurator_command_t *cmd, yoml_t *node, const char *fmt, ...) 
{ va_list args; int sscan_ret; if (node->type != YOML_TYPE_SCALAR) goto Error; va_start(args, fmt); sscan_ret = vsscanf(node->data.scalar, fmt, args); va_end(args); if (sscan_ret != 1) goto Error; return 0; Error: h2o_configurator_errprintf(cmd, node, "argument must match the format: %s", fmt); return -1; } ssize_t h2o_configurator_get_one_of(h2o_configurator_command_t *cmd, yoml_t *node, const char *candidates) { const char *config_str, *cand_str; ssize_t config_str_len, cand_index; if (node->type != YOML_TYPE_SCALAR) goto Error; config_str = node->data.scalar; config_str_len = strlen(config_str); cand_str = candidates; for (cand_index = 0;; ++cand_index) { if (strncasecmp(cand_str, config_str, config_str_len) == 0 && (cand_str[config_str_len] == '\0' || cand_str[config_str_len] == ',')) { /* found */ return cand_index; } cand_str = strchr(cand_str, ','); if (cand_str == NULL) goto Error; cand_str += 1; /* skip ',' */ } /* not reached */ Error: h2o_configurator_errprintf(cmd, node, "argument must be one of: %s", candidates); return -1; } char *h2o_configurator_get_cmd_path(const char *cmd) { char *root, *cmd_fullpath; /* just return the cmd (being strdup'ed) in case we do not need to prefix the value */ if (cmd[0] == '/' || strchr(cmd, '/') == NULL) goto ReturnOrig; /* obtain root */ if ((root = getenv("H2O_ROOT")) == NULL) { root = H2O_TO_STR(H2O_ROOT); } /* build full-path and return */ cmd_fullpath = h2o_mem_alloc(strlen(root) + strlen(cmd) + 2); sprintf(cmd_fullpath, "%s/%s", root, cmd); return cmd_fullpath; ReturnOrig: return h2o_strdup(NULL, cmd, SIZE_MAX).base; }
1
12,482
The function seems to either return "default" or return the extension with the preceding dot stripped. Does this mean that we can no longer register `.default` as an extension?
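A minimal C sketch of the collision being asked about, assuming a lookup-key helper of the shape the comment describes (the function name extension_key and the example paths are illustrative, not h2o's actual API):

#include <stdio.h>
#include <string.h>

/* Illustrative helper in the shape the comment describes: it returns the
 * literal key "default" when the path has no extension, otherwise the
 * extension with the preceding dot stripped. */
static const char *extension_key(const char *path)
{
    const char *dot = strrchr(path, '.');
    if (dot == NULL || dot[1] == '\0')
        return "default"; /* no extension: fall back to the "default" key */
    return dot + 1;       /* strip the preceding dot */
}

int main(void)
{
    printf("%s\n", extension_key("index"));           /* "default" */
    printf("%s\n", extension_key("archive.default")); /* also "default": the collision */
    printf("%s\n", extension_key("style.css"));       /* "css" */
    return 0;
}

Under this behavior a genuine `.default` extension and the no-extension fallback map to the same key, which is the concern raised above.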
h2o-h2o
c
@@ -110,6 +110,8 @@ type Container struct { Image string // ImageID is the local ID of the image used in the container ImageID string + // ImageDigest is the sha-256 digest of the container image as pulled from the repository + ImageDigest string // Command is the command to run in the container which is specified in the task definition Command []string // CPU is the cpu limitation of the container which is specified in the task definition
1
// Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package container import ( "encoding/json" "fmt" "strconv" "sync" "time" apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" "github.com/aws/amazon-ecs-agent/agent/credentials" resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" "github.com/aws/aws-sdk-go/aws" "github.com/cihub/seelog" "github.com/docker/docker/api/types" dockercontainer "github.com/docker/docker/api/types/container" ) const ( // defaultContainerSteadyStateStatus defines the container status at // which the container is assumed to be in steady state. It is set // to 'ContainerRunning' unless overridden defaultContainerSteadyStateStatus = apicontainerstatus.ContainerRunning // awslogsAuthExecutionRole is the string value passed in the task payload // that specifies that the log driver should be authenticated using the // execution role awslogsAuthExecutionRole = "ExecutionRole" // DockerHealthCheckType is the type of container health check provided by docker DockerHealthCheckType = "docker" // AuthTypeECR is to use image pull auth over ECR AuthTypeECR = "ecr" // AuthTypeASM is to use image pull auth over AWS Secrets Manager AuthTypeASM = "asm" // MetadataURIEnvironmentVariableName defines the name of the environment // variable in containers' config, which can be used by the containers to access the // v3 metadata endpoint MetadataURIEnvironmentVariableName = "ECS_CONTAINER_METADATA_URI" // MetadataURIFormat defines the URI format for v3 metadata endpoint MetadataURIFormat = "http://169.254.170.2/v3/%s" // SecretProviderSSM is to show secret provider being SSM SecretProviderSSM = "ssm" // SecretProviderASM is to show secret provider being ASM SecretProviderASM = "asm" // SecretTypeEnv is to show secret type being ENVIRONMENT_VARIABLE SecretTypeEnv = "ENVIRONMENT_VARIABLE" // TargetLogDriver is to show secret target being "LOG_DRIVER", the default will be "CONTAINER" SecretTargetLogDriver = "LOG_DRIVER" ) // DockerConfig represents additional metadata about a container to run. It's // remodeled from the `ecsacs` api model file. Eventually it should not exist // once this remodeling is refactored out. 
type DockerConfig struct { // Config is the configuration used to create container Config *string `json:"config"` // HostConfig is the configuration of container related to host resource HostConfig *string `json:"hostConfig"` // Version specifies the docker client API version to use Version *string `json:"version"` } // HealthStatus contains the health check result returned by docker type HealthStatus struct { // Status is the container health status Status apicontainerstatus.ContainerHealthStatus `json:"status,omitempty"` // Since is the timestamp when container health status changed Since *time.Time `json:"statusSince,omitempty"` // ExitCode is the exitcode of health check if failed ExitCode int `json:"exitCode,omitempty"` // Output is the output of health check Output string `json:"output,omitempty"` } // Container is the internal representation of a container in the ECS agent type Container struct { // Name is the name of the container specified in the task definition Name string // RuntimeID is the docker id of the container RuntimeID string // DependsOnUnsafe is the field which specifies the ordering for container startup and shutdown. DependsOnUnsafe []DependsOn `json:"dependsOn,omitempty"` // V3EndpointID is a container identifier used to construct v3 metadata endpoint; it's unique among // all the containers managed by the agent V3EndpointID string // Image is the image name specified in the task definition Image string // ImageID is the local ID of the image used in the container ImageID string // Command is the command to run in the container which is specified in the task definition Command []string // CPU is the cpu limitation of the container which is specified in the task definition CPU uint `json:"Cpu"` // GPUIDs is the list of GPU ids for a container GPUIDs []string // Memory is the memory limitation of the container which is specified in the task definition Memory uint // Links contains a list of containers to link, corresponding to docker option: --link Links []string // FirelensConfig contains configuration for a Firelens container FirelensConfig *FirelensConfig `json:"firelensConfiguration"` // VolumesFrom contains a list of container's volume to use, corresponding to docker option: --volumes-from VolumesFrom []VolumeFrom `json:"volumesFrom"` // MountPoints contains a list of volume mount paths MountPoints []MountPoint `json:"mountPoints"` // Ports contains a list of ports binding configuration Ports []PortBinding `json:"portMappings"` // Secrets contains a list of secret Secrets []Secret `json:"secrets"` // Essential denotes whether the container is essential or not Essential bool // EntryPoint is entrypoint of the container, corresponding to docker option: --entrypoint EntryPoint *[]string // Environment is the environment variable set in the container Environment map[string]string `json:"environment"` // Overrides contains the configuration to override of a container Overrides ContainerOverrides `json:"overrides"` // DockerConfig is the configuration used to create the container DockerConfig DockerConfig `json:"dockerConfig"` // RegistryAuthentication is the auth data used to pull image RegistryAuthentication *RegistryAuthenticationData `json:"registryAuthentication"` // HealthCheckType is the mechanism to use for the container health check // currently it only supports 'DOCKER' HealthCheckType string `json:"healthCheckType,omitempty"` // Health contains the health check information of container health check Health HealthStatus `json:"-"` // LogsAuthStrategy 
specifies how the logs driver for the container will be // authenticated LogsAuthStrategy string // StartTimeout specifies the time value after which if a container has a dependency // on another container and the dependency conditions are 'SUCCESS', 'COMPLETE', 'HEALTHY', // then that dependency will not be resolved. StartTimeout uint // StopTimeout specifies the time value to be passed as StopContainer api call StopTimeout uint // lock is used for fields that are accessed and updated concurrently lock sync.RWMutex // DesiredStatusUnsafe represents the state where the container should go. Generally, // the desired status is informed by the ECS backend as a result of either // API calls made to ECS or decisions made by the ECS service scheduler, // though the agent may also set the DesiredStatusUnsafe if a different "essential" // container in the task exits. The DesiredStatus is almost always either // ContainerRunning or ContainerStopped. // NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `GetDesiredStatus` // and `SetDesiredStatus`. // TODO DesiredStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON // is handled properly so that the state storage continues to work. DesiredStatusUnsafe apicontainerstatus.ContainerStatus `json:"desiredStatus"` // KnownStatusUnsafe represents the state where the container is. // NOTE: Do not access `KnownStatusUnsafe` directly. Instead, use `GetKnownStatus` // and `SetKnownStatus`. // TODO KnownStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON // is handled properly so that the state storage continues to work. KnownStatusUnsafe apicontainerstatus.ContainerStatus `json:"KnownStatus"` // TransitionDependenciesMap is a map of the dependent container status to other // dependencies that must be satisfied in order for this container to transition. TransitionDependenciesMap TransitionDependenciesMap `json:"TransitionDependencySet"` // SteadyStateDependencies is a list of containers that must be in "steady state" before // this one is created // Note: Current logic requires that the containers specified here are run // before this container can even be pulled. // // Deprecated: Use TransitionDependencySet instead. SteadyStateDependencies is retained for compatibility with old // state files. SteadyStateDependencies []string `json:"RunDependencies"` // Type specifies the container type. Except the 'Normal' type, all other types // are not directly specified by task definitions, but created by the agent. The // JSON tag is retained as this field's previous name 'IsInternal' for maintaining // backwards compatibility. Please see JSON parsing hooks for this type for more // details Type ContainerType `json:"IsInternal"` // AppliedStatus is the status that has been "applied" (e.g., we've called Pull, // Create, Start, or Stop) but we don't yet know that the application was successful. // No need to save it in the state file, as agent will synchronize the container status // on restart and for some operation eg: pull, it has to be recalled again. AppliedStatus apicontainerstatus.ContainerStatus `json:"-"` // ApplyingError is an error that occurred trying to transition the container // to its desired state. It is propagated to the backend in the form // 'Name: ErrorString' as the 'reason' field. 
ApplyingError *apierrors.DefaultNamedError // SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS // SubmitContainerStateChange API. // TODO SentStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON is // handled properly so that the state storage continues to work. SentStatusUnsafe apicontainerstatus.ContainerStatus `json:"SentStatus"` // MetadataFileUpdated is set to true when we have completed updating the // metadata file MetadataFileUpdated bool `json:"metadataFileUpdated"` // KnownExitCodeUnsafe specifies the exit code for the container. // It is exposed outside of the package so that it's marshalled/unmarshalled in // the JSON body while saving the state. // NOTE: Do not access KnownExitCodeUnsafe directly. Instead, use `GetKnownExitCode` // and `SetKnownExitCode`. KnownExitCodeUnsafe *int `json:"KnownExitCode"` // KnownPortBindingsUnsafe is an array of port bindings for the container. KnownPortBindingsUnsafe []PortBinding `json:"KnownPortBindings"` // VolumesUnsafe is an array of volume mounts in the container. VolumesUnsafe []types.MountPoint `json:"-"` // NetworkModeUnsafe is the network mode in which the container is started NetworkModeUnsafe string `json:"-"` // NetworksUnsafe denotes the Docker Network Settings in the container. NetworkSettingsUnsafe *types.NetworkSettings `json:"-"` // SteadyStateStatusUnsafe specifies the steady state status for the container // If uninitialized, it's assumed to be set to 'ContainerRunning'. Even though // it's not only supposed to be set when the container is being created, it's // exposed outside of the package so that it's marshalled/unmarshalled in the // the JSON body while saving the state SteadyStateStatusUnsafe *apicontainerstatus.ContainerStatus `json:"SteadyStateStatus,omitempty"` createdAt time.Time startedAt time.Time finishedAt time.Time labels map[string]string } type DependsOn struct { ContainerName string `json:"containerName"` Condition string `json:"condition"` } // DockerContainer is a mapping between containers-as-docker-knows-them and // containers-as-we-know-them. // This is primarily used in DockerState, but lives here such that tasks and // containers know how to convert themselves into Docker's desired config format type DockerContainer struct { DockerID string `json:"DockerId"` DockerName string // needed for linking Container *Container } // MountPoint describes the in-container location of a Volume and references // that Volume by name. type MountPoint struct { SourceVolume string `json:"sourceVolume"` ContainerPath string `json:"containerPath"` ReadOnly bool `json:"readOnly"` } // FirelensConfig describes the type and options of a Firelens container. type FirelensConfig struct { Type string `json:"type"` Options map[string]string `json:"options"` } // VolumeFrom is a volume which references another container as its source. 
type VolumeFrom struct { SourceContainer string `json:"sourceContainer"` ReadOnly bool `json:"readOnly"` } // Secret contains all essential attributes needed for ECS secrets vending as environment variables/tmpfs files type Secret struct { Name string `json:"name"` ValueFrom string `json:"valueFrom"` Region string `json:"region"` ContainerPath string `json:"containerPath"` Type string `json:"type"` Provider string `json:"provider"` Target string `json:"target"` } // GetSecretResourceCacheKey returns the key required to access the secret // from the ssmsecret resource func (s *Secret) GetSecretResourceCacheKey() string { return s.ValueFrom + "_" + s.Region } // String returns a human readable string representation of DockerContainer func (dc *DockerContainer) String() string { if dc == nil { return "nil" } return fmt.Sprintf("Id: %s, Name: %s, Container: %s", dc.DockerID, dc.DockerName, dc.Container.String()) } // NewContainerWithSteadyState creates a new Container object with the specified // steady state. Containers that need the non default steady state set will // use this method instead of setting it directly func NewContainerWithSteadyState(steadyState apicontainerstatus.ContainerStatus) *Container { steadyStateStatus := steadyState return &Container{ SteadyStateStatusUnsafe: &steadyStateStatus, } } // KnownTerminal returns true if the container's known status is STOPPED func (c *Container) KnownTerminal() bool { return c.GetKnownStatus().Terminal() } // DesiredTerminal returns true if the container's desired status is STOPPED func (c *Container) DesiredTerminal() bool { return c.GetDesiredStatus().Terminal() } // GetKnownStatus returns the known status of the container func (c *Container) GetKnownStatus() apicontainerstatus.ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.KnownStatusUnsafe } // SetKnownStatus sets the known status of the container and update the container // applied status func (c *Container) SetKnownStatus(status apicontainerstatus.ContainerStatus) { c.lock.Lock() defer c.lock.Unlock() c.KnownStatusUnsafe = status c.updateAppliedStatusUnsafe(status) } // GetDesiredStatus gets the desired status of the container func (c *Container) GetDesiredStatus() apicontainerstatus.ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.DesiredStatusUnsafe } // SetDesiredStatus sets the desired status of the container func (c *Container) SetDesiredStatus(status apicontainerstatus.ContainerStatus) { c.lock.Lock() defer c.lock.Unlock() c.DesiredStatusUnsafe = status } // GetSentStatus safely returns the SentStatusUnsafe of the container func (c *Container) GetSentStatus() apicontainerstatus.ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.SentStatusUnsafe } // SetSentStatus safely sets the SentStatusUnsafe of the container func (c *Container) SetSentStatus(status apicontainerstatus.ContainerStatus) { c.lock.Lock() defer c.lock.Unlock() c.SentStatusUnsafe = status } // SetKnownExitCode sets exit code field in container struct func (c *Container) SetKnownExitCode(i *int) { c.lock.Lock() defer c.lock.Unlock() c.KnownExitCodeUnsafe = i } // GetKnownExitCode returns the container exit code func (c *Container) GetKnownExitCode() *int { c.lock.RLock() defer c.lock.RUnlock() return c.KnownExitCodeUnsafe } // SetRegistryAuthCredentials sets the credentials for pulling image from ECR func (c *Container) SetRegistryAuthCredentials(credential credentials.IAMRoleCredentials) { c.lock.Lock() defer c.lock.Unlock() 
c.RegistryAuthentication.ECRAuthData.SetPullCredentials(credential) } // ShouldPullWithExecutionRole returns whether this container has its own ECR credentials func (c *Container) ShouldPullWithExecutionRole() bool { c.lock.RLock() defer c.lock.RUnlock() return c.RegistryAuthentication != nil && c.RegistryAuthentication.Type == AuthTypeECR && c.RegistryAuthentication.ECRAuthData != nil && c.RegistryAuthentication.ECRAuthData.UseExecutionRole } // String returns a human readable string representation of this object func (c *Container) String() string { ret := fmt.Sprintf("%s(%s) (%s->%s)", c.Name, c.Image, c.GetKnownStatus().String(), c.GetDesiredStatus().String()) if c.GetKnownExitCode() != nil { ret += " - Exit: " + strconv.Itoa(*c.GetKnownExitCode()) } return ret } // GetSteadyStateStatus returns the steady state status for the container. If // Container.steadyState is not initialized, the default steady state status // defined by `defaultContainerSteadyStateStatus` is returned. The 'pause' // container's steady state differs from that of other containers, as the // 'pause' container can reach its teady state once networking resources // have been provisioned for it, which is done in the `ContainerResourcesProvisioned` // state func (c *Container) GetSteadyStateStatus() apicontainerstatus.ContainerStatus { if c.SteadyStateStatusUnsafe == nil { return defaultContainerSteadyStateStatus } return *c.SteadyStateStatusUnsafe } // IsKnownSteadyState returns true if the `KnownState` of the container equals // the `steadyState` defined for the container func (c *Container) IsKnownSteadyState() bool { knownStatus := c.GetKnownStatus() return knownStatus == c.GetSteadyStateStatus() } // GetNextKnownStateProgression returns the state that the container should // progress to based on its `KnownState`. The progression is // incremental until the container reaches its steady state. From then on, // it transitions to `ContainerStopped`. // // For example: // a. if the steady state of the container is defined as `ContainerRunning`, // the progression is: // Container: None -> Pulled -> Created -> Running* -> Stopped -> Zombie // // b. if the steady state of the container is defined as `ContainerResourcesProvisioned`, // the progression is: // Container: None -> Pulled -> Created -> Running -> Provisioned* -> Stopped -> Zombie // // c. if the steady state of the container is defined as `ContainerCreated`, // the progression is: // Container: None -> Pulled -> Created* -> Stopped -> Zombie func (c *Container) GetNextKnownStateProgression() apicontainerstatus.ContainerStatus { if c.IsKnownSteadyState() { return apicontainerstatus.ContainerStopped } return c.GetKnownStatus() + 1 } // IsInternal returns true if the container type is `ContainerCNIPause` // or `ContainerNamespacePause`. It returns false otherwise func (c *Container) IsInternal() bool { if c.Type == ContainerNormal { return false } return true } // IsRunning returns true if the container's known status is either RUNNING // or RESOURCES_PROVISIONED. 
It returns false otherwise func (c *Container) IsRunning() bool { return c.GetKnownStatus().IsRunning() } // IsMetadataFileUpdated returns true if the metadata file has been once the // metadata file is ready and will no longer change func (c *Container) IsMetadataFileUpdated() bool { c.lock.RLock() defer c.lock.RUnlock() return c.MetadataFileUpdated } // SetMetadataFileUpdated sets the container's MetadataFileUpdated status to true func (c *Container) SetMetadataFileUpdated() { c.lock.Lock() defer c.lock.Unlock() c.MetadataFileUpdated = true } // IsEssential returns whether the container is an essential container or not func (c *Container) IsEssential() bool { c.lock.RLock() defer c.lock.RUnlock() return c.Essential } // AWSLogAuthExecutionRole returns true if the auth is by execution role func (c *Container) AWSLogAuthExecutionRole() bool { return c.LogsAuthStrategy == awslogsAuthExecutionRole } // SetCreatedAt sets the timestamp for container's creation time func (c *Container) SetCreatedAt(createdAt time.Time) { if createdAt.IsZero() { return } c.lock.Lock() defer c.lock.Unlock() c.createdAt = createdAt } // SetStartedAt sets the timestamp for container's start time func (c *Container) SetStartedAt(startedAt time.Time) { if startedAt.IsZero() { return } c.lock.Lock() defer c.lock.Unlock() c.startedAt = startedAt } // SetFinishedAt sets the timestamp for container's stopped time func (c *Container) SetFinishedAt(finishedAt time.Time) { if finishedAt.IsZero() { return } c.lock.Lock() defer c.lock.Unlock() c.finishedAt = finishedAt } // GetCreatedAt sets the timestamp for container's creation time func (c *Container) GetCreatedAt() time.Time { c.lock.RLock() defer c.lock.RUnlock() return c.createdAt } // GetStartedAt sets the timestamp for container's start time func (c *Container) GetStartedAt() time.Time { c.lock.RLock() defer c.lock.RUnlock() return c.startedAt } // GetFinishedAt sets the timestamp for container's stopped time func (c *Container) GetFinishedAt() time.Time { c.lock.RLock() defer c.lock.RUnlock() return c.finishedAt } // SetLabels sets the labels for a container func (c *Container) SetLabels(labels map[string]string) { c.lock.Lock() defer c.lock.Unlock() c.labels = labels } // SetRuntimeID sets the DockerID for a container func (c *Container) SetRuntimeID(RuntimeID string) { c.lock.Lock() defer c.lock.Unlock() c.RuntimeID = RuntimeID } // GetRuntimeID gets the DockerID for a container func (c *Container) GetRuntimeID() string { c.lock.RLock() defer c.lock.RUnlock() return c.RuntimeID } // GetLabels gets the labels for a container func (c *Container) GetLabels() map[string]string { c.lock.RLock() defer c.lock.RUnlock() return c.labels } // SetKnownPortBindings sets the ports for a container func (c *Container) SetKnownPortBindings(ports []PortBinding) { c.lock.Lock() defer c.lock.Unlock() c.KnownPortBindingsUnsafe = ports } // GetKnownPortBindings gets the ports for a container func (c *Container) GetKnownPortBindings() []PortBinding { c.lock.RLock() defer c.lock.RUnlock() return c.KnownPortBindingsUnsafe } // SetVolumes sets the volumes mounted in a container func (c *Container) SetVolumes(volumes []types.MountPoint) { c.lock.Lock() defer c.lock.Unlock() c.VolumesUnsafe = volumes } // GetVolumes returns the volumes mounted in a container func (c *Container) GetVolumes() []types.MountPoint { c.lock.RLock() defer c.lock.RUnlock() return c.VolumesUnsafe } // SetNetworkSettings sets the networks field in a container func (c *Container) SetNetworkSettings(networks 
*types.NetworkSettings) { c.lock.Lock() defer c.lock.Unlock() c.NetworkSettingsUnsafe = networks } // GetNetworkSettings returns the networks field in a container func (c *Container) GetNetworkSettings() *types.NetworkSettings { c.lock.RLock() defer c.lock.RUnlock() return c.NetworkSettingsUnsafe } // SetNetworkMode sets the network mode of the container func (c *Container) SetNetworkMode(networkMode string) { c.lock.Lock() defer c.lock.Unlock() c.NetworkModeUnsafe = networkMode } // GetNetworkMode returns the network mode of the container func (c *Container) GetNetworkMode() string { c.lock.RLock() defer c.lock.RUnlock() return c.NetworkModeUnsafe } // HealthStatusShouldBeReported returns true if the health check is defined in // the task definition func (c *Container) HealthStatusShouldBeReported() bool { return c.HealthCheckType == DockerHealthCheckType } // SetHealthStatus sets the container health status func (c *Container) SetHealthStatus(health HealthStatus) { c.lock.Lock() defer c.lock.Unlock() if c.Health.Status == health.Status { return } c.Health.Status = health.Status c.Health.Since = aws.Time(time.Now()) c.Health.Output = health.Output // Set the health exit code if the health check failed if c.Health.Status == apicontainerstatus.ContainerUnhealthy { c.Health.ExitCode = health.ExitCode } } // GetHealthStatus returns the container health information func (c *Container) GetHealthStatus() HealthStatus { c.lock.RLock() defer c.lock.RUnlock() // Copy the pointer to avoid race condition copyHealth := c.Health if c.Health.Since != nil { copyHealth.Since = aws.Time(aws.TimeValue(c.Health.Since)) } return copyHealth } // BuildContainerDependency adds a new dependency container and satisfied status // to the dependent container func (c *Container) BuildContainerDependency(contName string, satisfiedStatus apicontainerstatus.ContainerStatus, dependentStatus apicontainerstatus.ContainerStatus) { contDep := ContainerDependency{ ContainerName: contName, SatisfiedStatus: satisfiedStatus, } if _, ok := c.TransitionDependenciesMap[dependentStatus]; !ok { c.TransitionDependenciesMap[dependentStatus] = TransitionDependencySet{} } deps := c.TransitionDependenciesMap[dependentStatus] deps.ContainerDependencies = append(deps.ContainerDependencies, contDep) c.TransitionDependenciesMap[dependentStatus] = deps } // BuildResourceDependency adds a new resource dependency by taking in the required status // of the resource that satisfies the dependency and the dependent container status, // whose transition is dependent on the resource. 
// example: if container's PULLED transition is dependent on volume resource's // CREATED status, then RequiredStatus=VolumeCreated and dependentStatus=ContainerPulled func (c *Container) BuildResourceDependency(resourceName string, requiredStatus resourcestatus.ResourceStatus, dependentStatus apicontainerstatus.ContainerStatus) { resourceDep := ResourceDependency{ Name: resourceName, RequiredStatus: requiredStatus, } if _, ok := c.TransitionDependenciesMap[dependentStatus]; !ok { c.TransitionDependenciesMap[dependentStatus] = TransitionDependencySet{} } deps := c.TransitionDependenciesMap[dependentStatus] deps.ResourceDependencies = append(deps.ResourceDependencies, resourceDep) c.TransitionDependenciesMap[dependentStatus] = deps } // updateAppliedStatusUnsafe updates the container transitioning status func (c *Container) updateAppliedStatusUnsafe(knownStatus apicontainerstatus.ContainerStatus) { if c.AppliedStatus == apicontainerstatus.ContainerStatusNone { return } // Check if the container transition has already finished if c.AppliedStatus <= knownStatus { c.AppliedStatus = apicontainerstatus.ContainerStatusNone } } // SetAppliedStatus sets the applied status of container and returns whether // the container is already in a transition func (c *Container) SetAppliedStatus(status apicontainerstatus.ContainerStatus) bool { c.lock.Lock() defer c.lock.Unlock() if c.AppliedStatus != apicontainerstatus.ContainerStatusNone { // return false to indicate the set operation failed return false } c.AppliedStatus = status return true } // GetAppliedStatus returns the transitioning status of container func (c *Container) GetAppliedStatus() apicontainerstatus.ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.AppliedStatus } // ShouldPullWithASMAuth returns true if this container needs to retrieve // private registry authentication data from ASM func (c *Container) ShouldPullWithASMAuth() bool { c.lock.RLock() defer c.lock.RUnlock() return c.RegistryAuthentication != nil && c.RegistryAuthentication.Type == AuthTypeASM && c.RegistryAuthentication.ASMAuthData != nil } // SetASMDockerAuthConfig add the docker auth config data to the // RegistryAuthentication struct held by the container, this is then passed down // to the docker client to pull the image func (c *Container) SetASMDockerAuthConfig(dac types.AuthConfig) { c.RegistryAuthentication.ASMAuthData.SetDockerAuthConfig(dac) } // SetV3EndpointID sets the v3 endpoint id of container func (c *Container) SetV3EndpointID(v3EndpointID string) { c.lock.Lock() defer c.lock.Unlock() c.V3EndpointID = v3EndpointID } // GetV3EndpointID returns the v3 endpoint id of container func (c *Container) GetV3EndpointID() string { c.lock.RLock() defer c.lock.RUnlock() return c.V3EndpointID } // InjectV3MetadataEndpoint injects the v3 metadata endpoint as an environment variable for a container func (c *Container) InjectV3MetadataEndpoint() { c.lock.Lock() defer c.lock.Unlock() // don't assume that the environment variable map has been initialized by others if c.Environment == nil { c.Environment = make(map[string]string) } c.Environment[MetadataURIEnvironmentVariableName] = fmt.Sprintf(MetadataURIFormat, c.V3EndpointID) } // ShouldCreateWithSSMSecret returns true if this container needs to get secret // value from SSM Parameter Store func (c *Container) ShouldCreateWithSSMSecret() bool { c.lock.RLock() defer c.lock.RUnlock() // Secrets field will be nil if there is no secrets for container if c.Secrets == nil { return false } for _, secret := range 
c.Secrets { if secret.Provider == SecretProviderSSM { return true } } return false } // ShouldCreateWithASMSecret returns true if this container needs to get secret // value from AWS Secrets Manager func (c *Container) ShouldCreateWithASMSecret() bool { c.lock.RLock() defer c.lock.RUnlock() // Secrets field will be nil if there is no secrets for container if c.Secrets == nil { return false } for _, secret := range c.Secrets { if secret.Provider == SecretProviderASM { return true } } return false } // MergeEnvironmentVariables appends additional envVarName:envVarValue pairs to // the the container's enviornment values structure func (c *Container) MergeEnvironmentVariables(envVars map[string]string) { c.lock.Lock() defer c.lock.Unlock() // don't assume that the environment variable map has been initialized by others if c.Environment == nil { c.Environment = make(map[string]string) } for k, v := range envVars { c.Environment[k] = v } } // HasSecret returns whether a container has secret based on a certain condition. func (c *Container) HasSecret(f func(s Secret) bool) bool { c.lock.RLock() defer c.lock.RUnlock() if c.Secrets == nil { return false } for _, secret := range c.Secrets { if f(secret) { return true } } return false } func (c *Container) GetStartTimeout() time.Duration { c.lock.Lock() defer c.lock.Unlock() return time.Duration(c.StartTimeout) * time.Second } func (c *Container) GetStopTimeout() time.Duration { c.lock.Lock() defer c.lock.Unlock() return time.Duration(c.StopTimeout) * time.Second } func (c *Container) GetDependsOn() []DependsOn { c.lock.RLock() defer c.lock.RUnlock() return c.DependsOnUnsafe } func (c *Container) SetDependsOn(dependsOn []DependsOn) { c.lock.Lock() defer c.lock.Unlock() c.DependsOnUnsafe = dependsOn } // DependsOnContainer checks whether a container depends on another container. func (c *Container) DependsOnContainer(name string) bool { c.lock.RLock() defer c.lock.RUnlock() for _, dependsOn := range c.DependsOnUnsafe { if dependsOn.ContainerName == name { return true } } return false } // HasContainerDependencies checks whether a container has any container dependency. func (c *Container) HasContainerDependencies() bool { c.lock.RLock() defer c.lock.RUnlock() return len(c.DependsOnUnsafe) != 0 } // AddContainerDependency adds a container dependency to a container. func (c *Container) AddContainerDependency(name string, condition string) { c.lock.Lock() defer c.lock.Unlock() c.DependsOnUnsafe = append(c.DependsOnUnsafe, DependsOn{ ContainerName: name, Condition: condition, }) } // GetLogDriver returns the log driver used by the container. func (c *Container) GetLogDriver() string { c.lock.RLock() defer c.lock.RUnlock() if c.DockerConfig.HostConfig == nil { return "" } hostConfig := &dockercontainer.HostConfig{} err := json.Unmarshal([]byte(*c.DockerConfig.HostConfig), hostConfig) if err != nil { seelog.Warnf("Encountered error when trying to get log driver for container %s: %v", err) return "" } return hostConfig.LogConfig.Type } // GetHostConfig returns the container's host config. func (c *Container) GetHostConfig() *string { c.lock.RLock() defer c.lock.RUnlock() return c.DockerConfig.HostConfig } // GetFirelensConfig returns the container's firelens config. func (c *Container) GetFirelensConfig() *FirelensConfig { c.lock.RLock() defer c.lock.RUnlock() return c.FirelensConfig }
1
23,337
This may require a state file version update.
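A minimal Go sketch of why the state file is in question, using a trimmed stand-in for the Container struct (field names mirror the diff, but this is an illustration, not the agent's code). State persisted before the field existed decodes with ImageDigest left empty rather than failing, so the question is whether that silent default is acceptable without a version bump:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-in for the agent's Container type; only the fields
// relevant to the diff are kept.
type Container struct {
	Image       string
	ImageID     string
	ImageDigest string // new field from the diff above
}

func main() {
	// State written by an older agent has no ImageDigest key.
	old := []byte(`{"Image":"nginx:latest","ImageID":"sha256:abc"}`)
	var c Container
	if err := json.Unmarshal(old, &c); err != nil {
		panic(err)
	}
	// The new field decodes to its zero value instead of erroring.
	fmt.Printf("digest=%q\n", c.ImageDigest) // digest=""
}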
aws-amazon-ecs-agent
go
@@ -4,6 +4,9 @@ namespace Shopsys\FrameworkBundle\Component\Csv; use Symfony\Component\Filesystem\Exception\FileNotFoundException; +/** + * @deprecated Class is obsolete and will be removed in the next major. Use SplFileObject::fgetcsv() instead. + */ class CsvReader { /**
1
<?php namespace Shopsys\FrameworkBundle\Component\Csv; use Symfony\Component\Filesystem\Exception\FileNotFoundException; class CsvReader { /** * @param string $filename * @param string $delimiter * @return array */ public function getRowsFromCsv($filename, $delimiter = ';') { if (!file_exists($filename) || !is_readable($filename)) { throw new FileNotFoundException(); } $rows = []; $handle = fopen($filename, 'r'); if ($handle === false) { return $rows; } do { $row = fgetcsv($handle, 0, $delimiter); if ($row === false) { break; } $rows[] = $row; } while (true); fclose($handle); return $rows; } }
1
24,103
I like it when `@deprecated` says how to resolve it - e.g. "use `SplFileObject::fgetcsv()` instead". The information in upgrade notes is useful only during the upgrade.
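A minimal sketch of the suggested replacement, reading the same semicolon-delimited rows that CsvReader::getRowsFromCsv() handled (the $filename value is illustrative):

<?php

$filename = __DIR__ . '/products.csv'; // illustrative path

$file = new \SplFileObject($filename, 'r');
$file->setFlags(\SplFileObject::READ_CSV);
$file->setCsvControl(';');

$rows = [];
foreach ($file as $row) {
    // With READ_CSV, a trailing blank line can come back as [null] or false.
    if ($row === false || $row === [null]) {
        continue;
    }
    $rows[] = $row;
}

This mirrors the deprecated class without the hand-rolled fopen()/fgetcsv() loop.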
shopsys-shopsys
php
@@ -7,8 +7,8 @@ package mock import ( gomock "github.com/golang/mock/gomock" gcpclient "github.com/openshift/hive/pkg/gcpclient" - v1 "google.golang.org/api/compute/v1" - v10 "google.golang.org/api/dns/v1" + compute "google.golang.org/api/compute/v1" + dns "google.golang.org/api/dns/v1" reflect "reflect" )
1
// Code generated by MockGen. DO NOT EDIT. // Source: ./client.go // Package mock is a generated GoMock package. package mock import ( gomock "github.com/golang/mock/gomock" gcpclient "github.com/openshift/hive/pkg/gcpclient" v1 "google.golang.org/api/compute/v1" v10 "google.golang.org/api/dns/v1" reflect "reflect" ) // MockClient is a mock of Client interface type MockClient struct { ctrl *gomock.Controller recorder *MockClientMockRecorder } // MockClientMockRecorder is the mock recorder for MockClient type MockClientMockRecorder struct { mock *MockClient } // NewMockClient creates a new mock instance func NewMockClient(ctrl *gomock.Controller) *MockClient { mock := &MockClient{ctrl: ctrl} mock.recorder = &MockClientMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use func (m *MockClient) EXPECT() *MockClientMockRecorder { return m.recorder } // ListManagedZones mocks base method func (m *MockClient) ListManagedZones(opts gcpclient.ListManagedZonesOptions) (*v10.ManagedZonesListResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListManagedZones", opts) ret0, _ := ret[0].(*v10.ManagedZonesListResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // ListManagedZones indicates an expected call of ListManagedZones func (mr *MockClientMockRecorder) ListManagedZones(opts interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListManagedZones", reflect.TypeOf((*MockClient)(nil).ListManagedZones), opts) } // ListResourceRecordSets mocks base method func (m *MockClient) ListResourceRecordSets(managedZone string, opts gcpclient.ListResourceRecordSetsOptions) (*v10.ResourceRecordSetsListResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListResourceRecordSets", managedZone, opts) ret0, _ := ret[0].(*v10.ResourceRecordSetsListResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // ListResourceRecordSets indicates an expected call of ListResourceRecordSets func (mr *MockClientMockRecorder) ListResourceRecordSets(managedZone, opts interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListResourceRecordSets", reflect.TypeOf((*MockClient)(nil).ListResourceRecordSets), managedZone, opts) } // AddResourceRecordSet mocks base method func (m *MockClient) AddResourceRecordSet(managedZone string, recordSet *v10.ResourceRecordSet) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AddResourceRecordSet", managedZone, recordSet) ret0, _ := ret[0].(error) return ret0 } // AddResourceRecordSet indicates an expected call of AddResourceRecordSet func (mr *MockClientMockRecorder) AddResourceRecordSet(managedZone, recordSet interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddResourceRecordSet", reflect.TypeOf((*MockClient)(nil).AddResourceRecordSet), managedZone, recordSet) } // DeleteResourceRecordSet mocks base method func (m *MockClient) DeleteResourceRecordSet(managedZone string, recordSet *v10.ResourceRecordSet) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteResourceRecordSet", managedZone, recordSet) ret0, _ := ret[0].(error) return ret0 } // DeleteResourceRecordSet indicates an expected call of DeleteResourceRecordSet func (mr *MockClientMockRecorder) DeleteResourceRecordSet(managedZone, recordSet interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourceRecordSet", 
reflect.TypeOf((*MockClient)(nil).DeleteResourceRecordSet), managedZone, recordSet) } // DeleteResourceRecordSets mocks base method func (m *MockClient) DeleteResourceRecordSets(managedZone string, recordSet []*v10.ResourceRecordSet) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteResourceRecordSets", managedZone, recordSet) ret0, _ := ret[0].(error) return ret0 } // DeleteResourceRecordSets indicates an expected call of DeleteResourceRecordSets func (mr *MockClientMockRecorder) DeleteResourceRecordSets(managedZone, recordSet interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourceRecordSets", reflect.TypeOf((*MockClient)(nil).DeleteResourceRecordSets), managedZone, recordSet) } // UpdateResourceRecordSet mocks base method func (m *MockClient) UpdateResourceRecordSet(managedZone string, addRecordSet, removeRecordSet *v10.ResourceRecordSet) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpdateResourceRecordSet", managedZone, addRecordSet, removeRecordSet) ret0, _ := ret[0].(error) return ret0 } // UpdateResourceRecordSet indicates an expected call of UpdateResourceRecordSet func (mr *MockClientMockRecorder) UpdateResourceRecordSet(managedZone, addRecordSet, removeRecordSet interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateResourceRecordSet", reflect.TypeOf((*MockClient)(nil).UpdateResourceRecordSet), managedZone, addRecordSet, removeRecordSet) } // GetManagedZone mocks base method func (m *MockClient) GetManagedZone(managedZone string) (*v10.ManagedZone, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetManagedZone", managedZone) ret0, _ := ret[0].(*v10.ManagedZone) ret1, _ := ret[1].(error) return ret0, ret1 } // GetManagedZone indicates an expected call of GetManagedZone func (mr *MockClientMockRecorder) GetManagedZone(managedZone interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetManagedZone", reflect.TypeOf((*MockClient)(nil).GetManagedZone), managedZone) } // CreateManagedZone mocks base method func (m *MockClient) CreateManagedZone(managedZone *v10.ManagedZone) (*v10.ManagedZone, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateManagedZone", managedZone) ret0, _ := ret[0].(*v10.ManagedZone) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateManagedZone indicates an expected call of CreateManagedZone func (mr *MockClientMockRecorder) CreateManagedZone(managedZone interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateManagedZone", reflect.TypeOf((*MockClient)(nil).CreateManagedZone), managedZone) } // DeleteManagedZone mocks base method func (m *MockClient) DeleteManagedZone(managedZone string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteManagedZone", managedZone) ret0, _ := ret[0].(error) return ret0 } // DeleteManagedZone indicates an expected call of DeleteManagedZone func (mr *MockClientMockRecorder) DeleteManagedZone(managedZone interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteManagedZone", reflect.TypeOf((*MockClient)(nil).DeleteManagedZone), managedZone) } // ListComputeZones mocks base method func (m *MockClient) ListComputeZones(arg0 gcpclient.ListComputeZonesOptions) (*v1.ZoneList, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListComputeZones", arg0) ret0, _ := ret[0].(*v1.ZoneList) ret1, _ := ret[1].(error) return ret0, ret1 } // 
ListComputeZones indicates an expected call of ListComputeZones func (mr *MockClientMockRecorder) ListComputeZones(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListComputeZones", reflect.TypeOf((*MockClient)(nil).ListComputeZones), arg0) } // ListComputeImages mocks base method func (m *MockClient) ListComputeImages(arg0 gcpclient.ListComputeImagesOptions) (*v1.ImageList, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListComputeImages", arg0) ret0, _ := ret[0].(*v1.ImageList) ret1, _ := ret[1].(error) return ret0, ret1 } // ListComputeImages indicates an expected call of ListComputeImages func (mr *MockClientMockRecorder) ListComputeImages(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListComputeImages", reflect.TypeOf((*MockClient)(nil).ListComputeImages), arg0) } // ListComputeInstances mocks base method func (m *MockClient) ListComputeInstances(arg0 gcpclient.ListComputeInstancesOptions, arg1 func(*v1.InstanceAggregatedList) error) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListComputeInstances", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // ListComputeInstances indicates an expected call of ListComputeInstances func (mr *MockClientMockRecorder) ListComputeInstances(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListComputeInstances", reflect.TypeOf((*MockClient)(nil).ListComputeInstances), arg0, arg1) } // StopInstance mocks base method func (m *MockClient) StopInstance(arg0 *v1.Instance) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StopInstance", arg0) ret0, _ := ret[0].(error) return ret0 } // StopInstance indicates an expected call of StopInstance func (mr *MockClientMockRecorder) StopInstance(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopInstance", reflect.TypeOf((*MockClient)(nil).StopInstance), arg0) } // StartInstance mocks base method func (m *MockClient) StartInstance(arg0 *v1.Instance) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StartInstance", arg0) ret0, _ := ret[0].(error) return ret0 } // StartInstance indicates an expected call of StartInstance func (mr *MockClientMockRecorder) StartInstance(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartInstance", reflect.TypeOf((*MockClient)(nil).StartInstance), arg0) }
1
14,805
Nice to see that your local environment is now up to date for these changes.
openshift-hive
go
@@ -1268,7 +1268,13 @@ var AppRouter = Backbone.Router.extend({ } // Init sidebar based on the current url - self.sidebar.init(); + if (countlyVue.sideBarComponent) { + var selectedMenuItem = countlyVue.sideBarComponent.$store.getters["countlySidebar/getSelectedMenuItem"]; + var currLink = "#" + Backbone.history.fragment; + if (selectedMenuItem.item.url !== currLink) { + countlyVue.sideBarComponent.$store.dispatch("countlySidebar/updateSelectedMenuItem", null); + } + } }, sidebar: { init: function() {
1
/* global Backbone, countlyAuth, Handlebars, countlyEvent, countlyCommon, countlyGlobal, CountlyHelpers, countlySession, moment, Drop, _, store, countlyLocation, jQuery, $, T, countlyTaskManager, countlyVue*/ /** * Default Backbone View template from which all countly views should inherit. * A countly view is defined as a page corresponding to a url fragment such * as #/manage/apps. This interface defines common functions or properties * the view object has. A view may override any function or property. * @name countlyView * @global * @namespace countlyView * @example <caption>Extending default view and overwriting its methods</caption> * window.DashboardView = countlyView.extend({ * renderCommon:function () { * if(countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID]){ * var type = countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type; * type = jQuery.i18n.map["management-applications.types."+type] || type; * $(this.el).html("<div id='no-app-type'><h1>"+jQuery.i18n.map["common.missing-type"]+": "+type+"</h1></div>"); * } * else{ * $(this.el).html("<div id='no-app-type'><h1>"+jQuery.i18n.map["management-applications.no-app-warning"]+"</h1></div>"); * } * } * }); */ var countlyView = Backbone.View.extend({ /** * Checking state of view, if it is loaded * @type {boolean} * @instance * @memberof countlyView */ isLoaded: false, /** * Handlebar template * @type {object} * @instance * @memberof countlyView */ template: null, //handlebars template of the view /** * Data to pass to Handlebar template when building it * @type {object} * @instance * @memberof countlyView */ templateData: {}, //data to be used while rendering the template /** * Main container which contents to replace by compiled template * @type {jquery_object} * @instance * @memberof countlyView */ el: $('#content'), //jquery element to render view into _myRequests: {}, //save requests called for this view /** * Initialize view, overwrite it with at least empty function if you are using some custom remote template * @memberof countlyView * @instance */ initialize: function() { //compile view template this.template = Handlebars.compile($("#template-analytics-common").html()); }, _removeMyRequests: function() { for (var url in this._myRequests) { for (var data in this._myRequests[url]) { //4 means done, less still in progress if (parseInt(this._myRequests[url][data].readyState) !== 4) { this._myRequests[url][data].abort(); } } } this._myRequests = {}; }, /** * This method is called when date is changed, default behavior is to call refresh method of the view * @memberof countlyView * @instance */ dateChanged: function() { //called when user changes the date selected if (Backbone.history.fragment === "/") { this.refresh(true); } else { this.refresh(); } }, /** * This method is called when app is changed, default behavior is to reset preloaded data as events * @param {function=} callback - callback function * @memberof countlyView * @instance */ appChanged: function(callback) { //called when user changes selected app from the sidebar countlyEvent.reset(); $.when(countlyEvent.initialize()).always(function() { if (callback) { callback(); } }); }, /** * This method is called before calling render, load your data and remote template if needed here * @returns {boolean} true * @memberof countlyView * @instance * @example *beforeRender: function() { * var self = this; * return $.when(T.render('/density/templates/density.html', function(src){ * self.template = src; * }), countlyDeviceDetails.initialize(), 
countlyTotalUsers.initialize("densities"), countlyDensity.initialize()).then(function () {}); *} */ beforeRender: function() { return true; }, /** * This method is called after calling render method * @memberof countlyView * @instance */ afterRender: function() { CountlyHelpers.makeSelectNative(); }, /** * Main render method, better not to over write it, but use {@link countlyView.renderCommon} instead * @returns {object} this * @memberof countlyView * @instance */ render: function() { //backbone.js view render function // var currLink = Backbone.history.fragment; // // Reset any active views and dropdowns // $("#main-views-container").find(".main-view").removeClass("active"); // $("#top-bar").find(".dropdown.active").removeClass("active"); // // Activate the main view and dropdown based on the active view // if (/^\/custom/.test(currLink) === true) { // $("#dashboards-main-view").addClass("active"); // $("#dashboard-selection").addClass("active"); // } // else { // $("#analytics-main-view").addClass("active"); // $("#app-navigation").addClass("active"); // } $("#content-top").html(""); this.el.html(''); if (countlyCommon.ACTIVE_APP_ID) { var self = this; $.when(this.beforeRender(), initializeOnce()).fail(function(XMLHttpRequest, textStatus, errorThrown) { if (XMLHttpRequest && XMLHttpRequest.status === 0) { // eslint-disable-next-line no-console console.error("Check Your Network Connection"); } else if (XMLHttpRequest && XMLHttpRequest.status === 404) { // eslint-disable-next-line no-console console.error("Requested URL not found: " + XMLHttpRequest.my_set_url + " with " + JSON.stringify(XMLHttpRequest.my_set_data)); } else if (XMLHttpRequest && XMLHttpRequest.status === 500) { // eslint-disable-next-line no-console console.error("Internel Server Error: " + XMLHttpRequest.my_set_url + " with " + JSON.stringify(XMLHttpRequest.my_set_data)); } else if ((XMLHttpRequest && typeof XMLHttpRequest.status === "undefined") || errorThrown) { // eslint-disable-next-line no-console console.error("Unknow Error: "); if (XMLHttpRequest) { // eslint-disable-next-line no-console console.log(XMLHttpRequest.my_set_url + " with " + JSON.stringify(XMLHttpRequest.my_set_data) + "\n" + (XMLHttpRequest.responseText) + "\n"); } // eslint-disable-next-line no-console console.error(textStatus + "\n" + errorThrown); } }) .always(function() { if (app.activeView === self) { self.isLoaded = true; self.renderCommon(); self.afterRender(); app.pageScript(); } }); } else { if (app.activeView === this) { this.isLoaded = true; this.renderCommon(); this.afterRender(); app.pageScript(); } } /* Vue update - remove following if (countlyGlobal.member.member_image) { $('.member_image').html(""); $('.member_image').css({'background-image': 'url(' + countlyGlobal.member.member_image + '?now=' + Date.now() + ')', 'background-size': '100%'}); } else { var defaultAvatarSelector = countlyGlobal.member.created_at % 16 * 30; var name = countlyGlobal.member.full_name.split(" "); $('.member_image').css({'background-image': 'url("images/avatar-sprite.png")', 'background-position': defaultAvatarSelector + 'px', 'background-size': '510px 30px', 'text-align': 'center'}); $('.member_image').html(""); $('.member_image').prepend('<span style="text-style: uppercase;color: white;position: relative; top: 6px; font-size: 16px;">' + name[0][0] + name[name.length - 1][0] + '</span>'); } // Top bar dropdowns are hidden by default, fade them in when view render is complete $("#top-bar").find(".dropdown").fadeIn(2000); */ return this; }, /** * Do all your 
rendering in this method * @param {boolean} isRefresh - render is called from refresh method, so do not need to do initialization * @memberof countlyView * @instance * @example *renderCommon:function (isRefresh) { * //set initial data for template * this.templateData = { * "page-title":jQuery.i18n.map["density.title"], * "logo-class":"densities", * "chartHTML": chartHTML, * }; * * if (!isRefresh) { * //populate template with data and add to html * $(this.el).html(this.template(this.templateData)); * } *} */ renderCommon: function(/* isRefresh*/) {}, // common render function of the view /** * Called when view is refreshed, you can reload data here or call {@link countlyView.renderCommon} with parameter true for code reusability * @returns {boolean} true * @memberof countlyView * @instance * @example * refresh:function () { * var self = this; * //reload data from beforeRender method * $.when(this.beforeRender()).then(function () { * if (app.activeView != self) { * return false; * } * //re render data again * self.renderCommon(true); * * //replace some parts manually from templateData * var newPage = $("<div>" + self.template(self.templateData) + "</div>"); * $(self.el).find(".widget-content").replaceWith(newPage.find(".widget-content")); * $(self.el).find(".dashboard-summary").replaceWith(newPage.find(".dashboard-summary")); * $(self.el).find(".density-widget").replaceWith(newPage.find(".density-widget")); * }); *} */ refresh: function() { // resfresh function for the view called every 10 seconds by default return true; }, /** * This method is called when user is active after idle period * @memberof countlyView * @instance */ restart: function() { // triggered when user is active after idle period this.refresh(); }, /** * This method is called when view is destroyed (user entered inactive state or switched to other view) you can clean up here if there is anything to be cleaned * @memberof countlyView * @instance */ destroy: function() { } }); /** * View class to expand by plugins which need configuration under Management->Applications. * @name countlyManagementView * @global * @namespace countlyManagementView */ window.countlyManagementView = countlyView.extend({ /** * Handy function which returns currently saved configuration of this plugin or empty object. * @memberof countlyManagementView * @return {Object} app object */ config: function() { return countlyGlobal.apps[this.appId] && countlyGlobal.apps[this.appId].plugins && countlyGlobal.apps[this.appId].plugins[this.plugin] || {}; }, /** * Set current app id * @memberof countlyManagementView * @param {string} appId - app Id to set */ setAppId: function(appId) { if (appId !== this.appId) { this.appId = appId; this.resetTemplateData(); this.savedTemplateData = JSON.stringify(this.templateData); } }, /** * Reset template data when changing app * @memberof countlyManagementView */ resetTemplateData: function() { this.templateData = {}; }, /** * Title of plugin configuration tab, override with your own title. * @memberof countlyManagementView * @return {String} tab title */ titleString: function() { return 'Default plugin configuration'; }, /** * Saving string displayed when request takes more than 0.3 seconds, override if needed. * @memberof countlyManagementView * @return {String} saving string */ savingString: function() { return 'Saving...'; }, /** * Callback function called before tab is expanded. Override if needed. * @memberof countlyManagementView */ beforeExpand: function() {}, /** * Callback function called after tab is collapsed. 
Override if needed. * @memberof countlyManagementView */ afterCollapse: function() {}, /** * Function used to determine whether save button should be visible. Used whenever UI is redrawn or some value changed. Override if needed. * @memberof countlyManagementView * @return {Boolean} true if enabled */ isSaveAvailable: function() { return JSON.stringify(this.templateData) !== this.savedTemplateData.toString(); }, /** * Callback function called to apply changes. Override if validation is needed. * @memberof countlyManagementView * @return {String} error to display to user if validation didn't pass */ validate: function() { return null; }, /** * Function which prepares data to the format required by the server, must return a Promise. * @memberof countlyManagementView * @return {Promise} which resolves to object of {plugin-name: {config: true, options: true}} format or rejects with error string otherwise */ prepare: function() { var o = {}; o[this.plugin] = this.templateData; return $.when(o); }, /** * Show error message returned by server or by validate function. Override if needed. * @memberof countlyManagementView * @param {string} error - error message to show */ showError: function(error) { CountlyHelpers.alert(error, 'popStyleGreen', {title: jQuery.i18n.map['management-applications.plugins.smth'], image: 'empty-icon', button_title: jQuery.i18n.map['management-applications.plugins.ok']}); }, /** * Called whenever the value of the element with the given name has been changed. Override if needed. * @memberof countlyManagementView */ onChange: function(/* name */) { }, /** * Called whenever the value of the element with the given name has been changed. * @memberof countlyManagementView * @param {string} name - key * @param {string} value - value to set */ doOnChange: function(name, value) { if (name && countlyCommon.dot(this.templateData, name) !== value) { countlyCommon.dot(this.templateData, name, value); } if (this.isSaveAvailable()) { this.el.parent().find("h3[aria-controls=" + this.el.attr("id") + "]").find('.icon-button').show(); } else { this.el.parent().find("h3[aria-controls=" + this.el.attr("id") + "]").find('.icon-button').hide(); } if (name) { this.onChange(name, value); } }, /** * Save logic: validate, disable save button, submit to the server, * show loading dialog if it takes long enough, hide it when done, show error if any, enable save button. * @memberof countlyManagementView * @param {event} ev - event * @returns {object} error */ save: function(ev) { ev.preventDefault(); ev.stopPropagation(); if (this.el.parent().find("h3[aria-controls=" + this.el.attr("id") + "]").find('.icon-button').hasClass('disabled') || !this.isSaveAvailable()) { return; } var error = this.validate(), self = this; if (error) { return this.showError(error === true ? 
jQuery.i18n.map['management-applications.plugins.save.nothing'] : error); } this.el.parent().find("h3[aria-controls=" + this.el.attr("id") + "]").find('.icon-button').addClass('disabled'); this.prepare().then(function(data) { var dialog, timeout = setTimeout(function() { dialog = CountlyHelpers.loading(jQuery.i18n.map['management-applications.plugins.saving']); }, 300); $.ajax({ type: "POST", url: countlyCommon.API_PARTS.apps.w + '/update/plugins', data: { app_id: self.appId, args: JSON.stringify(data) }, dataType: "json", success: function(result) { self.el.parent().find("h3[aria-controls=" + self.el.attr("id") + "]").find('.icon-button').removeClass('disabled'); clearTimeout(timeout); if (dialog) { CountlyHelpers.removeDialog(dialog); } if (result.result === 'Nothing changed') { CountlyHelpers.notify({type: 'warning', message: jQuery.i18n.map['management-applications.plugins.saved.nothing']}); } else { CountlyHelpers.notify({title: jQuery.i18n.map['management-applications.plugins.saved.title'], message: jQuery.i18n.map['management-applications.plugins.saved']}); if (!countlyGlobal.apps[result._id].plugins) { countlyGlobal.apps[result._id].plugins = {}; } self.savedTemplateData = JSON.stringify(self.templateData); for (var k in result.plugins) { countlyGlobal.apps[result._id].plugins[k] = result.plugins[k]; } self.resetTemplateData(); self.render(); } self.doOnChange(); }, error: function(resp) { try { resp = JSON.parse(resp.responseText); } catch (ignored) { //ignored exception } //re-enable the save button self.el.parent().find("h3[aria-controls=" + self.el.attr("id") + "]").find('.icon-button').removeClass('disabled'); clearTimeout(timeout); if (dialog) { CountlyHelpers.removeDialog(dialog); } self.showError(resp.result || jQuery.i18n.map['management-applications.plugins.error.server']); } }); }, function(error1) { //re-enable the save button self.el.parent().find("h3[aria-controls=" + self.el.attr("id") + "]").find('.icon-button').removeClass('disabled'); self.showError(error1); }); }, beforeRender: function() { var self = this; if (this.templatePath && this.templatePath !== "") { return $.when(T.render(this.templatePath, function(src) { self.template = src; })); } else { return; } }, render: function() { //backbone.js view render function var self = this; if (!this.savedTemplateData) { this.savedTemplateData = JSON.stringify(this.templateData); } this.el.html(this.template(this.templateData)); if (!this.el.parent().find("h3[aria-controls=" + this.el.attr("id") + "]").find('.icon-button').length) { setTimeout(function() { $('<a class="icon-button green" data-localize="management-applications.plugins.save" href="#">Save</a>').hide().appendTo(self.el.parent().find("h3[aria-controls=" + self.el.attr("id") + "]")); }); } this.el.find('.cly-select').each(function(i, select) { $(select).off('click', '.item').on('click', '.item', function() { self.doOnChange($(select).data('name') || $(select).attr('id'), $(this).data('value')); }); }); this.el.find(' input[type=number]').off('input').on('input', function() { self.doOnChange($(this).attr('name') || $(this).attr('id'), parseFloat($(this).val())); }); this.el.find('input[type=text], input[type=password]').off('input').on('input', function() { self.doOnChange($(this).attr('name') || $(this).attr('id'), $(this).val()); }); this.el.find('input[type=file]').off('change').on('change', function() { self.doOnChange($(this).attr('name') || $(this).attr('id'), $(this).val()); }); this.el.find('.on-off-switch input').on("change", function() { var isChecked = $(this).is(":checked"), attrID = $(this).attr("id"); self.doOnChange(attrID, isChecked); }); 
setTimeout(function() { self.el.parent().find("h3[aria-controls=" + self.el.attr("id") + "]").find('.icon-button').off('click').on('click', self.save.bind(self)); }); if (this.isSaveAvailable()) { this.el.parent().find("h3[aria-controls=" + this.el.attr("id") + "]").find('.icon-button').show(); } else { this.el.parent().find("h3[aria-controls=" + this.el.attr("id") + "]").find('.icon-button').hide(); } app.localize(); this.afterRender(); return this; }, }); /** * Drop class with embedded Countly theme, use as any Drop class/instance * @name CountlyDrop * @global * @namespace CountlyDrop */ var CountlyDrop = Drop.createContext({ classPrefix: 'countly-drop', }); var initializeOnce = _.once(function() { return $.when(countlyEvent.initialize()).then(function() { }); }); //redefine contains selector for jquery to be case insensitive $.expr[":"].contains = $.expr.createPseudo(function(arg) { return function(elem) { return $(elem).text().toUpperCase().indexOf(arg.toUpperCase()) >= 0; }; }); /** * Set menu items by restriction status, hiding empty menu-categories * @name setMenuItems * @global */ function setMenuItems() { // hide empty section headers var type = countlyCommon.ACTIVE_APP_ID && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID] && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type ? countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type : "mobile"; var categories = $('#' + type + '-type .menu-category'); for (var j = 0; j < categories.length; j++) { var children = categories[j].children; var isEmpty = true; for (var k = 0; k < children.length; k++) { if (children[k].className.indexOf('restrict') === -1 && children[k].className.indexOf('item') !== -1) { isEmpty = false; } } if (isEmpty) { $(categories[j]).hide(); } else { $(categories[j]).show(); } } } /** * Main app instance of Backbone AppRouter used to control views and view change flow * @name app * @global * @instance * @namespace app */ var AppRouter = Backbone.Router.extend({ routes: { "/": "dashboard", "*path": "main" }, /** * View that is currently being displayed * @type {countlyView} * @instance * @memberof app */ activeView: null, //current view dateToSelected: null, //date to selected from the date picker dateFromSelected: null, //date from selected from the date picker activeAppName: '', activeAppKey: '', _isFirstLoad: false, //to know if we are switching between two apps or just loading page refreshActiveView: 0, //refresh interval function reference _myRequests: {}, //save requests not connected with a view, to prevent calling the same request if the previous one has not finished yet /** * Navigate to another view programmatically. If you need to change the view without the user clicking anything (e.g. a redirect), you can do it using this method. This method is not defined by Countly, but is a direct method of the AppRouter object in Backbone.js * @name app#navigate * @function * @instance * @param {string} fragment - url path (hash part) where to redirect user * @param {boolean=} triggerRoute - whether to trigger the route call, like initializing a new view, etc. 
Defaults to false, so you may want to use false when redirecting to a URL of the same view you are already on, so there is no need to reload it * @memberof app * @example <caption>Redirect to url of the same view</caption> * //you are at #/manage/systemlogs * app.navigate("#/manage/systemlogs/query/{}"); * * @example <caption>Redirect to url of other view</caption> * //you are at #/manage/systemlogs * app.navigate("#/crashes", true); */ _removeUnfinishedRequests: function() { for (var url in this._myRequests) { for (var data in this._myRequests[url]) { //4 means done, less still in progress if (parseInt(this._myRequests[url][data].readyState) !== 4) { this._myRequests[url][data].abort(); } } } this._myRequests = {}; if (this.activeView) { this.activeView._removeMyRequests();//remove requests for view(if not finished) } }, switchApp: function(app_id, callback) { countlyCommon.setActiveApp(app_id); $("#active-app-name").text(countlyGlobal.apps[app_id].name); $("#active-app-name").attr('title', countlyGlobal.apps[app_id].name); $("#active-app-icon").css("background-image", "url('" + countlyGlobal.path + "appimages/" + app_id + ".png')"); //removing requests saved in app app._removeUnfinishedRequests(); if (app && app.activeView) { if (typeof callback === "function") { app.activeView.appChanged(function() { app.onAppSwitch(app_id); callback(); }); } else { app.activeView.appChanged(function() { app.onAppSwitch(app_id); }); } } else { if (typeof callback === "function") { callback(); } } }, _menuForTypes: {}, _subMenuForTypes: {}, _menuForAllTypes: [], _subMenuForAllTypes: [], _subMenuForCodes: {}, _subMenus: {}, _internalMenuCategories: ["management", "user"], /** * Add menu category. Categories will be copied for all app types and their visibility should be controlled from the app type plugin * @memberof app * @param {string} category - new menu category * @param {Object} node - object defining category element * @param {string} node.text - key for localization string which to use as text * @param {number} node.priority - priority order number, the less it is, the more on top category will be * @param {string} node.classes - string with css classes to add to category element * @param {string} node.style - string with css styling to add to category element * @param {string} node.html - additional HTML to append after text * @param {function} node.callback - called when and each time category is added passing same parameters as to this method plus added jquery category element as 3rd param
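 * @example
 * // Illustrative sketch only: "monitor" is a hypothetical category name,
 * // not one that exists in this file.
 * app.addMenuCategory("monitor", {text: "sidebar.category.monitor", priority: 25});
 **/ addMenuCategory: function(category, node) { if (this._internalMenuCategories.indexOf(category) !== -1) { throw "Category already exists with name: " + category; } if (typeof node.priority === "undefined") { throw "Provide priority property for category element"; } //New sidebar container hook countlyVue.container.registerData("/sidebar/analytics/menuCategory", { name: category, priority: node.priority, title: node.text || countlyVue.i18n("sidebar.category." 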
+ category), node: node /* Following secondary params are simply passed to registry, but not directly used for now: * node.classes - string with css classes to add to category element * node.style - string with css styling to add to category element * node.html - additional HTML to append after text * node.callback */ }); var menu = $("<div></div>"); menu.addClass("menu-category"); menu.addClass(category + "-category"); menu.attr("data-priority", node.priority); if (node.classes) { menu.addClass(node.classes); } if (node.style) { menu.attr("style", node.style); } menu.append('<span class="menu-category-title" data-localize="' + (node.text || "sidebar.category." + category) + '"></span>'); if (node.html) { menu.append(node.html); } this._internalMenuCategories.push(category); var added = false; var selector = "#sidebar-menu .sidebar-menu"; //try adding before first greater priority $(selector + " > div.menu-category").each(function() { var cur = parseInt($(this).attr("data-priority")); if (node.priority < cur) { added = true; $(this).before(menu); return false; } }); //if not added, maybe we are first or last, so just add it if (!added) { $(selector).append(menu); } if (typeof node.callback === "function") { node.callback(category, node, menu); } }, updateLongTaskViewsNofification: function(appChanged) { countlyTaskManager.getLastReports(function(data) { if (appChanged) { app.haveUnreadReports = false; $(".orange-side-notification-banner-wrapper").css("display", "none"); } if (app.haveUnreadReports) { $("#manage-long-tasks-icon").addClass('unread'); } else { $("#manage-long-tasks-icon").removeClass('unread'); } var newHtml = "<table>"; for (var k = 0; k < data.length; k++) { var color = "#E98010"; if (data[k].status === "completed") { color = "#2FA732"; } if (data[k].status === "errored") { color = "#D63E40"; } var name = data[k].name || ""; if (name.length > 30) { name = name.substring(0, 30) + "..."; } var trclass = ""; var dataLink = ''; if (data[k].status === "completed") { name = name + '<i class="fa fa-chevron-right circle-arrow"></i>'; trclass = " class='completed'"; dataLink = ' data-link="' + data[k].view + data[k]._id + '" '; } newHtml = newHtml + "<tr" + trclass + dataLink + '><td><p class="title">' + name + '</p><p style="text-transform:capitalize">' + data[k].type + '<span>|</span>' + countlyCommon.formatTimeAgo(data[k].start) + '</p></td>' + '<td ><span class="status-color"><i class="fa fa-circle" style="color:' + color + ';"></i>' + data[k].status + '</span></td>'; } newHtml += "<table>"; if (data.length === 0) { $(".manage-long-tasks-menu .tasks-wrapper").html("<div class='graph-description' style='border:0'>" + jQuery.i18n.map["taskmanager.empty-warning"] + "</div>"); } else { $(".manage-long-tasks-menu .tasks-wrapper").html("<div class='tasks'>" + newHtml + "</div>"); } $(".manage-long-tasks-menu .tasks tr").on("click", function() { var link = $(this).data("link"); if (link && link !== "") { window.location.hash = link; } }); if (data.length > 5) { $(".manage-long-tasks-menu .tasks-wrapper").css("height", "355px"); $(".manage-long-tasks-menu .tasks").first().slimScroll({ height: '100%', start: 'top', wheelStep: 10, position: 'right', disableFadeOut: true }); } else { $(".manage-long-tasks-menu .tasks-wrapper").css("height", ""); } }); }, /** * Add first level menu element for specific app type under specified category. 
You can only add app type specific menu to categories "understand", "explore", "reach", "improve", "utilities" * @memberof app * @param {string} app_type - type of the app for which to add menu * @param {string} category - category under which to add menu: "understand", "explore", "reach", "improve", "utilities" * @param {Object} node - object defining menu element * @param {string} node.text - key for localization string which to use as text * @param {string} node.code - code name for menu to reference for children, also assigned as id attribute with -menu postfix * @param {string} node.icon - HTML code for icon to show, usually a div element with font icon classes * @param {number} node.priority - priority order number, the less it is, the more on top menu will be * @param {string} node.url - url where menu points. Don't provide this, if it is upper menu and will contain children * @param {string} node.classes - string with css classes to add to menu element * @param {string} node.style - string with css styling to add to menu element * @param {string} node.html - additional HTML to append after text (use icon to append HTML before text) * @param {function} node.callback - called when and each time menu is added passing same parameters as to this method plus added jquery menu element as 4th param
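 * @example
 * // Illustrative sketch only: "myplugin", its localization key and its url
 * // are hypothetical, not part of this file.
 * app.addMenuForType("mobile", "understand", {
 *     code: "myplugin",
 *     text: "myplugin.title",
 *     icon: '<div class="logo ion-gear-a"></div>',
 *     priority: 50,
 *     url: "#/myplugin"
 * });
 **/ addMenuForType: function(app_type, category, node) { if (this._internalMenuCategories.indexOf(category) === -1) { throw "Wrong category for menu: " + category; } if (!node.text || !node.code || typeof node.priority === "undefined") { throw "Provide code, text, icon and priority properties for menu element"; } //New sidebar container hook countlyVue.container.registerData("/sidebar/analytics/menu", { app_type: app_type, category: category, name: node.code, priority: node.priority, title: node.text, url: node.url, icon: node.icon, node: node /* Following secondary params are simply passed to registry, but not directly used for now: * node.classes - string with css classes to add to category element * node.style - string with css styling to add to category element * node.html - additional HTML to append after text * node.callback */ }); if (!this.appTypes[app_type] && category !== "management" && category !== "users") { //app type not yet registered, queue if (!this._menuForTypes[app_type]) { this._menuForTypes[app_type] = []; } this._menuForTypes[app_type].push({category: category, node: node}); return; } //create menu element var menu = $("<a></a>"); menu.addClass("item"); menu.attr("data-priority", node.priority); menu.attr("id", node.code + "-menu"); if (node.url) { menu.attr("href", node.url); } if (node.classes) { menu.addClass(node.classes); } if (node.style) { menu.attr("style", node.style); } menu.append(node.icon); menu.append('<div class="text" data-localize="' + node.text + '">' + (jQuery.i18n.map[node.text] || node.text) + '</div>'); if (node.html) { menu.append(node.html); } if (!node.url && category !== "management" && category !== "users") { this._subMenus[node.code] = true; menu.hide(); menu = menu.add('<div class="sidebar-submenu" id="' + node.code + '-submenu">'); } var added = false; var selector = "#sidebar-menu #" + app_type + "-type ." 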
+ category + "-category"; if (category === "management") { //different selector for management menu selector = ".right-menu #manage-menu"; } else if (category === "users") { //different selector for users menu selector = ".right-menu #user-menu"; } //try adding before first greater priority $(selector + " > a").each(function() { var cur = parseInt($(this).attr("data-priority")); if (node.priority < cur) { added = true; $(this).before(menu); return false; } }); if (category === "management" && $(selector + " > a").length > 5) { $(selector).addClass("columns"); } //if not added, maybe we are first or last, so just add it if (!added) { $(selector).append(menu); } if (typeof node.callback === "function") { node.callback(app_type, category, node, menu); } //run all queued submenus for this parent if (!node.url && category !== "management" && category !== "users" && this._subMenuForCodes[node.code]) { for (i = 0; i < this._subMenuForCodes[node.code].length; i++) { this.addSubMenuForType(this._subMenuForCodes[node.code][i].app_type, node.code, this._subMenuForCodes[node.code][i].node); } this._subMenuForCodes[node.code] = null; } setMenuItems(); }, /** * Add second level menu element for specific app type under specified parent code. * @memberof app * @param {string} app_type - type of the app for which to add menu * @param {string} parent_code - code for parent element under which to add this submenu element * @param {Object} node - object defining menu lement * @param {string} node.text - key for localization string which to use as text * @param {string} node.code - code name for menu to reference for children, also assigned as id attribute with -menu postfix * @param {number} node.priority - priority order number, the less it is, the more on top menu will be * @param {string} node.url - url where menu points. 
Don't provide this, if it is upper menu and will contain children * @param {string} node.classes - string with css classes to add to menu element * @param {string} node.style - string with css styling to add to menu element * @param {string} node.html - additional HTML to append after text (use icon to append HTML before text) * @param {function} node.callback - called when and each time menu is added passing same parameters as to this method plus added jquery menu element as 4th param **/ addSubMenuForType: function(app_type, parent_code, node) { if (!parent_code) { throw "Provide code name for parent category"; } if (!node.text || !node.code || !node.url || !node.priority) { throw "Provide text, code, url and priority for sub menu"; } //New sidebar container hook countlyVue.container.registerData("/sidebar/analytics/submenu", { app_type: app_type, parent_code: parent_code, name: node.code, priority: node.priority, title: node.text, url: node.url, node: node /* Following secondary params are simply passed to registry, but not directly used for now: * node.classes - string with css classes to add to category element * node.style - string with css styling to add to category element * node.html - additional HTML to append after text * node.callback */ }); if (!this.appTypes[app_type]) { //app type not yet register, queue if (!this._subMenuForTypes[app_type]) { this._subMenuForTypes[app_type] = []; } this._subMenuForTypes[app_type].push({parent_code: parent_code, node: node}); return; } if (!this._subMenus[parent_code]) { //parent not yet registered, queue if (!this._subMenuForCodes[parent_code]) { this._subMenuForCodes[parent_code] = []; } this._subMenuForCodes[parent_code].push({app_type: app_type, node: node}); return; } //create menu element var menu = $("<a></a>"); menu.addClass("item"); menu.attr("data-priority", node.priority); menu.attr("id", node.code + "-menu"); menu.attr("href", node.url); if (node.classes) { menu.addClass(node.classes); } if (node.style) { menu.attr("style", node.style); } menu.append('<div class="text" data-localize="' + node.text + '">' + (jQuery.i18n.map[node.text] || node.text) + '</div>'); if (node.html) { menu.append(node.html); } var added = false; //try adding before first greater priority $("#sidebar-menu #" + app_type + "-type #" + parent_code + "-submenu > a").each(function() { var cur = parseInt($(this).attr("data-priority")); if (node.priority < cur) { added = true; $(this).before(menu); return false; } }); //if not added, maybe we are first or last, so just add it if (!added) { $("#sidebar-menu #" + app_type + "-type #" + parent_code + "-submenu").append(menu); } if ($("#sidebar-menu #" + app_type + "-type #" + parent_code + "-submenu > a").length === 1) { $("#sidebar-menu #" + app_type + "-type #" + parent_code + "-menu").attr("href", node.url); } else { $("#sidebar-menu #" + app_type + "-type #" + parent_code + "-menu").removeAttr("href"); } $("#sidebar-menu #" + app_type + "-type #" + parent_code + "-menu").css('display', 'block'); if (typeof node.callback === "function") { node.callback(app_type, parent_code, node, menu); } }, /** * Add first level menu element for all app types and special categories. 
* @memberof app * @param {string} category - category under which to add menu: "understand", "explore", "reach", "improve", "utilities", "management", "user" * @param {Object} node - object defining menu lement * @param {string} node.text - key for localization string which to use as text * @param {string} node.code - code name for menu to reference for children, also assigned as id attribute with -menu postfix * @param {string} node.icon - HTML code for icon to show, usually a div element with font icon classes * @param {number} node.priority - priority order number, the less it is, the more on top menu will be * @param {string} node.url - url where menu points. Don't provide this, if it is upper menu and will contain children * @param {string} node.classes - string with css classes to add to menu element * @param {string} node.style - string with css styling to add to menu element * @param {string} node.html - additional HTML to append after text (use icon to append HTML before text) * @param {function} node.callback - called when and each time menu is added passing same parameters as to this method plus added jquery menu element as 4th param **/ addMenu: function(category, node) { if (category === "management" || category === "users") { this.addMenuForType("default", category, node); } else { for (var type in this.appTypes) { this.addMenuForType(type, category, node); } //queue for future added app types this._menuForAllTypes.push({category: category, node: node}); } }, /** * Add second level sub menu element for all app types (not available for special categories as "management" and "user") * @memberof app * @param {string} parent_code - code for parent element under which to add this submenu element * @param {Object} node - object defining menu lement * @param {string} node.text - key for localization string which to use as text * @param {string} node.code - code name for menu to reference for children, also assigned as id attribute with -menu postfix * @param {string} node.icon - HTML code for icon to show, usually a div element with font icon classes * @param {number} node.priority - priority order number, the less it is, the more on top menu will be * @param {string} node.url - url where menu points. 
Don't provide this, if it is upper menu and will contain children * @param {string} node.classes - string with css classes to add to menu element * @param {string} node.style - string with css styling to add to menu element * @param {string} node.html - additional HTML to append after text (use icon to append HTML before text) * @param {function} node.callback - called when and each time menu is added passing same parameters as to this method plus added jquery menu element as 4th param **/ addSubMenu: function(parent_code, node) { for (var type in this.appTypes) { this.addSubMenuForType(type, parent_code, node); } //queue for future added app types this._subMenuForAllTypes.push({parent_code: parent_code, node: node}); }, main: function(/*forced*/) { var change = true, redirect = false; // detect app switch like //#/app/586e32ddc32cb30a01558cc1/analytics/events if (Backbone.history.fragment.indexOf("/app/") === 0) { var app_id = Backbone.history.fragment.replace("/app/", ""); redirect = "#/"; if (app_id && app_id.length) { if (app_id.indexOf("/") !== -1) { var parts = app_id.split("/"); app_id = parts.shift(); redirect = "#/" + parts.join("/"); } if (app_id !== countlyCommon.ACTIVE_APP_ID && countlyGlobal.apps[app_id]) { app.switchApp(app_id, function() { app.navigate(redirect, true); }); return; } } } else if (Backbone.history.fragment.indexOf("/0/") === 0 && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID]) { this.navigate("#/" + countlyCommon.ACTIVE_APP_ID + Backbone.history.fragment.replace("/0", ""), true); return; } else if (Backbone.history.fragment !== "/" && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID]) { $("#" + countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type + "-type a").each(function() { if (this.hash !== "#/" && this.hash !== "") { if ("#" + Backbone.history.fragment === this.hash && $(this).css('display') !== 'none') { change = false; return false; } else if (("#" + Backbone.history.fragment).indexOf(this.hash) === 0 && $(this).css('display') !== 'none') { redirect = this.hash; return false; } } }); } if (redirect) { app.navigate(redirect, true); } else if (change) { if (Backbone.history.fragment !== "/") { this.navigate("#/", true); } else if (countlyCommon.APP_NAMESPACE !== false) { this.navigate("#/" + countlyCommon.ACTIVE_APP_ID + Backbone.history.fragment, true); } else { this.dashboard(); } } else { if (countlyCommon.APP_NAMESPACE !== false) { this.navigate("#/" + countlyCommon.ACTIVE_APP_ID + Backbone.history.fragment, true); } else { this.activeView.render(); } } }, dashboard: function() { var type = countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type; if (countlyGlobal.member.restrict && countlyGlobal.member.restrict.indexOf("#/") !== -1) { return; } if (_.isEmpty(countlyGlobal.apps)) { this.renderWhenReady(this.manageAppsView); } else if (type === "mobile" || type === "web" || type === "desktop") { this.renderWhenReady(app.HomeView); } else if (typeof this.appTypes[countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type] !== "undefined") { this.renderWhenReady(this.appTypes[countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type]); } else { this.renderWhenReady(this.dashboardView); } }, runRefreshScripts: function() { var i = 0; var l = 0; if (this.refreshScripts[Backbone.history.fragment]) { for (i = 0, l = this.refreshScripts[Backbone.history.fragment].length; i < l; i++) { this.refreshScripts[Backbone.history.fragment][i](); } } for (var k in this.refreshScripts) { if (k !== '#' && k.indexOf('#') !== -1 && Backbone.history.fragment.match("^" + k.replace(/#/g, 
'.*'))) { for (i = 0, l = this.refreshScripts[k].length; i < l; i++) { this.refreshScripts[k][i](); } } } if (this.refreshScripts["#"]) { for (i = 0, l = this.refreshScripts["#"].length; i < l; i++) { this.refreshScripts["#"][i](); } } }, performRefresh: function(self) { //refresh only if we are on current period if (countlyCommon.periodObj.periodContainsToday && self.activeView.isLoaded) { self.activeView.isLoaded = false; $.when(self.activeView.refresh()).always(function() { self.activeView.isLoaded = true; self.runRefreshScripts(); }); } }, renderWhenReady: function(viewName) { //all view renders end up here // If there is an active view call its destroy function to perform cleanups before a new view renders if (this.activeView) { this.activeView._removeMyRequests(); this.activeView.destroy(); } if (window.components && window.components.slider && window.components.slider.instance) { window.components.slider.instance.close(); } this.activeView = viewName; clearInterval(this.refreshActiveView); if (typeof countlyGlobal.member.password_changed === "undefined") { countlyGlobal.member.password_changed = Math.round(new Date().getTime() / 1000); } this.routesHit++; if (_.isEmpty(countlyGlobal.apps)) { if (Backbone.history.fragment !== "/manage/apps") { this.navigate("/manage/apps", true); } else { viewName.render(); } return false; } else if ((countlyGlobal.security.password_expiration > 0) && (countlyGlobal.member.password_changed + countlyGlobal.security.password_expiration * 24 * 60 * 60 < new Date().getTime() / 1000) && (!countlyGlobal.ssr)) { if (Backbone.history.fragment !== "/manage/user-settings/reset") { this.navigate("/manage/user-settings/reset", true); } else { viewName.render(); } return false; } viewName.render(); var self = this; this.refreshActiveView = setInterval(function() { self.performRefresh(self); }, countlyCommon.DASHBOARD_REFRESH_MS); if (countlyGlobal && countlyGlobal.message) { CountlyHelpers.parseAndShowMsg(countlyGlobal.message); } // Init sidebar based on the current url self.sidebar.init(); }, sidebar: { init: function() { setTimeout(function() { $("#sidebar-menu").find(".item").removeClass("active menu-active"); $("#sidebar-menu").find(".menu-category-title").removeClass("active"); var selectedMenu = $($("#sidebar-menu").find("a[href='#" + Backbone.history.fragment + "']")); if (!selectedMenu.length) { var parts = Backbone.history.fragment.split("/"); selectedMenu = $($("#sidebar-menu").find("a[href='#/" + (parts[1] || "") + "']")); if (!selectedMenu.length) { selectedMenu = $($("#sidebar-menu").find("a[href='#/" + (parts[1] + "/" + parts[2] || "") + "']")); } } var selectedSubmenu = selectedMenu.parents(".sidebar-submenu"); if (selectedSubmenu.length) { selectedMenu.addClass("active"); selectedSubmenu.prev().addClass("active menu-active"); app.sidebar.submenu.toggle(selectedSubmenu); } else { selectedMenu.addClass("active"); app.sidebar.submenu.toggle(); } var selectedCategory = selectedMenu.parents(".menu-category"); if (selectedCategory.length) { selectedCategory.find(".menu-category-title").addClass("active"); } setMenuItems(); }, 1000); }, submenu: { toggle: function(el) { $(".sidebar-submenu").removeClass("half-visible"); if (!el) { $(".sidebar-submenu:visible").animate({ "right": "-170px" }, { duration: 300, easing: 'easeOutExpo', complete: function() { $(this).hide(); } }); return true; } if (!el.is(":visible")) { if ($(".sidebar-submenu").is(":visible")) { $(".sidebar-submenu").hide(); el.css({ "right": "-110px" }).show().animate({ "right": "0" }, { 
duration: 300, easing: 'easeOutExpo' }); addText(); } else { el.css({ "right": "-170px" }).show().animate({ "right": "0" }, { duration: 300, easing: 'easeOutExpo' }); addText(); } } /** function add text to menu title */ function addText() { var mainMenuText = $(el.prev()[0]).find(".text").text(); $(".menu-title").remove(); var menuTitle = $("<div class='menu-title'></div>").text(mainMenuText).prepend("<i class='submenu-close ion-close'></i>"); el.prepend(menuTitle); // Try setting submenu title once again if it was empty // during previous try if (!mainMenuText) { setTimeout(function() { $(".menu-title").text($(el.prev()[0]).find(".text").text()); $(".menu-title").prepend("<i class='submenu-close ion-close'></i>"); }, 1000); } } } } }, hasRoutingHistory: function() { if (this.routesHit > 1) { return true; } return false; }, back: function(fallback_route) { if (this.routesHit > 1) { window.history.back(); } else { var fragment = Backbone.history.getFragment(); //route not passed, try to guess from current location if (typeof fallback_route === "undefined" || fallback_route === "") { if (fragment) { var parts = fragment.split("/"); if (parts.length > 1) { fallback_route = "/" + parts[1]; } } } if (fallback_route === fragment) { fallback_route = '/'; } this.navigate(fallback_route || '/', {trigger: true, replace: true}); } }, initialize: function() { //initialize the dashboard, register helpers etc. this.bind("route", function(name/*, args*/) { $('#content').removeClass(function(index, className) { return (className.match(/(^|\s)routename-\S*/g) || []).join(' '); }).addClass("routename-" + name); }); this.appTypes = {}; this.pageScripts = {}; this.dataExports = {}; this.appSwitchCallbacks = []; this.appManagementSwitchCallbacks = []; this.appObjectModificators = []; this.appManagementViews = {}; this.appAddTypeCallbacks = []; this.userEditCallbacks = []; this.refreshScripts = {}; this.appSettings = {}; this.widgetCallbacks = {}; var self = this; $(document).ready(function() { /** * Add menus **/ self.addMenuCategory("understand", {priority: 10}); self.addMenuCategory("explore", {priority: 20}); self.addMenuCategory("reach", {priority: 30}); self.addMenuCategory("improve", {priority: 40}); self.addMenuCategory("utilities", {priority: 50}); self.addMenu("understand", {code: "overview", url: "#/", text: "sidebar.home", icon: '<div class="logo dashboard ion-speedometer"></div>', priority: 10}); self.addMenu("understand", {code: "analytics", text: "sidebar.analytics", icon: '<div class="logo analytics ion-ios-pulse-strong"></div>', priority: 20}); self.addMenu("understand", {code: "events", text: "sidebar.events", icon: '<div class="logo events"><i class="material-icons">bubble_chart</i></div>', priority: 40}); // self.addMenu("understand", {code: "engagement", text: "sidebar.engagement", icon: '<div class="logo ion-happy-outline"></div>', priority: 30}); self.addSubMenu("events", {code: "events-overview", url: "#/analytics/events/overview", text: "sidebar.events.overview", priority: 10}); if (countlyAuth.validateRead('events')) { self.addSubMenu("events", {code: "all-events", url: "#/analytics/events", text: "sidebar.events.all-events", priority: 20}); } // if (countlyAuth.validateUpdate('events') || countlyAuth.validateDelete('events')) { // self.addSubMenu("events", {code: "manage-events", url: "#/analytics/manage-events", text: "sidebar.events.blueprint", priority: 100}); // } self.addMenu("utilities", { code: "management", text: "sidebar.utilities", icon: '<div class="logo management 
ion-wrench"></div>', priority: 10000000, callback: function(type, category, node, menu) { //for backwards compatability of old plugins adding menu to management menu.filter("#management-submenu").append("<span class='help-toggle'></span>"); } }); if (countlyAuth.validateRead('core')) { self.addSubMenu("management", {code: "longtasks", url: "#/manage/tasks", text: "sidebar.management.longtasks", priority: 10}); } //management is also a menu category which goes in default menu i.e. visible to all users var jobsIconSvg = '<svg width="20px" height="16px" viewBox="0 0 12 10" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"><title>list-24px 2</title><g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd"><g id="list-24px-2" fill="#9f9f9f" fill-rule="nonzero"><g id="list-24px"><path d="M0,6 L2,6 L2,4 L0,4 L0,6 Z M0,10 L2,10 L2,8 L0,8 L0,10 Z M0,2 L2,2 L2,0 L0,0 L0,2 Z M3,6 L12,6 L12,4 L3,4 L3,6 Z M3,10 L12,10 L12,8 L3,8 L3,10 Z M3,0 L3,2 L12,2 L12,0 L3,0 Z" id="Shape"></path></g></g></g></svg>'; if (countlyAuth.validateRead('global_applications')) { self.addMenu("management", {code: "applications", url: "#/manage/apps", text: "sidebar.management.applications", icon: '<div class="logo-icon ion-ios-albums"></div>', priority: 80}); } if (countlyAuth.validateRead('global_users')) { self.addMenu("management", {code: "users", url: "#/manage/users", text: "sidebar.management.users", icon: '<div class="logo-icon fa fa-user-friends"></div>', priority: 70}); } if (countlyAuth.validateRead('global_jobs')) { self.addMenu("management", {code: "jobs", url: "#/manage/jobs", text: "sidebar.management.jobs", icon: '<div class="logo-icon">' + jobsIconSvg + '</div>', priority: 140}); } // self.addMenu("management", {code: "help", text: "sidebar.management.help", icon: '<div class="logo-icon ion-help help"></div>', classes: "help-toggle", html: '<div class="on-off-switch" id="help-toggle"><input type="checkbox" class="on-off-switch-checkbox" id="help-toggle-cbox"><label class="on-off-switch-label" for="help-toggle-cbox"></label></div>', priority: 10000000}); // self.addMenu("explore", {code: "users", text: "sidebar.analytics.users", icon: '<div class="logo ion-person-stalker"></div>', priority: 10}); // self.addMenu("explore", {code: "behavior", text: "sidebar.behavior", icon: '<div class="logo ion-funnel"></div>', priority: 20}); if ((countlyGlobal.plugins.indexOf("surveys") > -1) || (countlyGlobal.plugins.indexOf("star-rating") > -1)) { app.addMenu("reach", {code: "feedback", text: "sidebar.feedback", icon: '<div class="logo ion-android-star-half"></div>', priority: 20}); } if ((countlyGlobal.plugins.indexOf("crashes") > -1) || (countlyGlobal.plugins.indexOf("crash_symbolication") > -1)) { app.addMenu("improve", {code: "crashes", text: "crashes.title", icon: '<div class="logo ion-alert-circled"></div>', priority: 10}); } Backbone.history.checkUrl(); $('.list .item_info').on("click", function(e) { e.stopPropagation(); }); }); this.routesHit = 0; //keep count of number of routes handled by your application /** * When rendering data from server using templates from frontend/express/views we are using ejs as templating engine. But when rendering templates on the browser side remotely loaded templates through ajax, we are using Handlebars templating engine. While in ejs everything is simple and your templating code is basically javascript code betwee <% %> tags. 
With Handlebars, however, it is not that straightforward, and we need helper functions to share some common templating logic * @name Handlebars * @global * @instance * @namespace Handlebars */ /** * Display common date selecting UI elements * @name date-selector * @memberof Handlebars * @example * {{> date-selector }} */ Handlebars.registerPartial("date-selector", $("#template-date-selector").html()); /** * Get id value from ObjectID string * @name clearObjectId * @memberof Handlebars * @example * <span>{{#clearObjectId value}}{{/clearObjectId}}</span> */ Handlebars.registerHelper('clearObjectId', function(object) { if (object) { var id = object._id; if (typeof id === "string") { if (id.substr(0, 3) === "Obj") { id = id.split("(")[1].split(")")[0]; } return id; } else { return ""; } } else { return ''; } }); /** * Display common date time selecting UI elements * @name date-time-selector * @memberof Handlebars * @example * {{> date-time-selector }} */ Handlebars.registerPartial("date-time-selector", $("#template-date-time-selector").html()); /** * Display common timezone selecting UI element * @name timezones * @memberof Handlebars * @example * {{> timezones }} */ Handlebars.registerPartial("timezones", $("#template-timezones").html()); /** * Display common app category selecting UI element * @name app-categories * @memberof Handlebars * @example * {{> app-categories }} */ Handlebars.registerPartial("app-categories", $("#template-app-categories").html()); /** * Iterate object with keys and values, creating variable "property" for object key and variable "value" for object value * @name eachOfObject * @memberof Handlebars * @example * {{#eachOfObject app_types}} * <div data-value="{{property}}" class="item">{{value}}</div> * {{/eachOfObject}} */ Handlebars.registerHelper('eachOfObject', function(context, options) { var ret = ""; for (var prop in context) { ret = ret + options.fn({ property: prop, value: context[prop] }); } return ret; }); /** * Iterate only values of object, this will reference the value of current object * @name eachOfObjectValue * @memberof Handlebars * @example * {{#eachOfObjectValue apps}} * <div class="app searchable"> * <div class="image" style="background-image: url('/appimages/{{this._id}}.png');"></div> * <div class="name">{{this.name}}</div> * <input class="app_id" type="hidden" value="{{this._id}}"/> * </div> * {{/eachOfObjectValue}} */ Handlebars.registerHelper('eachOfObjectValue', function(context, options) { var ret = ""; for (var prop in context) { ret = ret + options.fn(context[prop]); } return ret; }); /** * Iterate through array, creating variable "index" for element index and variable "value" for value at that index * @name eachOfArray * @memberof Handlebars * @example * {{#eachOfArray events}} * <div class="searchable event-container {{#if value.is_active}}active{{/if}}" data-key="{{value.key}}"> * <div class="name">{{value.name}}</div> * </div> * {{/eachOfArray}} */ Handlebars.registerHelper('eachOfArray', function(context, options) { var ret = ""; for (var i = 0; i < context.length; i++) { ret = ret + options.fn({ index: i, value: context[i] }); } return ret; }); /** * Print out json in a pretty indented way * @name prettyJSON * @memberof Handlebars * @example * <td class="jh-value jh-object-value">{{prettyJSON value}}</td> */ Handlebars.registerHelper('prettyJSON', function(context) { return JSON.stringify(context, undefined, 4); }); /** * Shorten number, Handlebar binding to {@link countlyCommon.getShortNumber} * @name getShortNumber * @memberof Handlebars * 
@example * <span class="value">{{getShortNumber this.data.total}}</span> */ Handlebars.registerHelper('getShortNumber', function(context) { return countlyCommon.getShortNumber(context); }); /** * Format float number with up to 2 decimal places * @name getFormattedNumber * @memberof Handlebars * @example * <div class="number">{{getFormattedNumber this.total}}</div> */ Handlebars.registerHelper('getFormattedNumber', function(context) { if (isNaN(context)) { return context; } var ret = parseFloat((parseFloat(context).toFixed(2)).toString()).toString(); return ret.replace(/(\d)(?=(\d\d\d)+(?!\d))/g, "$1,"); }); /** * Convert text to upper case * @name toUpperCase * @memberof Handlebars * @example * <div class="title">{{toUpperCase page-title}}</div> */ Handlebars.registerHelper('toUpperCase', function(context) { return context.toUpperCase(); }); /** * Convert array of app ids to comma separated string of app names. Handlebar binding to {@link CountlyHelpers.appIdsToNames} * @name appIdsToNames * @memberof Handlebars * @example * <div class="apps">{{appIdsToNames appIds}}</div> */ Handlebars.registerHelper('appIdsToNames', function(context) { return CountlyHelpers.appIdsToNames(context); }); /** * Loop for a specified number of times. Creating variable "count" as current index from 1 to provided value * @name forNumberOfTimes * @memberof Handlebars * @example * <ul> * {{#forNumberOfTimes 10}} * <li>{{count}}</li> * {{/forNumberOfTimes}} * </ul> */ Handlebars.registerHelper('forNumberOfTimes', function(context, options) { var ret = ""; for (var i = 0; i < context; i++) { ret = ret + options.fn({ count: i + 1 }); } return ret; }); /** * Loop for a calculated number of times: given variables "need" and "now", the loop will run ${need} - ${now} times * @name forNumberOfTimesCalc * @memberof Handlebars * @example * <ul> * {{#forNumberOfTimesCalc 10 3}} // will loop 7 times * <li>{{count}}</li> * {{/forNumberOfTimesCalc}} * </ul> */ Handlebars.registerHelper('forNumberOfTimesCalc', function(need, now, options) { var ret = ""; var context = parseInt(need) - parseInt(now); for (var i = 0; i < context; i++) { ret = ret + options.fn({ count: i + 1 }); } return ret; }); /** * Replaces part of a string with a string. * @name replace * @memberof Handlebars * @example * <span>{{#replace value "(" " ("}}{{/replace}}</span> */ Handlebars.registerHelper('replace', function(string, to_replace, replacement) { return (string || '').replace(to_replace, replacement); }); /** * Limit string length. 
* @name limitString * @memberof Handlebars * @example * <span>{{#limitString value 15}}{{/limitString}}</span> */ Handlebars.registerHelper('limitString', function(string, limit) { if ((string || '').length > limit) { return (string || '').substr(0, limit) + ".."; } else { return string; } }); /** * Round the number * @name round * @memberof Handlebars * @example * <span>{{round number limit}}</span> */ Handlebars.registerHelper('round', function(number, limit) { return countlyCommon.round(number, limit); });
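/**
 * Render another registered partial inline, merging the current context with any hash arguments.
 * (This doc block is an added sketch: "my-partial" below is a hypothetical name, and judging
 * from the call below, the partial has to be registered as a compiled template function.)
 * @name include
 * @memberof Handlebars
 * @example
 * {{include "my-partial" extra_option="value"}}
 */ Handlebars.registerHelper('include', function(templatename, options) { var partial = Handlebars.partials[templatename]; var context = $.extend({}, this, options.hash); return partial(context); }); /** * For loop in template providing start count, end count and increment * @name for * @memberof Handlebars * @example * {{#for start end 1}} * {{#ifCond this "==" ../data.curPage}} * <a href='#/manage/db/{{../../db}}/{{../../collection}}/page/{{this}}' class="current">{{this}}</a> * {{else}} * <a href='#/manage/db/{{../../db}}/{{../../collection}}/page/{{this}}'>{{this}}</a> * {{/ifCond}} * {{/for}} */ Handlebars.registerHelper('for', function(from, to, incr, block) { var accum = ''; for (var i = from; i < to; i += incr) { accum += block.fn(i); } return accum; }); /** * If condition with different operators, accepting first value, operator and second value. * Accepted operators are ==, !=, ===, !==, <, <=, >, >=, &&, || * @name ifCond * @memberof Handlebars * @example * {{#ifCond this.data.trend "==" "u"}} * <i class="material-icons">trending_up</i> * {{else}} * <i class="material-icons">trending_down</i> * {{/ifCond}} */ Handlebars.registerHelper('ifCond', function(v1, operator, v2, options) { switch (operator) { case '==': return (v1 == v2) ? options.fn(this) : options.inverse(this); // eslint-disable-line case '!=': return (v1 != v2) ? options.fn(this) : options.inverse(this); // eslint-disable-line case '!==': return (v1 !== v2) ? options.fn(this) : options.inverse(this); case '===': return (v1 === v2) ? options.fn(this) : options.inverse(this); case '<': return (v1 < v2) ? options.fn(this) : options.inverse(this); case '<=': return (v1 <= v2) ? options.fn(this) : options.inverse(this); case '>': return (v1 > v2) ? options.fn(this) : options.inverse(this); case '>=': return (v1 >= v2) ? options.fn(this) : options.inverse(this); case '&&': return (v1 && v2) ? options.fn(this) : options.inverse(this); case '||': return (v1 || v2) ? 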
options.fn(this) : options.inverse(this); default: return options.inverse(this); } }); /** * Format timestamp to twitter like time ago format, Handlebar binding to {@link countlyCommon.formatTimeAgo} * @name formatTimeAgo * @memberof Handlebars * @example * <div class="time">{{{formatTimeAgo value.time}}}</div> */ Handlebars.registerHelper('formatTimeAgo', function(context) { return countlyCommon.formatTimeAgo(parseInt(context) / 1000); }); /** * Get value from object by specific key, this will reference the value of the object * @name withItem * @memberof Handlebars * @example * <p>{{#withItem ../apps key=app_id}}{{this}}{{/withItem}}</p> */ Handlebars.registerHelper('withItem', function(object, options) { return options.fn(object[options.hash.key]); }); /** * Encode uri component * @name encodeURIComponent * @memberof Handlebars * @example * <a href="/path/{{encodeURIComponent entity}}"></a> */ Handlebars.registerHelper('encodeURIComponent', function(entity) { return encodeURIComponent(entity); }); $("body").addClass("lang-" + countlyCommon.BROWSER_LANG_SHORT); jQuery.i18n.properties({ name: window.production ? 'localization/min/locale' : ["localization/dashboard/dashboard", "localization/help/help", "localization/mail/mail"].concat(countlyGlobal.plugins.map(function(plugin) { return plugin + "/localization/" + plugin; })), cache: true, language: countlyCommon.BROWSER_LANG_SHORT, countlyVersion: countlyGlobal.countlyVersion + "&" + countlyGlobal.pluginsSHA, path: countlyGlobal.cdn, mode: 'map', callback: function() { for (var key in jQuery.i18n.map) { if (countlyGlobal.company) { jQuery.i18n.map[key] = jQuery.i18n.map[key].replace(new RegExp("Countly", 'ig'), countlyGlobal.company); } jQuery.i18n.map[key] = countlyCommon.encodeSomeHtml(jQuery.i18n.map[key]); } self.origLang = JSON.stringify(jQuery.i18n.map); } }); $(document).ready(function() { CountlyHelpers.initializeSelect(); CountlyHelpers.initializeTextSelect(); CountlyHelpers.initializeMultiSelect(); $(document).on('DOMNodeInserted', '.cly-select', function() { CountlyHelpers.makeSelectNative(); }); $.ajaxPrefilter(function(options) { var last5char = options.url.substring(options.url.length - 5, options.url.length); if (last5char === ".html") { var version = countlyGlobal.countlyVersion || ""; options.url = options.url + "?v=" + version; } }); var validateSession = function() { $.ajax({ url: countlyGlobal.path + "/session", data: {check_session: true}, success: function(result) { if (result === "logout") { $("#user-logout").click(); } if (result === "login") { $("#user-logout").click(); window.location = "/login"; } setTimeout(function() { validateSession(); }, countlyCommon.DASHBOARD_VALIDATE_SESSION || 30000); } }); }; setTimeout(function() { validateSession(); }, countlyCommon.DASHBOARD_VALIDATE_SESSION || 30000);//validates session each 30 seconds if (parseInt(countlyGlobal.config.session_timeout)) { var minTimeout, tenSecondTimeout, logoutTimeout; var shouldRecordAction = false; var extendSession = function() { shouldRecordAction = false; $.ajax({ url: countlyGlobal.path + "/session", success: function(result) { if (result === "logout") { $("#user-logout").click(); } if (result === "login") { $("#user-logout").click(); window.location = "/login"; } else if (result === "success") { shouldRecordAction = false; var myTimeoutValue = parseInt(countlyGlobal.config.session_timeout) * 1000 * 60; if (myTimeoutValue > 2147483647) { //max value used by set timeout function myTimeoutValue = 1800000;//30 minutes } setTimeout(function() { 
shouldRecordAction = true; }, Math.round(myTimeoutValue / 2)); resetSessionTimeouts(myTimeoutValue); } }, error: function() { shouldRecordAction = true; } }); }; var resetSessionTimeouts = function(timeout) { var minute = timeout - 60 * 1000; if (minTimeout) { clearTimeout(minTimeout); minTimeout = null; } if (minute > 0) { minTimeout = setTimeout(function() { CountlyHelpers.notify({ title: jQuery.i18n.map["common.session-expiration"], message: jQuery.i18n.map["common.expire-minute"], info: jQuery.i18n.map["common.click-to-login"] }); }, minute); } var tenSeconds = timeout - 10 * 1000; if (tenSecondTimeout) { clearTimeout(tenSecondTimeout); tenSecondTimeout = null; } if (tenSeconds > 0) { tenSecondTimeout = setTimeout(function() { CountlyHelpers.notify({ title: jQuery.i18n.map["common.session-expiration"], message: jQuery.i18n.map["common.expire-seconds"], info: jQuery.i18n.map["common.click-to-login"] }); }, tenSeconds); } if (logoutTimeout) { clearTimeout(logoutTimeout); logoutTimeout = null; } logoutTimeout = setTimeout(function() { extendSession(); }, timeout + 1000); }; var myTimeoutValue = parseInt(countlyGlobal.config.session_timeout) * 1000 * 60; //max value used by set timeout function if (myTimeoutValue > 2147483647) { myTimeoutValue = 1800000; }//30 minutes resetSessionTimeouts(myTimeoutValue); $(document).on("click mousemove extend-dashboard-user-session", function() { if (shouldRecordAction) { extendSession(); } }); extendSession(); } // If date range is selected initialize the calendar with these var periodObj = countlyCommon.getPeriod(); if (Object.prototype.toString.call(periodObj) === '[object Array]' && periodObj.length === 2) { self.dateFromSelected = parseInt(periodObj[0], 10) + countlyCommon.getOffsetCorrectionForTimestamp(parseInt(periodObj[0], 10)); self.dateToSelected = parseInt(periodObj[1], 10) + countlyCommon.getOffsetCorrectionForTimestamp(parseInt(periodObj[1], 10)); } // Initialize localization related stuff // Localization test /* $.each(jQuery.i18n.map, function (key, value) { jQuery.i18n.map[key] = key; }); */ try { moment.locale(countlyCommon.BROWSER_LANG_SHORT); } catch (e) { moment.locale("en"); } $(".reveal-language-menu").text(countlyCommon.BROWSER_LANG_SHORT.toUpperCase()); $("#sidebar-events").click(function(e) { $.when(countlyEvent.refreshEvents()).then(function() { if (countlyEvent.getEvents().length === 0) { CountlyHelpers.alert(jQuery.i18n.map["events.no-event"], "black"); e.stopImmediatePropagation(); e.preventDefault(); } }); }); // SIDEBAR $("#sidebar-menu").on("click", ".submenu-close", function() { $(this).parents(".sidebar-submenu").animate({ "right": "-170px" }, { duration: 200, easing: 'easeInExpo', complete: function() { $(".sidebar-submenu").hide(); $("#sidebar-menu>.sidebar-menu>.menu-category>.item").removeClass("menu-active"); } }); }); $("#sidebar-menu").on("click", ".item", function() { if ($(this).hasClass("menu-active")) { return true; } $("#sidebar-menu>.sidebar-menu>.menu-category>.item").removeClass("menu-active"); var elNext = $(this).next(); if (elNext.hasClass("sidebar-submenu")) { $(this).addClass("menu-active"); self.sidebar.submenu.toggle(elNext); } else { $("#sidebar-menu").find(".item").removeClass("active"); $(this).addClass("active"); var mainMenuItem = $(this).parent(".sidebar-submenu").prev(".item"); if (mainMenuItem.length) { mainMenuItem.addClass("active menu-active"); } else { self.sidebar.submenu.toggle(); } } }); $("#sidebar-menu").hoverIntent({ over: function() { var visibleSubmenu = 
$(".sidebar-submenu:visible"); if (!$(this).hasClass("menu-active") && $(".sidebar-submenu").is(":visible") && !visibleSubmenu.hasClass("half-visible")) { visibleSubmenu.addClass("half-visible"); visibleSubmenu.animate({ "right": "-110px" }, { duration: 300, easing: 'easeOutExpo' }); } }, out: function() { }, selector: ".sidebar-menu>.menu-category>.item" }); $("#sidebar-menu").hoverIntent({ over: function() { }, out: function() { var visibleSubmenu = $(".sidebar-submenu:visible"); if ($(".sidebar-submenu").is(":visible") && visibleSubmenu.hasClass("half-visible")) { visibleSubmenu.removeClass("half-visible"); visibleSubmenu.animate({ "right": "0" }, { duration: 300, easing: 'easeOutExpo' }); } }, selector: "" }); $("#sidebar-menu").hoverIntent({ over: function() { var visibleSubmenu = $(".sidebar-submenu:visible"); if (visibleSubmenu.hasClass("half-visible")) { visibleSubmenu.removeClass("half-visible"); visibleSubmenu.animate({ "right": "0" }, { duration: 300, easing: 'easeOutExpo' }); } }, out: function() { }, selector: ".sidebar-submenu:visible" }); $('#sidebar-menu').slimScroll({ height: ($(window).height()) + 'px', railVisible: true, railColor: '#4CC04F', railOpacity: .2, color: '#4CC04F', disableFadeOut: false, }); $(window).resize(function() { $('#sidebar-menu').slimScroll({ height: ($(window).height()) + 'px' }); }); $(".sidebar-submenu").on("click", ".item", function() { if ($(this).hasClass("disabled")) { return true; } $(".sidebar-submenu .item").removeClass("active"); $(this).addClass("active"); $(this).parent().prev(".item").addClass("active"); }); $("#language-menu .item").click(function() { var langCode = $(this).data("language-code"), langCodeUpper = langCode.toUpperCase(); store.set("countly_lang", langCode); $(".reveal-language-menu").text(langCodeUpper); countlyCommon.BROWSER_LANG_SHORT = langCode; countlyCommon.BROWSER_LANG = langCode; $("body").removeClass(function(index, className) { return (className.match(/(^|\s)lang-\S*/g) || []).join(' '); }).addClass("lang-" + langCode); try { moment.locale(countlyCommon.BROWSER_LANG_SHORT); } catch (e) { moment.locale("en"); } countlyCommon.getMonths(true); $("#date-to").datepicker("option", $.datepicker.regional[countlyCommon.BROWSER_LANG]); $("#date-from").datepicker("option", $.datepicker.regional[countlyCommon.BROWSER_LANG]); $.ajax({ type: "POST", url: countlyGlobal.path + "/user/settings/lang", data: { "username": countlyGlobal.member.username, "lang": countlyCommon.BROWSER_LANG_SHORT, _csrf: countlyGlobal.csrf_token }, success: function() { } }); jQuery.i18n.properties({ name: window.production ? 
'localization/min/locale' : ["localization/dashboard/dashboard", "localization/help/help", "localization/mail/mail"].concat(countlyGlobal.plugins.map(function(plugin) { return plugin + "/localization/" + plugin; })), cache: true, language: countlyCommon.BROWSER_LANG_SHORT, countlyVersion: countlyGlobal.countlyVersion + "&" + countlyGlobal.pluginsSHA, path: countlyGlobal.cdn, mode: 'map', callback: function() { for (var key in jQuery.i18n.map) { if (countlyGlobal.company) { jQuery.i18n.map[key] = jQuery.i18n.map[key].replace(new RegExp("Countly", 'ig'), countlyGlobal.company); } jQuery.i18n.map[key] = countlyCommon.encodeSomeHtml(jQuery.i18n.map[key]); } self.origLang = JSON.stringify(jQuery.i18n.map); $.when(countlyLocation.changeLanguage()).then(function() { self.activeView.render(); }); } }); }); $(document).on('click', "#save-account-details:not(.disabled)", function() { var username = $(".dialog #username").val(), old_pwd = $(".dialog #old_pwd").val(), new_pwd = $(".dialog #new_pwd").val(), re_new_pwd = $(".dialog #re_new_pwd").val(), api_key = $(".dialog #api-key").val(); if (new_pwd !== re_new_pwd) { $(".dialog #settings-save-result").addClass("red").text(jQuery.i18n.map["user-settings.password-match"]); return true; } $(this).addClass("disabled"); $.ajax({ type: "POST", url: countlyGlobal.path + "/user/settings", data: { "username": username, "old_pwd": old_pwd, "new_pwd": new_pwd, "api_key": api_key, _csrf: countlyGlobal.csrf_token }, success: function(result) { var saveResult = $(".dialog #settings-save-result"); if (result === "username-exists") { saveResult.removeClass("green").addClass("red").text(jQuery.i18n.map["management-users.username.exists"]); } else if (!result) { saveResult.removeClass("green").addClass("red").text(jQuery.i18n.map["user-settings.alert"]); } else { saveResult.removeClass("red").addClass("green").text(jQuery.i18n.map["user-settings.success"]); $(".dialog #old_pwd").val(""); $(".dialog #new_pwd").val(""); $(".dialog #re_new_pwd").val(""); $("#menu-username").text(username); $("#user-api-key").val(api_key); countlyGlobal.member.username = username; countlyGlobal.member.api_key = api_key; } $(".dialog #save-account-details").removeClass("disabled"); } }); }); var help = _.once(function() { CountlyHelpers.alert(jQuery.i18n.map["help.help-mode-welcome"], "popStyleGreen popStyleGreenWide", {button_title: jQuery.i18n.map["common.okay"] + "!", title: jQuery.i18n.map["help.help-mode-welcome-title"], image: "welcome-to-help-mode"}); }); $("#help-menu").click(function(e) { e.stopPropagation(); $("#help-toggle-cbox").prop("checked", !$("#help-toggle-cbox").prop("checked")); $("#help-toggle").toggleClass("active"); app.tipsify($("#help-toggle").hasClass("active")); if ($("#help-toggle").hasClass("active")) { help(); $.idleTimer('destroy'); clearInterval(self.refreshActiveView); } else { self.refreshActiveView = setInterval(function() { self.performRefresh(self); }, countlyCommon.DASHBOARD_REFRESH_MS); $.idleTimer(countlyCommon.DASHBOARD_IDLE_MS); } }); $("#help-toggle").click(function(e) { e.stopPropagation(); if ($(e.target).attr("id") === "help-toggle-cbox") { $("#help-toggle").toggleClass("active"); app.tipsify($("#help-toggle").hasClass("active")); if ($("#help-toggle").hasClass("active")) { help(); $.idleTimer('destroy'); clearInterval(self.refreshActiveView); } else { self.refreshActiveView = setInterval(function() { self.performRefresh(self); }, countlyCommon.DASHBOARD_REFRESH_MS); $.idleTimer(countlyCommon.DASHBOARD_IDLE_MS); } } }); var logoutRequest = 
function() { var logoutForm = document.createElement("form"); logoutForm.action = countlyGlobal.path + '/logout'; logoutForm.method = "post"; logoutForm.style.display = "none"; logoutForm.type = "submit"; var logoutForm_csrf = document.createElement("input"); logoutForm_csrf.name = '_csrf'; logoutForm_csrf.value = countlyGlobal.csrf_token; logoutForm.appendChild(logoutForm_csrf); document.body.appendChild(logoutForm); logoutForm.submit(); document.body.removeChild(logoutForm); }; $("#user-logout").click(function(e) { e.preventDefault(); store.remove('countly_active_app'); store.remove('countly_date'); store.remove('countly_location_city'); logoutRequest(); }); $(".beta-button").click(function() { CountlyHelpers.alert("This feature is currently in beta so the data you see in this view might change or disappear into thin air.<br/><br/>If you find any bugs or have suggestions please let us know!<br/><br/><a style='font-weight:500;'>Captain Obvious:</a> You can use the message box that appears when you click the question mark on the bottom right corner of this page.", "black"); }); $("#content").on("click", "#graph-note", function() { CountlyHelpers.popup("#graph-note-popup"); $(".note-date:visible").datepicker({ numberOfMonths: 1, showOtherMonths: true, onSelect: function() { dateText(); } }); $.datepicker.setDefaults($.datepicker.regional[""]); $(".note-date:visible").datepicker("option", $.datepicker.regional[countlyCommon.BROWSER_LANG]); $('.note-popup:visible .time-picker, .note-popup:visible .note-list').slimScroll({ height: '100%', start: 'top', wheelStep: 10, position: 'right', disableFadeOut: true }); $(".note-popup:visible .time-picker span").on("click", function() { $(".note-popup:visible .time-picker span").removeClass("selected"); $(this).addClass("selected"); dateText(); }); $(".note-popup:visible .manage-notes-button").on("click", function() { $(".note-popup:visible .note-create").hide(); $(".note-popup:visible .note-manage").show(); $(".note-popup:visible .create-note-button").show(); $(this).hide(); $(".note-popup:visible .create-note").hide(); }); $(".note-popup:visible .create-note-button").on("click", function() { $(".note-popup:visible .note-create").show(); $(".note-popup:visible .note-manage").hide(); $(".note-popup:visible .manage-notes-button").show(); $(this).hide(); $(".note-popup:visible .create-note").show(); }); dateText(); /** sets selected date text */ function dateText() { var selectedDate = $(".note-date:visible").val(), instance = $(".note-date:visible").data("datepicker"), date = $.datepicker.parseDate(instance.settings.dateFormat || $.datepicker._defaults.dateFormat, selectedDate, instance.settings); $(".selected-date:visible").text(moment(date).format("D MMM YYYY") + ", " + $(".time-picker:visible span.selected").text()); } if (countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID] && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes) { var noteDateIds = _.sortBy(_.keys(countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes), function(el) { return -parseInt(el); }); for (var i = 0; i < noteDateIds.length; i++) { var currNotes = countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes[noteDateIds[i]]; for (var j = 0; j < currNotes.length; j++) { $(".note-popup:visible .note-list").append( '<div class="note">' + '<div class="date" data-dateid="' + noteDateIds[i] + '">' + moment(noteDateIds[i], "YYYYMMDDHH").format("D MMM YYYY, HH:mm") + '</div>' + '<div class="content">' + currNotes[j] + '</div>' + '<div class="delete-note"><i class="fa fa-trash"></i></div>' + 
    '</div>');
            }
        }
    }
    if (!$(".note-popup:visible .note").length) {
        $(".note-popup:visible .manage-notes-button").hide();
    }
    $('.note-popup:visible .note-content').textcounter({ max: 50, countDown: true, countDownText: jQuery.i18n.map["dashboard.note-title-remaining"] + ": ", });
    $(".note-popup:visible .note .delete-note").on("click", function() {
        var dateId = $(this).siblings(".date").data("dateid"),
            note = $(this).siblings(".content").text();
        $(this).parents(".note").fadeOut().remove();
        $.ajax({
            type: "POST",
            url: countlyGlobal.path + '/graphnotes/delete',
            data: { "app_id": countlyCommon.ACTIVE_APP_ID, "date_id": dateId, "note": note, _csrf: countlyGlobal.csrf_token },
            success: function(result) {
                if (result === false) {
                    return false;
                }
                else {
                    updateGlobalNotes({ date_id: dateId, note: note }, "delete");
                    app.activeView.refresh();
                }
            }
        });
        if (!$(".note-popup:visible .note").length) {
            $(".note-popup:visible .create-note-button").trigger("click");
            $(".note-popup:visible .manage-notes-button").hide();
        }
    });
    $(".note-popup:visible .create-note").on("click", function() {
        if ($(this).hasClass("disabled")) {
            return true;
        }
        $(this).addClass("disabled");
        var selectedDate = $(".note-date:visible").val(),
            instance = $(".note-date:visible").data("datepicker"),
            date = $.datepicker.parseDate(instance.settings.dateFormat || $.datepicker._defaults.dateFormat, selectedDate, instance.settings),
            dateId = moment(moment(date).format("D MMM YYYY") + ", " + $(".time-picker:visible span.selected").text(), "D MMM YYYY, HH:mm").format("YYYYMMDDHH"),
            note = $(".note-popup:visible .note-content").val();
        if (!note.length) {
            $(".note-popup:visible .note-content").addClass("required-border");
            $(this).removeClass("disabled");
            return true;
        }
        else {
            $(".note-popup:visible .note-content").removeClass("required-border");
        }
        $.ajax({
            type: "POST",
            url: countlyGlobal.path + '/graphnotes/create',
            data: { "app_id": countlyCommon.ACTIVE_APP_ID, "date_id": dateId, "note": note, _csrf: countlyGlobal.csrf_token },
            success: function(result) {
                if (result === false) {
                    return false;
                }
                else {
                    updateGlobalNotes({ date_id: dateId, note: result }, "create");
                    app.activeView.refresh();
                    app.recordEvent({ "key": "graph-note", "count": 1, "segmentation": {} });
                }
            }
        });
        $("#overlay").trigger("click");
    });
    /** function updates global notes
    * @param {object} noteObj - note object
    * @param {string} operation - create or delete
    */
    function updateGlobalNotes(noteObj, operation) {
        var globalNotes = countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes;
        if (operation === "create") {
            if (globalNotes) {
                if (globalNotes[noteObj.date_id]) {
                    countlyCommon.arrayAddUniq(globalNotes[noteObj.date_id], noteObj.note);
                }
                else {
                    globalNotes[noteObj.date_id] = [noteObj.note];
                }
            }
            else {
                var tmpNote = {};
                tmpNote[noteObj.date_id] = [noteObj.note];
                countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes = tmpNote;
            }
        }
        else if (operation === "delete") {
            if (globalNotes) {
                if (globalNotes[noteObj.date_id]) {
                    globalNotes[noteObj.date_id] = _.without(globalNotes[noteObj.date_id], noteObj.note);
                }
            }
        }
    }
});
// TOPBAR
var $topbar = $("#top-bar"),
    $appNavigation = $("#app-navigation");
$topbar.on("click", ".dropdown", function(e) {
    var wasActive = $(this).hasClass("clicked");
    if ($(this).hasClass('manage-long-tasks-menu-dropdown')) {
        $("#manage-long-tasks-icon").removeClass('unread');
        app.haveUnreadReports = false;
        $(".orange-side-notification-banner-wrapper").css("display", "none");
    }
    $topbar.find(".dropdown").removeClass("clicked");
    if (wasActive) {
        $(this).removeClass("clicked");
    }
    else {
        $(this).find(".nav-search input").val("");
        $(this).find(".list").scrollTop(0);
        $(this).addClass("clicked");
        var _this = $(this);
        setTimeout(function() {
            _this.find(".nav-search input").focus();
        }, 50);
    }
    e.stopPropagation();
});
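// The search box and key handlers below implement the app switcher's
// type-ahead: typing filters the app list, the up/down arrow keys (key codes
// 38/40) cycle a highlight through the filtered items, and Enter (13)
// switches to the highlighted app.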
$topbar.on("click", ".dropdown .nav-search", function(e) {
    e.stopPropagation();
});
/**
* Clear highlights class from app items
* @param {array} filteredItems - filtered app items list
*/
function clearHighlights(filteredItems) {
    var length = filteredItems.length;
    for (var i = 0; i < length; i++) {
        $(filteredItems[i]).removeClass('highlighted-app-item');
    }
}
var arrowed = false;
var currentIndex;
$('#app-navigation').on('keyup', '.nav-search input', function(e) {
    var code = (e.keyCode || e.which);
    var filteredItems = $('#app-navigation > div.menu > div.list > .filtered-app-item');
    var indexLimit = filteredItems.length;
    if (code === 38) {
        clearHighlights(filteredItems);
        if (!arrowed) {
            arrowed = true;
            currentIndex = indexLimit - 1;
        }
        else {
            currentIndex = currentIndex - 1;
            if (currentIndex === -1) {
                currentIndex = indexLimit - 1;
            }
        }
        $(filteredItems[currentIndex]).addClass('highlighted-app-item');
    }
    else if (code === 40) {
        clearHighlights(filteredItems);
        if (!arrowed) {
            arrowed = true;
            currentIndex = 0;
        }
        else {
            currentIndex = currentIndex + 1;
            if (currentIndex === indexLimit) {
                currentIndex = 0;
            }
        }
        $(filteredItems[currentIndex]).addClass('highlighted-app-item');
    }
    else if (code === 13) {
        $('#app-navigation').removeClass('clicked');
        var appKey = $(filteredItems[currentIndex]).data("key"),
            appId = $(filteredItems[currentIndex]).data("id"),
            appName = $(filteredItems[currentIndex]).find(".name").text(),
            appImage = $(filteredItems[currentIndex]).find(".app-icon").css("background-image");
        $("#active-app-icon").css("background-image", appImage);
        $("#active-app-name").text(appName);
        $("#active-app-name").attr('title', appName);
        if (self.activeAppKey !== appKey) {
            self.activeAppName = appName;
            self.activeAppKey = appKey;
            self.switchApp(appId);
            setTimeout(function() {
                window.location.reload();
            }, 1000);
        }
    }
    else {
        return;
    }
});
$topbar.on("click", ".dropdown .item", function(e) {
    $topbar.find(".dropdown").removeClass("clicked");
    e.stopPropagation();
});
$("body").on("click", function() {
    $topbar.find(".dropdown").removeClass("clicked");
});
$("#user_api_key_item").click(function() {
    $(this).find('input').first().select();
});
$topbar.on("click", "#hide-sidebar-button", function() {
    $("#hide-sidebar-button").toggleClass("active");
    var $analyticsMainView = $("#analytics-main-view");
    $analyticsMainView.find("#sidebar").toggleClass("hidden");
    $analyticsMainView.find("#content-container").toggleClass("cover-left");
});
// Prevent body scroll after list inside dropdown is scrolled till the end
// Applies to any element that has prevent-body-scroll class as well
$(document).on('DOMMouseScroll mousewheel', ".dropdown .list, .prevent-body-scroll", function(ev) {
    var $this = $(this),
        scrollTop = this.scrollTop,
        scrollHeight = this.scrollHeight,
        height = $this.innerHeight(),
        delta = ev.originalEvent.wheelDelta,
        up = delta > 0;
    if (ev.target.className === 'item scrollable') {
        return true;
    }
    var prevent = function() {
        ev.stopPropagation();
        ev.preventDefault();
        ev.returnValue = false;
        return false;
    };
    if (!up && -delta > scrollHeight - height - scrollTop) {
        // Scrolling down, but this will take us past the bottom.
        $this.scrollTop(scrollHeight);
        return prevent();
    }
    else if (up && delta > scrollTop) {
        // Scrolling up, but this will take us past the top.
        $this.scrollTop(0);
        return prevent();
    }
}, {passive: false});
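// App navigation: clicking an item swaps the active app's icon and name,
// calls self.switchApp() and schedules a full page reload; opening the
// dropdown rebuilds the list from countlyGlobal.apps, sorted by name.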
$appNavigation.on("click", ".item", function() {
    var appKey = $(this).data("key"),
        appId = $(this).data("id"),
        appName = $(this).find(".name").text(),
        appImage = $(this).find(".app-icon").css("background-image");
    $("#active-app-icon").css("background-image", appImage);
    $("#active-app-name").text(appName);
    $("#active-app-name").attr('title', appName);
    if (self.activeAppKey !== appKey) {
        self.activeAppName = appName;
        self.activeAppKey = appKey;
        self.switchApp(appId);
        setTimeout(function() {
            window.location.reload();
        }, 1000);
    }
});
$appNavigation.on("click", function() {
    var appList = $(this).find(".list"),
        apps = _.sortBy(countlyGlobal.apps, function(app) {
            return (app.name + "").toLowerCase();
        });
    appList.html("");
    for (var i = 0; i < apps.length; i++) {
        var currApp = apps[i];
        var app = $("<div></div>");
        app.addClass("item searchable");
        app.data("key", currApp.key);
        app.data("id", currApp._id);
        var appIcon = $("<div></div>");
        appIcon.addClass("app-icon");
        appIcon.css("background-image", "url(" + countlyGlobal.cdn + "appimages/" + currApp._id + ".png)");
        var appName = $("<div></div>");
        appName.addClass("name");
        appName.attr("title", currApp.name);
        appName.text(currApp.name);
        app.append(appIcon);
        app.append(appName);
        appList.append(app);
    }
});
});
if (!_.isEmpty(countlyGlobal.apps)) {
    if (!countlyCommon.ACTIVE_APP_ID) {
        var activeApp = (countlyGlobal.member && countlyGlobal.member.active_app_id && countlyGlobal.apps[countlyGlobal.member.active_app_id]) ? countlyGlobal.apps[countlyGlobal.member.active_app_id] : countlyGlobal.defaultApp;
        countlyCommon.setActiveApp(activeApp._id);
        self.activeAppName = activeApp.name;
        $('#active-app-name').html(activeApp.name);
        $('#active-app-name').attr('title', activeApp.name);
        $("#active-app-icon").css("background-image", "url('" + countlyGlobal.cdn + "appimages/" + countlyCommon.ACTIVE_APP_ID + ".png')");
    }
    else {
        $("#active-app-icon").css("background-image", "url('" + countlyGlobal.cdn + "appimages/" + countlyCommon.ACTIVE_APP_ID + ".png')");
        $("#active-app-name").text(countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].name);
        $('#active-app-name').attr('title', countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].name);
        self.activeAppName = countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].name;
    }
}
else {
    $("#new-install-overlay").show();
}
$.idleTimer(countlyCommon.DASHBOARD_IDLE_MS);
$(document).bind("idle.idleTimer", function() {
    clearInterval(self.refreshActiveView);
});
$(document).bind("active.idleTimer", function() {
    self.activeView.restart();
    self.refreshActiveView = setInterval(function() {
        self.performRefresh(self);
    }, countlyCommon.DASHBOARD_REFRESH_MS);
});
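// Custom DataTables pagination renderer ("four_button"), registered through
// the legacy oPagination plugin interface: fnInit builds the first/previous/
// next/last controls and binds their clicks, while fnUpdate toggles the
// enabled/disabled classes on every redraw.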
$.fn.dataTableExt.oPagination.four_button = {
    "fnInit": function(oSettings, nPaging, fnCallbackDraw) {
        var nFirst = document.createElement('span');
        var nPrevious = document.createElement('span');
        var nNext = document.createElement('span');
        var nLast = document.createElement('span');
        nFirst.innerHTML = "<i class='fa fa-angle-double-left'></i>";
        nPrevious.innerHTML = "<i class='fa fa-angle-left'></i>";
        nNext.innerHTML = "<i class='fa fa-angle-right'></i>";
        nLast.innerHTML = "<i class='fa fa-angle-double-right'></i>";
        nFirst.className = "paginate_button first";
        nPrevious.className = "paginate_button previous";
        nNext.className = "paginate_button next";
        nLast.className = "paginate_button last";
        nPaging.appendChild(nFirst);
        nPaging.appendChild(nPrevious);
        nPaging.appendChild(nNext);
        nPaging.appendChild(nLast);
        $(nFirst).click(function() {
            oSettings.oApi._fnPageChange(oSettings, "first");
            fnCallbackDraw(oSettings);
        });
        $(nPrevious).click(function() {
            oSettings.oApi._fnPageChange(oSettings, "previous");
            fnCallbackDraw(oSettings);
        });
        $(nNext).click(function() {
            oSettings.oApi._fnPageChange(oSettings, "next");
            fnCallbackDraw(oSettings);
        });
        $(nLast).click(function() {
            oSettings.oApi._fnPageChange(oSettings, "last");
            fnCallbackDraw(oSettings);
        });
        $(nFirst).bind('selectstart', function() {
            return false;
        });
        $(nPrevious).bind('selectstart', function() {
            return false;
        });
        $(nNext).bind('selectstart', function() {
            return false;
        });
        $(nLast).bind('selectstart', function() {
            return false;
        });
    },
    "fnUpdate": function(oSettings /*,fnCallbackDraw*/) {
        if (!oSettings.aanFeatures.p) {
            return;
        }
        var an = oSettings.aanFeatures.p;
        for (var i = 0, iLen = an.length; i < iLen; i++) {
            var buttons = an[i].getElementsByTagName('span');
            if (oSettings._iDisplayStart === 0) {
                buttons[0].className = "paginate_disabled_previous";
                buttons[1].className = "paginate_disabled_previous";
            }
            else {
                buttons[0].className = "paginate_enabled_previous";
                buttons[1].className = "paginate_enabled_previous";
            }
            if (oSettings.fnDisplayEnd() === oSettings.fnRecordsDisplay()) {
                buttons[2].className = "paginate_disabled_next";
                buttons[3].className = "paginate_disabled_next";
            }
            else {
                buttons[2].className = "paginate_enabled_next";
                buttons[3].className = "paginate_enabled_next";
            }
        }
    }
};
$.fn.dataTableExt.oApi.fnStandingRedraw = function(oSettings) {
    if (oSettings.oFeatures.bServerSide === false) {
        var before = oSettings._iDisplayStart;
        oSettings.oApi._fnReDraw(oSettings);
        // iDisplayStart has been reset to zero - so lets change it back
        oSettings._iDisplayStart = before;
        oSettings.oApi._fnCalculateEnd(oSettings);
    }
    // draw the 'current' page
    oSettings.oApi._fnDraw(oSettings);
};
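// Custom column sorters: DataTables compares the rendered cell strings by
// default, so the oSort pairs below first convert Countly's formatted values
// (localized dates, date ranges, percentages, formatted numbers, loyalty/
// frequency/duration buckets, app versions) into comparable numbers. A column
// opts in through its legacy sType, e.g. "sType": "formatted-num".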
/** getCustomDateInt
* @param {string} s - date string
* @returns {number} number representing date
*/
function getCustomDateInt(s) {
    var dateParts = "";
    if (s.indexOf("W") === 0) {
        s = s.replace(",", "");
        s = s.replace("W", "");
        dateParts = s.split(" ");
        return (parseInt(dateParts[0])) + parseInt(dateParts.pop() * 10000);
    }
    s = moment(s, countlyCommon.getDateFormat(countlyCommon.periodObj.dateString)).format(countlyCommon.periodObj.dateString);
    if (s.indexOf(":") !== -1) {
        if (s.indexOf(",") !== -1) {
            s = s.replace(/,|:/g, "");
            dateParts = s.split(" ");
            return parseInt((countlyCommon.getMonths().indexOf(dateParts[1]) + 1) * 1000000) + parseInt(dateParts[0]) * 10000 + parseInt(dateParts[2]);
        }
        else {
            return parseInt(s.replace(':', ''));
        }
    }
    else if (s.length === 3) {
        return countlyCommon.getMonths().indexOf(s) + 1;
    }
    else if (s.indexOf("W") === 0) {
        s = s.replace(",", "");
        s = s.replace("W", "");
        dateParts = s.split(" ");
        return (parseInt(dateParts[0])) + parseInt(dateParts.pop() * 10000);
    }
    else {
        s = s.replace(",", "");
        dateParts = s.split(" ");
        if (dateParts.length === 3) {
            return (parseInt(dateParts[2]) * 10000) + parseInt((countlyCommon.getMonths().indexOf(dateParts[1]) + 1) * 100) + parseInt(dateParts[0]);
        }
        else {
            if (dateParts[0].length === 3) {
                return parseInt((countlyCommon.getMonths().indexOf(dateParts[0]) + 1) * 100) + parseInt(dateParts[1] * 10000);
            }
            else {
                return parseInt((countlyCommon.getMonths().indexOf(dateParts[1]) + 1) * 100) + parseInt(dateParts[0]);
            }
        }
    }
}
jQuery.fn.dataTableExt.oSort['customDate-asc'] = function(x, y) {
    x = getCustomDateInt(x);
    y = getCustomDateInt(y);
    return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['customDate-desc'] = function(x, y) {
    x = getCustomDateInt(x);
    y = getCustomDateInt(y);
    return ((x < y) ? 1 : ((x > y) ? -1 : 0));
};
/** getDateRangeInt
* @param {string} s - range string
* @returns {number} number representing range
*/
function getDateRangeInt(s) {
    s = s.split("-")[0];
    var mEnglish = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
    if (s.indexOf(":") !== -1) {
        var mName = (s.split(" ")[1]).split(",")[0];
        return s.replace(mName, parseInt(mEnglish.indexOf(mName))).replace(/[:, ]/g, "");
    }
    else {
        var parts = s.split(" ");
        if (parts.length > 1) {
            return parseInt(mEnglish.indexOf(parts[1]) * 100) + parseInt(parts[0]);
        }
        else {
            return parts[0].replace(/[><]/g, "");
        }
    }
}
jQuery.fn.dataTableExt.oSort['dateRange-asc'] = function(x, y) {
    x = getDateRangeInt(x);
    y = getDateRangeInt(y);
    return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['dateRange-desc'] = function(x, y) {
    x = getDateRangeInt(x);
    y = getDateRangeInt(y);
    return ((x < y) ? 1 : ((x > y) ? -1 : 0));
};
jQuery.fn.dataTableExt.oSort['percent-asc'] = function(x, y) {
    x = parseFloat($("<a></a>").html(x).text().replace("%", ""));
    y = parseFloat($("<a></a>").html(y).text().replace("%", ""));
    return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['percent-desc'] = function(x, y) {
    x = parseFloat($("<a></a>").html(x).text().replace("%", ""));
    y = parseFloat($("<a></a>").html(y).text().replace("%", ""));
    return ((x < y) ? 1 : ((x > y) ? -1 : 0));
};
jQuery.fn.dataTableExt.oSort['formatted-num-asc'] = function(x, y) {
    'use strict';
    // Define vars
    var a = [], b = [];
    // Match any character except: digits (0-9), dash (-), period (.), or forward slash (/) and replace those characters with empty string.
    x = x.replace(/[^\d\-\.\/]/g, ''); // eslint-disable-line
    y = y.replace(/[^\d\-\.\/]/g, ''); // eslint-disable-line
    // Handle simple fractions
    if (x.indexOf('/') >= 0) {
        a = x.split("/");
        x = parseInt(a[0], 10) / parseInt(a[1], 10);
    }
    if (y.indexOf('/') >= 0) {
        b = y.split("/");
        y = parseInt(b[0], 10) / parseInt(b[1], 10);
    }
    return x - y;
};
jQuery.fn.dataTableExt.oSort['formatted-num-desc'] = function(x, y) {
    'use strict';
    // Define vars
    var a = [], b = [];
    // Match any character except: digits (0-9), dash (-), period (.), or forward slash (/) and replace those characters with empty string.
    x = x.replace(/[^\d\-\.\/]/g, ''); // eslint-disable-line
    y = y.replace(/[^\d\-\.\/]/g, ''); // eslint-disable-line
    // Handle simple fractions
    if (x.indexOf('/') >= 0) {
        a = x.split("/");
        x = parseInt(a[0], 10) / parseInt(a[1], 10);
    }
    if (y.indexOf('/') >= 0) {
        b = y.split("/");
        y = parseInt(b[0], 10) / parseInt(b[1], 10);
    }
    return y - x;
};
jQuery.fn.dataTableExt.oSort['loyalty-asc'] = function(x, y) {
    x = countlySession.getLoyaltyIndex(x);
    y = countlySession.getLoyaltyIndex(y);
    return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['loyalty-desc'] = function(x, y) {
    x = countlySession.getLoyaltyIndex(x);
    y = countlySession.getLoyaltyIndex(y);
    return ((x < y) ? 1 : ((x > y) ? -1 : 0));
};
jQuery.fn.dataTableExt.oSort['frequency-asc'] = function(x, y) {
    x = countlySession.getFrequencyIndex(x);
    y = countlySession.getFrequencyIndex(y);
    return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['frequency-desc'] = function(x, y) {
    x = countlySession.getFrequencyIndex(x);
    y = countlySession.getFrequencyIndex(y);
    return ((x < y) ? 1 : ((x > y) ?
-1 : 0)); }; jQuery.fn.dataTableExt.oSort['session-duration-asc'] = function(x, y) { x = countlySession.getDurationIndex(x); y = countlySession.getDurationIndex(y); return ((x < y) ? -1 : ((x > y) ? 1 : 0)); }; jQuery.fn.dataTableExt.oSort['session-duration-desc'] = function(x, y) { x = countlySession.getDurationIndex(x); y = countlySession.getDurationIndex(y); return ((x < y) ? 1 : ((x > y) ? -1 : 0)); }; jQuery.fn.dataTableExt.oSort['app_versions-asc'] = function(x, y) { return countlyCommon.compareVersions(x, y); }; jQuery.fn.dataTableExt.oSort['app_versions-desc'] = function(x, y) { return countlyCommon.compareVersions(x, y); }; jQuery.fn.dataTableExt.oSort['format-ago-asc'] = function(x, y) { return x - y; }; jQuery.fn.dataTableExt.oSort['format-ago-desc'] = function(x, y) { return y - x; }; /** saves current page * @param {object} dtable - data table * @param {object} settings -data table settings */ function saveCurrentPage(dtable, settings) { var data = dtable.fnGetData(); countlyCommon.dtSettings = countlyCommon.dtSettings || []; var previosTableStatus = countlyCommon.dtSettings.filter(function(item) { return (item.viewId === app.activeView.cid && item.selector === settings.sTableId); })[0]; if (previosTableStatus) { previosTableStatus.dataLength = data.length; previosTableStatus.page = settings._iDisplayStart / settings._iDisplayLength; } else { countlyCommon.dtSettings.push({ viewId: app.activeView.cid, selector: settings.sTableId, dataLength: data.length, page: settings._iDisplayStart / settings._iDisplayLength }); } } /** sets current page * @param {object} dtable - data table * @param {object} settings -data table settings */ function setCurrentPage(dtable, settings) { var tablePersistSettings = countlyCommon.dtSettings.filter(function(item) { return (item.viewId === app.activeView.cid && item.selector === settings.sTableId); })[0]; if (tablePersistSettings && tablePersistSettings.dataLength === dtable.fnGetData().length) { dtable.fnPageChange(tablePersistSettings.page); } } /** gets page size * @param {object} settings -data table settings * @returns {boolean} states if dtable is in active view */ function getPageSize(settings) { var pageSizeSettings = countlyCommon.getPersistentSettings().pageSizeSettings; if (!pageSizeSettings) { pageSizeSettings = []; } var tablePersistSettings = pageSizeSettings.filter(function(item) { return (item.viewId === app.activeView.cid && item.selector === settings.sTableId); })[0]; var pageSize; if (tablePersistSettings && tablePersistSettings.pageSize) { pageSize = tablePersistSettings.pageSize; } else if (settings.oInit && settings.oInit.iDisplayLength) { pageSize = settings.oInit.iDisplayLength; } else { pageSize = settings.iDisplayLength || settings._iDisplayLength || 50; } return pageSize; } $.extend(true, $.fn.dataTable.defaults, { "sDom": '<"dataTable-top"lfpT>t<"dataTable-bottom"i>', "bAutoWidth": false, "bLengthChange": true, "bPaginate": true, "sPaginationType": "four_button", "iDisplayLength": 50, "bDestroy": true, "bDeferRender": true, "oLanguage": { "sZeroRecords": jQuery.i18n.map["common.table.no-data"], "sInfoEmpty": jQuery.i18n.map["common.table.no-data"], "sEmptyTable": jQuery.i18n.map["common.table.no-data"], "sInfo": jQuery.i18n.map["common.showing"], "sInfoFiltered": jQuery.i18n.map["common.filtered"], "sSearch": jQuery.i18n.map["common.search"], "sLengthMenu": jQuery.i18n.map["common.show-items"] + "<input type='number' id='dataTables_length_input'/>" }, "fnInitComplete": function(oSettings) { var dtable = this; var 
saveHTML = "<div class='save-table-data' data-help='help.datatables-export'><i class='fa fa-download'></i></div>",
    searchHTML = "<div class='search-table-data'><i class='fa fa-search'></i></div>",
    tableWrapper = $("#" + oSettings.sTableId + "_wrapper");
countlyCommon.dtSettings = countlyCommon.dtSettings || [];
tableWrapper.bind('page', function(e, _oSettings) {
    var dataTable = $(e.target).dataTable();
    saveCurrentPage(dataTable, _oSettings);
});
tableWrapper.bind('init', function(e, _oSettings) {
    var dataTable = $(e.target).dataTable();
    if (_oSettings.oFeatures.bServerSide) {
        setTimeout(function() {
            setCurrentPage(dataTable, _oSettings);
            oSettings.isInitFinished = true;
            tableWrapper.show();
        }, 0);
    }
    else {
        setCurrentPage(dataTable, _oSettings);
        oSettings.isInitFinished = true;
        tableWrapper.show();
    }
});
var selectButton = "<div class='select-column-table-data' style='display:none;'><p class='ion-gear-a'></p></div>";
$(selectButton).insertBefore(tableWrapper.find(".dataTables_filter"));
$(saveHTML).insertBefore(tableWrapper.find(".DTTT_container"));
$(searchHTML).insertBefore(tableWrapper.find(".dataTables_filter"));
tableWrapper.find(".dataTables_filter").html(tableWrapper.find(".dataTables_filter").find("input").attr("Placeholder", jQuery.i18n.map["common.search"]).clone(true));
tableWrapper.find(".search-table-data").on("click", function() {
    $(this).next(".dataTables_filter").toggle();
    $(this).next(".dataTables_filter").find("input").focus();
});
tableWrapper.find(".dataTables_length").show();
tableWrapper.find('#dataTables_length_input').bind('change.DT', function(/*e, _oSettings*/) {
    //store.set("iDisplayLength", $(this).val());
    if ($(this).val() && $(this).val().length > 0) {
        var pageSizeSettings = countlyCommon.getPersistentSettings().pageSizeSettings;
        if (!pageSizeSettings) {
            pageSizeSettings = [];
        }
        var tableId = oSettings.sTableId;
        if (!tableId) {
            return;
        }
        var previosTableStatus = pageSizeSettings.filter(function(item) {
            return (item.viewId === app.activeView.cid && item.selector === tableId);
        })[0];
        if (previosTableStatus) {
            previosTableStatus.pageSize = parseInt($(this).val());
        }
        else {
            pageSizeSettings.push({ viewId: app.activeView.cid, selector: tableId, pageSize: parseInt($(this).val()) });
        }
        countlyCommon.setPersistentSettings({ pageSizeSettings: pageSizeSettings });
    }
});
var exportDrop;
if (oSettings.oFeatures.bServerSide) {
    //slowdown serverside filtering
    tableWrapper.find('.dataTables_filter input').unbind();
    var timeout = null;
    tableWrapper.find('.dataTables_filter input').bind('keyup', function() {
        var $this = this;
        if (timeout) {
            clearTimeout(timeout);
            timeout = null;
        }
        timeout = setTimeout(function() {
            oSettings.oInstance.fnFilter($this.value);
        }, 1000);
    });
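    // Export menu wiring: a CountlyDrop dropdown is attached to the table's
    // save icon; server-side tables go through the view's getExportAPI() /
    // getExportQuery() endpoints, everything else falls back to
    // CountlyHelpers.tableExport() over the data already in the table.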
    var exportView = $(dtable).data("view") || "activeView";
    var exportAPIData = app[exportView].getExportAPI ? app[exportView].getExportAPI(oSettings.sTableId) : null;
    var exportQueryData = app[exportView].getExportQuery ? app[exportView].getExportQuery(oSettings.sTableId) : null;
    if (exportAPIData || exportQueryData) {
        //create export dialog
        var position = 'left middle';
        if (oSettings.oInstance && oSettings.oInstance.addColumnExportSelector === true) {
            position = 'left top';
        }
        exportDrop = new CountlyDrop({
            target: tableWrapper.find('.save-table-data')[0],
            content: "",
            position: position,
            classes: "server-export",
            constrainToScrollParent: false,
            remove: true,
            openOn: "click"
        });
        exportDrop.on("open", function() {
            if (exportAPIData) {
                $(".server-export .countly-drop-content").empty().append(CountlyHelpers.export(oSettings._iRecordsDisplay, app[exportView].getExportAPI(oSettings.sTableId), null, true, oSettings.oInstance).removeClass("dialog"));
            }
            else if (exportQueryData) {
                $(".server-export .countly-drop-content").empty().append(CountlyHelpers.export(oSettings._iRecordsDisplay, app[exportView].getExportQuery(oSettings.sTableId), null, null, oSettings.oInstance).removeClass("dialog"));
            }
            exportDrop.position();
        });
    }
    else {
        // tableWrapper.find(".dataTables_length").hide();
        //create export dialog
        var item = tableWrapper.find('.save-table-data')[0];
        if (item) {
            exportDrop = new CountlyDrop({
                target: tableWrapper.find('.save-table-data')[0],
                content: "",
                position: 'left middle',
                classes: "server-export",
                constrainToScrollParent: false,
                remove: true,
                openOn: "click"
            });
            exportDrop.on("open", function() {
                $(".server-export .countly-drop-content").empty().append(CountlyHelpers.tableExport(dtable, { api_key: countlyGlobal.member.api_key }, null, oSettings).removeClass("dialog"));
                exportDrop.position();
            });
        }
    }
}
else {
    // tableWrapper.find(".dataTables_length").hide();
    //create export dialog
    var item2 = tableWrapper.find('.save-table-data')[0];
    if (item2) {
        exportDrop = new CountlyDrop({
            target: tableWrapper.find('.save-table-data')[0],
            content: "",
            position: 'right middle',
            classes: "server-export",
            constrainToScrollParent: false,
            remove: true,
            openOn: "click"
        });
        exportDrop.on("open", function() {
            $(".server-export .countly-drop-content").empty().append(CountlyHelpers.tableExport(dtable, { api_key: countlyGlobal.member.api_key }).removeClass("dialog"));
            exportDrop.position();
        });
    }
}
//tableWrapper.css({"min-height": tableWrapper.height()});
},
fnPreDrawCallback: function(oSettings) {
    var tableWrapper = $("#" + oSettings.sTableId + "_wrapper");
    if (oSettings.isInitFinished) {
        tableWrapper.show();
    }
    else {
        oSettings._iDisplayLength = getPageSize(oSettings);
        $('.dataTables_length').find('input[type=number]').val(oSettings._iDisplayLength);
        tableWrapper.hide();
    }
    if (tableWrapper.find(".table-placeholder").length === 0) {
        var $placeholder = $('<div class="table-placeholder"><div class="top"></div><div class="header"></div></div>');
        tableWrapper.append($placeholder);
    }
    if (tableWrapper.find(".table-loader").length === 0) {
        tableWrapper.append("<div class='table-loader'></div>");
    }
},
fnDrawCallback: function(oSettings) {
    var tableWrapper = $("#" + oSettings.sTableId + "_wrapper");
    tableWrapper.find(".dataTable-bottom").show();
    tableWrapper.find(".table-placeholder").remove();
    tableWrapper.find(".table-loader").remove();
}
});
$.fn.dataTableExt.sErrMode = 'throw';
$(document).ready(function() {
    setTimeout(function() {
        self.onAppSwitch(countlyCommon.ACTIVE_APP_ID, true, true);
    }, 1);
});
},
/**
* Localize all found html elements with data-localize and data-help-localize attributes
* @param {jquery_object} el - jquery reference to parent element which contents to localize, by default all document is localized if not provided
* @memberof app
*/
localize: function(el) {
    var helpers = {
        onlyFirstUpper: function(str) {
            return str.charAt(0).toUpperCase() + str.slice(1).toLowerCase();
        },
        upper: function(str) {
            return str.toUpperCase();
        }
    };
    // translate help module
    (el ? el.find('[data-help-localize]') : $("[data-help-localize]")).each(function() {
        var elem = $(this);
        if (typeof elem.data("help-localize") !== "undefined") {
            elem.data("help", jQuery.i18n.map[elem.data("help-localize")]);
        }
    });
    // translate dashboard
    (el ? el.find('[data-localize]') : $("[data-localize]")).each(function() {
        var elem = $(this),
            toLocal = elem.data("localize").split("!"),
            localizedValue = "";
        if (toLocal.length === 2) {
            if (helpers[toLocal[0]]) {
                localizedValue = helpers[toLocal[0]](jQuery.i18n.map[toLocal[1]]);
            }
            else {
                localizedValue = jQuery.i18n.prop(toLocal[0], (toLocal[1]) ? jQuery.i18n.map[toLocal[1]] : "");
            }
        }
        else {
            localizedValue = jQuery.i18n.map[elem.data("localize")];
        }
        if (elem.is("input[type=text]") || elem.is("input[type=password]") || elem.is("textarea")) {
            elem.attr("placeholder", localizedValue);
        }
        else if (elem.is("input[type=button]") || elem.is("input[type=submit]")) {
            elem.attr("value", localizedValue);
        }
        else {
            elem.html(localizedValue);
        }
    });
},
/**
* Toggle showing tooltips, which are usually used in help mode for all elements containing css class help-zone-vs or help-zone-vb and having data-help attributes (which are generated automatically from data-help-localize attributes upon localization)
* @param {boolean} enable - if true tooltips will be shown on hover, if false tooltips will be disabled
* @param {jquery_object} el - jquery reference to parent element which contents to check for tooltips, by default all document is checked if not provided
* @memberof app
* @instance
*/
tipsify: function(enable, el) {
    var vs = el ? el.find('.help-zone-vs') : $('.help-zone-vs'),
        vb = el ? el.find('.help-zone-vb') : $('.help-zone-vb'),
        both = el ? el.find('.help-zone-vs, .help-zone-vb') : $(".help-zone-vs, .help-zone-vb");
    vb.tipsy({ gravity: $.fn.tipsy.autoNS, trigger: 'manual', title: function() { return $(this).data("help") || ""; }, fade: true, offset: 5, cssClass: 'yellow', opacity: 1, html: true });
    vs.tipsy({ gravity: $.fn.tipsy.autoNS, trigger: 'manual', title: function() { return $(this).data("help") || ""; }, fade: true, offset: 5, cssClass: 'yellow narrow', opacity: 1, html: true });
    if (enable) {
        both.off('mouseenter mouseleave')
            .on('mouseenter', function() {
                $(this).tipsy("show");
            })
            .on('mouseleave', function() {
                $(this).tipsy("hide");
            });
    }
    else {
        both.off('mouseenter mouseleave');
    }
},
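// Usage sketch (hypothetical markup and key): an element such as
//   <span data-localize="upper!common.ok"></span>
// receives jQuery.i18n.map["common.ok"] run through the "upper" helper, and
// app.localize($("#my-view")) re-translates just that subtree after a render.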
You can create new plugin to add new app type with its own dashboard
* @param {string} name - name of the app type as mobile, web, desktop etc
* @param {countlyView} view - instance of the countlyView to show as main dashboard for provided app type
* @memberof app
* @instance
* @example
* app.addAppType("mobile", MobileDashboardView);
*/
addAppType: function(name, view) {
    this.appTypes[name] = new view();
    var menu = $("#default-type").clone();
    menu.attr("id", name + "-type");
    $("#sidebar-menu").append(menu);
    //run all queued type menus
    if (this._menuForTypes[name]) {
        for (var i = 0; i < this._menuForTypes[name].length; i++) {
            this.addMenuForType(name, this._menuForTypes[name][i].category, this._menuForTypes[name][i].node);
        }
        this._menuForTypes[name] = null;
    }
    //run all queued type submenus
    if (this._subMenuForTypes[name]) {
        for (i = 0; i < this._subMenuForTypes[name].length; i++) {
            this.addSubMenuForType(name, this._subMenuForTypes[name][i].parent_code, this._subMenuForTypes[name][i].node);
        }
        this._subMenuForTypes[name] = null;
    }
    //run all queued all type menus
    for (i = 0; i < this._menuForAllTypes.length; i++) {
        this.addMenuForType(name, this._menuForAllTypes[i].category, this._menuForAllTypes[i].node);
    }
    //run all queued all type submenus
    for (i = 0; i < this._subMenuForAllTypes.length; i++) {
        this.addSubMenuForType(name, this._subMenuForAllTypes[i].parent_code, this._subMenuForAllTypes[i].node);
    }
},
/**
* Add callback to be called when user changes app in dashboard, which can be used globally, outside of the view
* @param {function} callback - function receives app_id param which is app id of the new app to which user switched
* @memberof app
* @instance
* @example
* app.addAppSwitchCallback(function(appId){
*     countlyCrashes.loadList(appId);
* });
*/
addAppSwitchCallback: function(callback) {
    this.appSwitchCallbacks.push(callback);
},
/**
* Add callback to be called when user changes app in Management -> Applications section, useful when providing custom input additions to app editing for different app types
* @param {function} callback - function receives app_id param which is app id and type which is app type
* @memberof app
* @instance
* @example
* app.addAppManagementSwitchCallback(function(appId, type){
*     if (type == "mobile") {
*         addPushHTMLIfNeeded(type);
*         $("#view-app .appmng-push").show();
*     } else {
*         $("#view-app .appmng-push").hide();
*     }
* });
*/
addAppManagementSwitchCallback: function(callback) {
    this.appManagementSwitchCallbacks.push(callback);
},
/**
* Modify app object on app create/update before submitting it to server
* @param {function} callback - function args object with all data that will be submitted to server on app create/update
* @memberof app
* @instance
* @example
* app.addAppObjectModificator(function(args){
*     if (args.type === "mobile") {
*         //do something for mobile
*     }
* });
*/
addAppObjectModificator: function(callback) {
    this.appObjectModificators.push(callback);
},
/**
* Add a countlyManagementView-extending view which will be displayed in accordion tabs on Management->Applications screen
* @memberof app
* @param {string} plugin - plugin name
* @param {string} title - plugin title
* @param {object} View - plugin view
*/
addAppManagementView: function(plugin, title, View) {
    this.appManagementViews[plugin] = {title: title, view: View};
},
/**
* Add a countlyManagementView-extending view which will be displayed in accordion tabs on Management->Applications screen
* @memberof app
* @param {string} plugin - plugin name
* @param {string} title - plugin
title * @param {Array} inputs - plugin inputs */ addAppManagementInput: function(plugin, title, inputs) { this.appManagementViews[plugin] = {title: title, inputs: inputs}; }, /** * Add additional settings to app management. Allows you to inject html with css classes app-read-settings, app-write-settings and using data-id attribute for the key to store in app collection. And if your value or input needs additional processing, you may add the callbacks here * @param {string} id - the same value on your input data-id attributes * @param {object} options - different callbacks for data modification * @param {function} options.toDisplay - function to be called when data is prepared for displaying, pases reference to html element with app-read-settings css class in which value should be displayed * @param {function} options.toInput - function to be called when data is prepared for input, pases reference to html input element with app-write-settings css class in which value should be placed for editing * @param {function} options.toSave - function to be called when data is prepared for saving, pases reference to object args that will be sent to server ad html input element with app-write-settings css class from which value should be taken and placed in args * @param {function} options.toInject - function to be called when to inject HTML into app management view * @memberof app * @instance * @example * app.addAppSetting("my_setting", { * toDisplay: function(appId, elem){$(elem).text(process(countlyGlobal['apps'][appId]["my_setting"]));}, * toInput: function(appId, elem){$(elem).val(process(countlyGlobal['apps'][appId]["my_setting"]));}, * toSave: function(appId, args, elem){ * args.my_setting = process($(elem).val()); * }, * toInject: function(){ * var addApp = '<tr class="help-zone-vs" data-help-localize="manage-apps.app-my_setting">'+ * '<td>'+ * '<span data-localize="management-applications.my_setting"></span>'+ * '</td>'+ * '<td>'+ * '<input type="text" value="" class="app-write-settings" data-localize="placeholder.my_setting" data-id="my_setting">'+ * '</td>'+ * '</tr>'; * * $("#add-new-app table .table-add").before(addApp); * * var editApp = '<tr class="help-zone-vs" data-help-localize="manage-apps.app-my_settingt">'+ * '<td>'+ * '<span data-localize="management-applications.my_setting"></span>'+ * '</td>'+ * '<td>'+ * '<div class="read app-read-settings" data-id="my_setting"></div>'+ * '<div class="edit">'+ * '<input type="text" value="" class="app-write-settings" data-id="my_setting" data-localize="placeholder.my_setting">'+ * '</div>'+ * '</td>'+ * '</tr>'; * * $(".app-details table .table-edit").before(editApp); * } * }); */ addAppSetting: function(id, options) { this.appSettings[id] = options; }, /** * Add callback to be called when user changes app type in UI in Managment -> Applications section (even without saving app type, just chaning in UI), useful when providing custom input additions to app editing for different app types * @param {function} callback - function receives type which is app type * @memberof app * @instance * @example * app.addAppAddTypeCallback(function(type){ * if (type == "mobile") { * $("#view-app .appmng-push").show(); * } else { * $("#view-app .appmng-push").hide(); * } * }); */ addAppAddTypeCallback: function(callback) { this.appAddTypeCallbacks.push(callback); }, /** * Add callback to be called when user open user edit UI in Managment -> Users section (even without saving, just opening), useful when providing custom input additions to user editing * @param 
{function} callback - function receives user object and paramm which can be true if saving data, false if opening data, string to modify data * @memberof app * @instance */ addUserEditCallback: function(callback) { this.userEditCallbacks.push(callback); }, /** * Add custom data export handler from datatables to csv/xls exporter. Provide exporter name and callback function. * Then add the same name as sExport attribute to the first datatables column. * Then when user will want to export data from this table, your callback function will be called to get the data. * You must perpare array of objects all with the same keys, where keys are columns and value are table data and return it from callback * to be processed by exporter. * @param {string} name - name of the export to expect in datatables sExport attribute * @param {function} callback - callback to call when getting data * @memberof app * @instance * @example * app.addDataExport("userinfo", function(){ * var ret = []; * var elem; * for(var i = 0; i < tableData.length; i++){ * //use same keys for each array element with different user data * elem ={ * "fullname": tableData[i].firstname + " " + tableData[i].lastname, * "job": tableData[i].company + ", " + tableData[i].jobtitle, * "email": tableData[i].email * }; * ret.push(elem); * } * //return array * return ret; * }); */ addDataExport: function(name, callback) { this.dataExports[name] = callback; }, /** * Add callback to be called everytime new view/page is loaded, so you can modify view with javascript after it has been loaded * @param {string} view - view url/hash or with possible # as wildcard or simply providing # for any view * @param {function} callback - function to be called when view loaded * @memberof app * @instance * @example <caption>Adding to single specific view with specific url</caption> * //this will work only for view bind to #/analytics/events * app.addPageScript("/analytics/events", function(){ * $("#event-nav-head").after( * "<a href='#/analytics/events/compare'>" + * "<div id='compare-events' class='event-container'>" + * "<div class='icon'></div>" + * "<div class='name'>" + jQuery.i18n.map["compare.button"] + "</div>" + * "</div>" + * "</a>" * ); * }); * @example <caption>Add to all view subpages</caption> * //this will work /users/ and users/1 and users/abs etc * app.addPageScript("/users#", modifyUserDetailsForPush); * @example <caption>Adding script to any view</caption> * //this will work for any view * app.addPageScript("#", function(){ * alert("I am an annoying popup appearing on each view"); * }); */ addPageScript: function(view, callback) { if (!this.pageScripts[view]) { this.pageScripts[view] = []; } this.pageScripts[view].push(callback); }, /** * Add callback to be called everytime view is refreshed, because view may reset some html, and we may want to remodify it again. 
By default this happens every 10 seconds, so avoid cpu intensive tasks
* @param {string} view - view url/hash or with possible # as wildcard or simply providing # for any view
* @param {function} callback - function to be called when view refreshed
* @memberof app
* @instance
* @example <caption>Adding to single specific view with specific url</caption>
* //this will work only for view bind to #/analytics/events
* app.addPageScript("/analytics/events", function(){
*     $("#event-nav-head").after(
*         "<a href='#/analytics/events/compare'>" +
*         "<div id='compare-events' class='event-container'>" +
*         "<div class='icon'></div>" +
*         "<div class='name'>" + jQuery.i18n.map["compare.button"] + "</div>" +
*         "</div>" +
*         "</a>"
*     );
* });
* @example <caption>Add to all view subpage refreshed</caption>
* //this will work /users/ and users/1 and users/abs etc
* app.addRefreshScript("/users#", modifyUserDetailsForPush);
* @example <caption>Adding script to any view</caption>
* //this will work for any view
* app.addRefreshScript("#", function(){
*     alert("I am an annoying popup appearing on each refresh of any view");
* });
*/
addRefreshScript: function(view, callback) {
    if (!this.refreshScripts[view]) {
        this.refreshScripts[view] = [];
    }
    this.refreshScripts[view].push(callback);
},
onAppSwitch: function(appId, refresh, firstLoad) {
    if (appId !== 0) {
        this._isFirstLoad = firstLoad;
        jQuery.i18n.map = JSON.parse(app.origLang);
        if (!refresh) {
            app.main(true);
            if (window.components && window.components.slider && window.components.slider.instance) {
                window.components.slider.instance.close();
            }
            app.updateLongTaskViewsNofification(true);
        }
        $("#sidebar-menu .sidebar-menu").hide();
        var type = countlyGlobal.apps[appId].type;
        if ($("#sidebar-menu #" + type + "-type").length) {
            $("#sidebar-menu #" + type + "-type").show();
        }
        else {
            $("#sidebar-menu #default-type").show();
        }
        for (var i = 0; i < this.appSwitchCallbacks.length; i++) {
            this.appSwitchCallbacks[i](appId);
        }
        app.localize();
    }
},
onAppManagementSwitch: function(appId, type) {
    for (var i = 0; i < this.appManagementSwitchCallbacks.length; i++) {
        this.appManagementSwitchCallbacks[i](appId, type || countlyGlobal.apps[appId].type);
    }
    if ($("#app-add-name").length) {
        var newAppName = $("#app-add-name").val();
        $("#app-container-new .name").text(newAppName);
        $(".new-app-name").text(newAppName);
    }
},
onAppAddTypeSwitch: function(type) {
    for (var i = 0; i < this.appAddTypeCallbacks.length; i++) {
        this.appAddTypeCallbacks[i](type);
    }
},
onUserEdit: function(user, param) {
    for (var i = 0; i < this.userEditCallbacks.length; i++) {
        param = this.userEditCallbacks[i](user, param);
    }
    return param;
},
pageScript: function() { //scripts to be executed on each view change
    $("#month").text(moment().year());
    $("#day").text(moment().format("MMMM, YYYY"));
    $("#yesterday").text(moment().subtract(1, "days").format("Do"));
    var self = this;
    $(document).ready(function() {
        var selectedDateID = countlyCommon.getPeriod();
        if (Object.prototype.toString.call(selectedDateID) !== '[object Array]') {
            $("#" + selectedDateID).addClass("active");
        }
        var i = 0;
        var l = 0;
        if (self.pageScripts[Backbone.history.fragment]) {
            for (i = 0, l = self.pageScripts[Backbone.history.fragment].length; i < l; i++) {
                self.pageScripts[Backbone.history.fragment][i]();
            }
        }
        for (var k in self.pageScripts) {
            if (k !== '#' && k.indexOf('#') !== -1 && Backbone.history.fragment.match("^" + k.replace(/#/g, '.*'))) {
                for (i = 0, l = self.pageScripts[k].length; i < l; i++) {
                    self.pageScripts[k][i]();
                }
            }
        }
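        // "#" is the catch-all key: scripts registered under it run on every view.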
        if (self.pageScripts["#"]) {
            for (i = 0, l = self.pageScripts["#"].length; i < l; i++) {
                self.pageScripts["#"][i]();
            }
        }
        // Translate all elements with a data-help-localize or data-localize attribute
        self.localize();
        if ($("#help-toggle").hasClass("active")) {
            $('.help-zone-vb').tipsy({ gravity: $.fn.tipsy.autoNS, trigger: 'manual', title: function() { return ($(this).data("help")) ? $(this).data("help") : ""; }, fade: true, offset: 5, cssClass: 'yellow', opacity: 1, html: true });
            $('.help-zone-vs').tipsy({ gravity: $.fn.tipsy.autoNS, trigger: 'manual', title: function() { return ($(this).data("help")) ? $(this).data("help") : ""; }, fade: true, offset: 5, cssClass: 'yellow narrow', opacity: 1, html: true });
            $.idleTimer('destroy');
            clearInterval(self.refreshActiveView);
            $(".help-zone-vs, .help-zone-vb").hover(
                function() {
                    $(this).tipsy("show");
                },
                function() {
                    $(this).tipsy("hide");
                }
            );
        }
        $(document).off("chart:changed", ".usparkline").on("chart:changed", ".usparkline", function() {
            $(this).show();
        });
        $(document).off("chart:changed", ".dsparkline").on("chart:changed", ".dsparkline", function() {
            $(this).show();
        });
        $(".usparkline").peity("bar", { width: "100%", height: "30", colour: "#83C986", strokeColour: "#83C986", strokeWidth: 2 });
        $(".dsparkline").peity("bar", { width: "100%", height: "30", colour: "#DB6E6E", strokeColour: "#DB6E6E", strokeWidth: 2 });
        CountlyHelpers.setUpDateSelectors(self.activeView);
        $(window).click(function() {
            $("#date-picker").hide();
            $(".date-time-picker").hide();
            $(".cly-select").removeClass("active");
        });
        $("#date-picker").click(function(e) {
            e.stopPropagation();
        });
        $(".date-time-picker").click(function(e) {
            e.stopPropagation();
        });
        var dateTo;
        var dateFrom;
        $("#date-picker-button").click(function(e) {
            $("#date-picker").toggle();
            $("#date-picker-button").toggleClass("active");
            var date;
            if (self.dateToSelected) {
                date = new Date(self.dateToSelected);
                dateTo.datepicker("setDate", date);
                dateFrom.datepicker("option", "maxDate", date);
            }
            else {
                date = new Date();
                date.setHours(0, 0, 0, 0);
                self.dateToSelected = date.getTime();
                dateTo.datepicker("setDate", new Date(self.dateToSelected));
                dateFrom.datepicker("option", "maxDate", new Date(self.dateToSelected));
            }
            if (self.dateFromSelected) {
                date = new Date(self.dateFromSelected);
                dateFrom.datepicker("setDate", date);
                dateTo.datepicker("option", "minDate", date);
            }
            else {
                var extendDate = moment(dateTo.datepicker("getDate"), "MM-DD-YYYY").subtract(30, 'days').toDate();
                extendDate.setHours(0, 0, 0, 0);
                dateFrom.datepicker("setDate", extendDate);
                self.dateFromSelected = extendDate.getTime();
                dateTo.datepicker("option", "minDate", new Date(self.dateFromSelected));
            }
            $("#date-from-input").val(moment(dateFrom.datepicker("getDate"), "MM-DD-YYYY").format("MM/DD/YYYY"));
            $("#date-to-input").val(moment(dateTo.datepicker("getDate"), "MM-DD-YYYY").format("MM/DD/YYYY"));
            dateTo.datepicker("refresh");
            dateFrom.datepicker("refresh");
            //setSelectedDate();
            e.stopPropagation();
        });
        dateTo = $("#date-to").datepicker({
            numberOfMonths: 1,
            showOtherMonths: true,
            maxDate: moment().toDate(),
            onSelect: function(selectedDate) {
                var instance = $(this).data("datepicker"),
                    date = $.datepicker.parseDate(instance.settings.dateFormat || $.datepicker._defaults.dateFormat, selectedDate, instance.settings);
                date.setHours(0, 0, 0, 0);
                if (date.getTime() < self.dateFromSelected) {
                    self.dateFromSelected = date.getTime();
                }
                $("#date-to-input").val(moment(date).format("MM/DD/YYYY"));
                dateFrom.datepicker("option", "maxDate", date);
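                // remember the picked "to" timestamp so the picker reopens with the same range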
                self.dateToSelected = date.getTime();
            },
            beforeShowDay: function(date) {
                var ts = date.getTime();
                if (ts < moment($("#date-to-input").val(), "MM/DD/YYYY") && ts >= moment($("#date-from-input").val(), "MM/DD/YYYY")) {
                    return [true, "in-range", ""];
                }
                else {
                    return [true, "", ""];
                }
            }
        });
        dateFrom = $("#date-from").datepicker({
            numberOfMonths: 1,
            showOtherMonths: true,
            maxDate: moment().subtract(1, 'days').toDate(),
            onSelect: function(selectedDate) {
                var instance = $(this).data("datepicker"),
                    date = $.datepicker.parseDate(instance.settings.dateFormat || $.datepicker._defaults.dateFormat, selectedDate, instance.settings);
                date.setHours(0, 0, 0, 0);
                if (date.getTime() > self.dateToSelected) {
                    self.dateToSelected = date.getTime();
                }
                $("#date-from-input").val(moment(date).format("MM/DD/YYYY"));
                dateTo.datepicker("option", "minDate", date);
                self.dateFromSelected = date.getTime();
            },
            beforeShowDay: function(date) {
                var ts = date.getTime();
                if (ts <= moment($("#date-to-input").val(), "MM/DD/YYYY") && ts > moment($("#date-from-input").val(), "MM/DD/YYYY")) {
                    return [true, "in-range", ""];
                }
                else {
                    return [true, "", ""];
                }
            }
        });
        $("#date-from-input").keyup(function(event) {
            if (event.keyCode === 13) {
                var date = moment($("#date-from-input").val(), "MM/DD/YYYY");
                if (date.format("MM/DD/YYYY") !== $("#date-from-input").val()) {
                    var jsDate = $('#date-from').datepicker('getDate');
                    $("#date-from-input").val(moment(jsDate.getTime()).format("MM/DD/YYYY"));
                }
                else {
                    dateTo.datepicker("option", "minDate", date.toDate());
                    if (date.valueOf() > self.dateToSelected) {
                        date.startOf('day');
                        self.dateToSelected = date.valueOf();
                        dateFrom.datepicker("option", "maxDate", date.toDate());
                        dateTo.datepicker("setDate", date.toDate());
                        $("#date-to-input").val(date.format("MM/DD/YYYY"));
                    }
                    dateFrom.datepicker("setDate", date.toDate());
                }
            }
        });
        $("#date-to-input").keyup(function(event) {
            if (event.keyCode === 13) {
                var date = moment($("#date-to-input").val(), "MM/DD/YYYY");
                if (date.format("MM/DD/YYYY") !== $("#date-to-input").val()) {
                    var jsDate = $('#date-to').datepicker('getDate');
                    $("#date-to-input").val(moment(jsDate.getTime()).format("MM/DD/YYYY"));
                }
                else {
                    dateFrom.datepicker("option", "maxDate", date.toDate());
                    if (date.toDate() < self.dateFromSelected) {
                        date.startOf('day');
                        self.dateFromSelected = date.valueOf();
                        dateTo.datepicker("option", "minDate", date.toDate());
                        dateFrom.datepicker("setDate", date.toDate());
                        $("#date-from-input").val(date.format("MM/DD/YYYY"));
                    }
                    dateTo.datepicker("setDate", date.toDate());
                }
            }
        });
        /** function sets selected date */
        function setSelectedDate() {
            $("#selected-date").text(countlyCommon.getDateRangeForCalendar());
        }
        $.datepicker.setDefaults($.datepicker.regional[""]);
        $("#date-to").datepicker("option", $.datepicker.regional[countlyCommon.BROWSER_LANG]);
        $("#date-from").datepicker("option", $.datepicker.regional[countlyCommon.BROWSER_LANG]);
        $("#date-submit").click(function() {
            if (!self.dateFromSelected && !self.dateToSelected) {
                return false;
            }
            countlyCommon.setPeriod([
                self.dateFromSelected - countlyCommon.getOffsetCorrectionForTimestamp(self.dateFromSelected),
                self.dateToSelected - countlyCommon.getOffsetCorrectionForTimestamp(self.dateToSelected) + 24 * 60 * 60 * 1000 - 1
            ]);
            self.activeView.dateChanged();
            app.runRefreshScripts();
            setSelectedDate();
            $("#date-selector .calendar").removeClass("active");
            $(".date-selector").removeClass("selected").removeClass("active");
            $("#date-picker").hide();
        });
        $("#date-cancel").click(function() {
            $("#date-selector .calendar").removeClass("selected").removeClass("active");
            $("#date-picker").hide();
        });
.calendar").removeClass("selected").removeClass("active"); $("#date-picker").hide(); }); $("#date-cancel").click(function() { $("#date-selector .calendar").removeClass("selected").removeClass("active"); $("#date-picker").hide(); }); setSelectedDate(); $('.scrollable').slimScroll({ height: '100%', start: 'top', wheelStep: 10, position: 'right', disableFadeOut: true }); $(".checkbox").on('click', function() { $(this).toggleClass("checked"); }); $(".resource-link").on('click', function() { if ($(this).data("link")) { CountlyHelpers.openResource($(this).data("link")); } }); $("#sidebar-menu").find(".item").each(function() { if ($(this).next().hasClass("sidebar-submenu") && $(this).find(".ion-chevron-right").length === 0) { $(this).append("<span class='ion-chevron-right'></span>"); } }); $('.nav-search').on('input', "input", function() { var searchText = new RegExp($(this).val().toLowerCase().replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&')), searchInside = $(this).parent().next().find(".searchable"); searchInside.filter(function() { return !(searchText.test($(this).text().toLowerCase())); }).css('display', 'none').removeClass('filtered-app-item'); searchInside.filter(function() { return searchText.test($(this).text().toLowerCase()); }).css('display', 'block').addClass('filtered-app-item'); }); $(document).on('input', "#listof-apps .search input", function() { var searchText = new RegExp($(this).val().toLowerCase()), searchInside = $(this).parent().next().find(".searchable"); searchInside.filter(function() { return !(searchText.test($(this).text().toLowerCase())); }).css('display', 'none'); searchInside.filter(function() { return searchText.test($(this).text().toLowerCase()); }).css('display', 'block'); }); $(document).on('mouseenter', ".bar-inner", function() { var number = $(this).parent().next(); number.text($(this).data("item")); number.css({ "color": $(this).css("background-color") }); }); $(document).on('mouseleave', ".bar-inner", function() { var number = $(this).parent().next(); number.text(number.data("item")); number.css({ "color": $(this).parent().find(".bar-inner:first-child").css("background-color") }); }); /* Auto expand left navigation (events, management > apps etc) if ellipsis is applied to children */ var closeLeftNavExpand; var leftNavSelector = "#event-nav, #app-management-bar, #configs-title-bar"; var $leftNav = $(leftNavSelector); $leftNav.hoverIntent({ over: function() { var parentLeftNav = $(this).parents(leftNavSelector); if (leftNavNeedsExpand(parentLeftNav)) { parentLeftNav.addClass("expand"); } }, out: function() { // Delay shrinking and allow movement towards the top section cancel it closeLeftNavExpand = setTimeout(function() { $(this).parents(leftNavSelector).removeClass("expand"); }, 500); }, selector: ".slimScrollDiv" }); $leftNav.on("mousemove", function() { if ($(this).hasClass("expand")) { clearTimeout(closeLeftNavExpand); } }); $leftNav.on("mouseleave", function() { $(this).removeClass("expand"); }); /** Checks if nav needs to expand @param {object} $nav html element @returns {boolean} true or false */ function leftNavNeedsExpand($nav) { var makeExpandable = false; $nav.find(".event-container:not(#compare-events) .name, .app-container .name, .config-container .name").each(function(z, el) { if (el.offsetWidth < el.scrollWidth) { makeExpandable = true; return false; } }); return makeExpandable; } /* End of auto expand code */ }); } }); Backbone.history || (Backbone.history = new Backbone.History); Backbone.history._checkUrl = Backbone.history.checkUrl; 
Backbone.history.urlChecks = []; Backbone.history.checkOthers = function() { var proceed = true; for (var i = 0; i < Backbone.history.urlChecks.length; i++) { if (!Backbone.history.urlChecks[i]()) { proceed = false; } } return proceed; }; Backbone.history.checkUrl = function() { if (Backbone.history.checkOthers()) { Backbone.history._checkUrl(); } }; Backbone.history.noHistory = function(hash) { if (history && history.replaceState) { history.replaceState(undefined, undefined, hash); } else { location.replace(hash); } }; Backbone.history.__checkUrl = Backbone.history.checkUrl; Backbone.history._getFragment = Backbone.history.getFragment; Backbone.history.appIds = []; for (var i in countlyGlobal.apps) { Backbone.history.appIds.push(i); } Backbone.history.getFragment = function() { var fragment = Backbone.history._getFragment(); if (fragment.indexOf("/" + countlyCommon.ACTIVE_APP_ID) === 0) { fragment = fragment.replace("/" + countlyCommon.ACTIVE_APP_ID, ""); } return fragment; }; Backbone.history.checkUrl = function() { store.set("countly_fragment_name", Backbone.history._getFragment()); var app_id = Backbone.history._getFragment().split("/")[1] || ""; if (countlyCommon.APP_NAMESPACE !== false && countlyCommon.ACTIVE_APP_ID !== 0 && countlyCommon.ACTIVE_APP_ID !== app_id && Backbone.history.appIds.indexOf(app_id) === -1) { Backbone.history.noHistory("#/" + countlyCommon.ACTIVE_APP_ID + Backbone.history._getFragment()); app_id = countlyCommon.ACTIVE_APP_ID; } if (countlyCommon.ACTIVE_APP_ID !== 0 && countlyCommon.ACTIVE_APP_ID !== app_id && Backbone.history.appIds.indexOf(app_id) !== -1) { app.switchApp(app_id, function() { if (Backbone.history.checkOthers()) { Backbone.history.__checkUrl(); } }); } else { if (Backbone.history.checkOthers()) { Backbone.history.__checkUrl(); } } }; /* var checkGlobalAdminOnlyPermission = function() { var userCheckList = [ "/manage/users", "/manage/apps" ]; var adminCheckList = [ "/manage/users" ]; if (!countlyGlobal.member.global_admin && !countlyGlobal.config.autonomous) { var existed = false; var checkList = userCheckList; if (countlyAuth.getAdminApps(countlyGlobal.member) && countlyAuth.getAdminApps(countlyGlobal.member).length) { checkList = adminCheckList; } checkList.forEach(function(item) { if (Backbone.history.getFragment().indexOf(item) > -1) { existed = true; } }); if (existed === true) { window.location.hash = "/"; return false; } } return true; }; */ //Backbone.history.urlChecks.push(checkGlobalAdminOnlyPermission); //initial hash check (function() { if (!Backbone.history.getFragment() && store.get("countly_fragment_name")) { Backbone.history.noHistory("#" + store.get("countly_fragment_name")); } else { var app_id = Backbone.history._getFragment().split("/")[1] || ""; if (countlyCommon.ACTIVE_APP_ID === app_id || Backbone.history.appIds.indexOf(app_id) !== -1) { //we have app id if (app_id !== countlyCommon.ACTIVE_APP_ID) { // but it is not currently selected app, so let' switch countlyCommon.setActiveApp(app_id); $("#active-app-name").text(countlyGlobal.apps[app_id].name); $('#active-app-name').attr('title', countlyGlobal.apps[app_id].name); $("#active-app-icon").css("background-image", "url('" + countlyGlobal.path + "appimages/" + app_id + ".png')"); } } else if (countlyCommon.APP_NAMESPACE !== false) { //add current app id Backbone.history.noHistory("#/" + countlyCommon.ACTIVE_APP_ID + Backbone.history._getFragment()); } } })(); var app = new AppRouter(); /** * Navigate to another hash address programmatically, without trigering view route and 
without leaving trace in history, if possible * @param {string} hash - url path (hash part) to change * @memberof app * @example * //you are at #/manage/systemlogs * app.noHistory("#/manage/systemlogs/query/{}"); * //now pressing back would not go to #/manage/systemlogs */ app.noHistory = function(hash) { if (countlyCommon.APP_NAMESPACE !== false) { hash = "#/" + countlyCommon.ACTIVE_APP_ID + hash.substr(1); } if (history && history.replaceState) { history.replaceState(undefined, undefined, hash); } else { location.replace(hash); } }; //collects requests for active views to dscard them if views changed $.ajaxPrefilter(function(options, originalOptions, jqXHR) { //add to options for independent!!! var myurl = ""; var mydata = ""; if (originalOptions && originalOptions.url) { myurl = originalOptions.url; } if (originalOptions && originalOptions.data) { mydata = JSON.stringify(originalOptions.data); } //request which is not killed on view change(only on app change) jqXHR.my_set_url = myurl; jqXHR.my_set_data = mydata; if (originalOptions && (originalOptions.type === 'GET' || originalOptions.type === 'get') && originalOptions.url.substr(0, 2) === '/o') { if (originalOptions.data && originalOptions.data.preventGlobalAbort && originalOptions.data.preventGlobalAbort === true) { return true; } if (originalOptions.data && originalOptions.data.preventRequestAbort && originalOptions.data.preventRequestAbort === true) { if (app._myRequests[myurl] && app._myRequests[myurl][mydata]) { jqXHR.abort(); //we already have same working request } else { jqXHR.always(function(data, textStatus, jqXHR1) { //if success jqxhr object is third, errored jqxhr object is in first parameter. if (jqXHR1 && jqXHR1.my_set_url && jqXHR1.my_set_data) { if (app._myRequests[jqXHR1.my_set_url] && app._myRequests[jqXHR1.my_set_url][jqXHR1.my_set_data]) { delete app._myRequests[jqXHR1.my_set_url][jqXHR1.my_set_data]; } } else if (data && data.my_set_url && data.my_set_data) { if (app._myRequests[data.my_set_url] && app._myRequests[data.my_set_url][data.my_set_data]) { delete app._myRequests[data.my_set_url][data.my_set_data]; } } }); //save request in our object if (!app._myRequests[myurl]) { app._myRequests[myurl] = {}; } app._myRequests[myurl][mydata] = jqXHR; } } else { if (app.activeView) { if (app.activeView._myRequests[myurl] && app.activeView._myRequests[myurl][mydata]) { jqXHR.abort(); //we already have same working request } else { jqXHR.always(function(data, textStatus, jqXHR1) { //if success jqxhr object is third, errored jqxhr object is in first parameter. if (jqXHR1 && jqXHR1.my_set_url && jqXHR1.my_set_data) { if (app.activeView._myRequests[jqXHR1.my_set_url] && app.activeView._myRequests[jqXHR1.my_set_url][jqXHR1.my_set_data]) { delete app.activeView._myRequests[jqXHR1.my_set_url][jqXHR1.my_set_data]; } } else if (data && data.my_set_url && data.my_set_data) { if (app.activeView._myRequests[data.my_set_url] && app.activeView._myRequests[data.my_set_url][data.my_set_data]) { delete app.activeView._myRequests[data.my_set_url][data.my_set_data]; } } }); //save request in our object if (!app.activeView._myRequests[myurl]) { app.activeView._myRequests[myurl] = {}; } app.activeView._myRequests[myurl][mydata] = jqXHR; } } } } });
1
14,581
Instead of setting it to null, I think we should set it to {}.
Countly-countly-server
js
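A minimal JavaScript sketch of the suggestion above, assuming "it" refers to the pageScripts map that the render path indexes (the reset site and variable shape are assumptions, not confirmed by the diff): initializing to `{}` instead of `null` lets callers index it without a guard.

```js
// Hypothetical sketch: reset pageScripts to an empty object rather than
// null, so lookups like self.pageScripts["#"] never dereference null.
var self = { pageScripts: {} }; // was (per the review): self.pageScripts = null;

function runHashScripts() {
    // Safe even when no "#" scripts are registered yet.
    (self.pageScripts["#"] || []).forEach(function(fn) {
        fn();
    });
}

runHashScripts();
```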
@@ -23,8 +23,8 @@ import ( ) const ( - TPRGroup = "monitoring.coreos.com" - TPRVersion = "v1alpha1" + Group = "monitoring.coreos.com" + Version = "v1alpha1" ) type MonitoringV1alpha1Interface interface {
1
// Copyright 2016 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1alpha1 import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/dynamic" "k8s.io/client-go/pkg/api" "k8s.io/client-go/rest" ) const ( TPRGroup = "monitoring.coreos.com" TPRVersion = "v1alpha1" ) type MonitoringV1alpha1Interface interface { RESTClient() rest.Interface PrometheusesGetter AlertmanagersGetter ServiceMonitorsGetter } type MonitoringV1alpha1Client struct { restClient rest.Interface dynamicClient *dynamic.Client } func (c *MonitoringV1alpha1Client) Prometheuses(namespace string) PrometheusInterface { return newPrometheuses(c.restClient, c.dynamicClient, namespace) } func (c *MonitoringV1alpha1Client) Alertmanagers(namespace string) AlertmanagerInterface { return newAlertmanagers(c.restClient, c.dynamicClient, namespace) } func (c *MonitoringV1alpha1Client) ServiceMonitors(namespace string) ServiceMonitorInterface { return newServiceMonitors(c.restClient, c.dynamicClient, namespace) } func (c *MonitoringV1alpha1Client) RESTClient() rest.Interface { return c.restClient } func NewForConfig(c *rest.Config) (*MonitoringV1alpha1Client, error) { config := *c setConfigDefaults(&config) client, err := rest.RESTClientFor(&config) if err != nil { return nil, err } dynamicClient, err := dynamic.NewClient(&config) if err != nil { return nil, err } return &MonitoringV1alpha1Client{client, dynamicClient}, nil } func setConfigDefaults(config *rest.Config) { config.GroupVersion = &schema.GroupVersion{ Group: TPRGroup, Version: TPRVersion, } config.APIPath = "/apis" config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} return }
1
8,541
We should bump this before releasing and remove all the legacy fields that are safe to drop. Since we cannot have multiple versions anyway, there's little value in walking around with the "alpha" stamp.
prometheus-operator-prometheus-operator
go
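A hedged Go sketch of what the bump discussed above might look like; the target version string `v1` is an assumption chosen only to illustrate dropping the "alpha" stamp — the review does not name the replacement version.

```go
// Hypothetical post-bump constants; "v1" is an assumed target version.
package v1

const (
	Group   = "monitoring.coreos.com"
	Version = "v1"
)
```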
@@ -59,6 +59,15 @@ AS_CASE($ax_cv_cxx_compiler_vendor, AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory]) X_AC_ENABLE_SANITIZER +if test "x$san_enabled" != "xno" ; then + AC_DEFINE([DEEPBIND], [0], + [deepbind is unsupported with asan, musl and so-forth]) +else + AC_DEFINE([DEEPBIND], [RTLD_DEEPBIND], + [deepbind is unsupported with asan, musl and so-forth]) +fi + + LT_INIT AC_PROG_AWK
1
## # Prologue ## AC_INIT([flux-core], m4_esyscmd([git describe --always | awk '/.*/ {sub(/^v/, ""); printf "%s",$1; exit}'])) AC_CONFIG_AUX_DIR([config]) AC_CONFIG_MACRO_DIR([config]) AC_CONFIG_SRCDIR([NEWS]) AC_CANONICAL_SYSTEM ## # If runstatedir not explicitly set on command line, use '/run' as default # N.B. runstatedir is not set at all in autoconf < 2.70. ## if test "$runstatedir" = '${localstatedir}/run' || test -z "$runstatedir"; then AC_SUBST([runstatedir],[/run]) fi X_AC_EXPAND_INSTALL_DIRS ## # Automake support ## AM_INIT_AUTOMAKE([subdir-objects tar-pax]) AM_SILENT_RULES([yes]) AM_CONFIG_HEADER([config/config.h]) AM_MAINTAINER_MODE AC_DEFINE([_GNU_SOURCE], 1, [Define _GNU_SOURCE so that we get all necessary prototypes]) ## # Initialize pkg-config for PKG_CHECK_MODULES to avoid conditional issues ## PKG_PROG_PKG_CONFIG ## # Checks for programs ## AC_PROG_CC_C99 AM_PROG_CC_C_O AX_COMPILER_VENDOR AS_CASE($ax_cv_c_compiler_vendor, [clang | gnu], [ WARNING_CFLAGS="-Wall -Werror -Werror=missing-field-initializers -Wno-strict-aliasing -Wno-error=deprecated-declarations" AC_SUBST([WARNING_CFLAGS]) ] ) AC_PROG_CXX # Check compiler vendor for c++, need to temporarily update AC_LANG AC_LANG_PUSH([C++]) AX_COMPILER_VENDOR AC_LANG_POP AS_CASE($ax_cv_cxx_compiler_vendor, [clang | gnu], [ WARNING_CXXFLAGS=$WARNING_CFLAGS AC_SUBST([WARNING_CXXFLAGS]) ] ) AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory]) X_AC_ENABLE_SANITIZER LT_INIT AC_PROG_AWK AC_ARG_ENABLE([docs], AS_HELP_STRING([--disable-docs], [disable building docs])) AS_IF([test "x$enable_docs" != "xno"], [ AC_CHECK_PROGS(ADOC, [a2x asciidoctor]) AS_IF([test "$ADOC" == "a2x"], [ ADOC_FORMAT_OPT="--format" AC_SUBST([ADOC_FORMAT_OPT]) ]) AS_IF([test "$ADOC" == "asciidoctor"], [ ADOC_FORMAT_OPT="--backend" AC_SUBST([ADOC_FORMAT_OPT]) ]) ]) AM_CONDITIONAL([ENABLE_DOCS], [test -n "$ADOC"]) AC_CHECK_PROG(ASPELL,[aspell],[aspell]) ## # Checks for header files. ## AC_HEADER_STDC AC_CHECK_HEADERS( \ pthread.h \ getopt.h \ fcntl.h \ limits.h \ strings.h \ syslog.h \ unistd.h \ [sys/cdefs.h] \ [sys/param.h] \ stdarg.h \ locale.h \ xlocale.h \ endian.h \ inttypes.h \ ) ## # Checks for typedefs, structures, and compiler characteristics ## AC_C_BIGENDIAN AC_C_CONST AC_TYPE_SIZE_T AX_COMPILE_CHECK_SIZEOF(int) AX_COMPILE_CHECK_SIZEOF(long) AX_COMPILE_CHECK_SIZEOF(long long) AX_COMPILE_CHECK_SIZEOF(uintptr_t, [#include <stdint.h>]) AX_COMPILE_CHECK_SIZEOF(size_t, [#include <stdint.h>]) ## # Checks for library functions ## AC_CHECK_FUNCS( \ getopt_long \ vsnprintf \ vsscanf \ realloc \ strcasecmp \ strdup \ strerror \ snprintf \ vsnprintf \ vasprintf \ open \ vsyslog \ strncasecmp \ setlocale \ uselocale \ ) X_AC_CHECK_PTHREADS X_AC_CHECK_COND_LIB(util, forkpty) X_AC_CHECK_COND_LIB(rt, clock_gettime) X_AC_CHECK_COND_LIB(dl, dlerror) X_AC_MALLOC AC_CHECK_LIB(m, floor) AC_MSG_CHECKING(--enable-python argument) AC_ARG_ENABLE(python, [ --enable-python[=OPTS] Include Python bindings. [default=yes] [OPTS=no/yes]], , [enable_python="yes"]) AC_MSG_RESULT($enable_python) if test "$enable_python" = "yes"; then AX_PYTHON_DEVEL([>='2.7']) AM_PATH_PYTHON([$ac_python_version]) if test "X$PYTHON" != "X"; then # Flag for PYTHON_LDFLAGS workaround below. 
if test -n "$PYTHON_LDFLAGS"; then ac_python_ldflags_set_by_user=true fi AM_CHECK_PYMOD(cffi, [cffi.__version_info__ >= (1,1)], , [AC_MSG_ERROR([could not find python module cffi, version 1.1+ required])] ) AM_CHECK_PYMOD(six, [StrictVersion(six.__version__) >= StrictVersion('1.9.0')], , [AC_MSG_ERROR([could not find python module six, version 1.9.0+ required])] ) # Remove -L<path> from PYTHON_LDFLAGS if it is in a standard path # (e.g. /usr/lib64). Placing a standard path earlier in the linker # search can lead to linking problems. # # Logic below assumes only newer Python versions, protected by # above check for atleast Python 2.7. if test "$ac_python_ldflags_set_by_user" != "true"; then AC_CHECK_LIB([$ac_python_library], [PyArg_ParseTuple], [ac_python_in_ld_path=true]) if test "$ac_python_in_ld_path" = "true"; then AC_MSG_NOTICE([Removing -L$ac_python_libdir from PYTHON_LDFLAGS]) PYTHON_LDFLAGS="-l$ac_python_library" fi fi python_ok=yes fi AS_VAR_SET(fluxpydir, $pyexecdir/flux) AC_SUBST(fluxpydir) AS_VAR_SET(fluxsodir, $pyexecdir/_flux) AC_SUBST(fluxsodir) AS_VAR_SET(fluxpymoddir, $pyexecdir/flux/modules) AC_SUBST(fluxpymoddir) if test "$python_ok" != "yes"; then AC_MSG_ERROR([could not configure python]) fi fi AM_CONDITIONAL([HAVE_PYTHON], [test "$enable_python" = yes]) AC_ARG_ENABLE([pylint], [AS_HELP_STRING([--enable-pylint], [Enable pylint checks of python bindings])],, [enable_pylint="no"] ) AS_IF([test "x$enable_pylint" = "xyes"], [ AC_CHECK_PROG(PYLINT,[pylint],[pylint]) AS_IF([test "x$PYLINT" != "xpylint"], [AC_MSG_ERROR([No pylint found in PATH])]) AM_CHECK_PYMOD(pylint, [StrictVersion(pylint.__version__) >= StrictVersion('1.4.5')], , [AC_MSG_ERROR([could not find python module pylint, version 1.4.5+ required])] ) ]) AM_CONDITIONAL([ENABLE_PYLINT], [test "x$PYLINT" = "xpylint"]) AX_PROG_LUA([5.1],[5.3]) AX_LUA_HEADERS AX_LUA_LIBS X_AC_ZEROMQ X_AC_MUNGE X_AC_JANSSON X_AC_YAMLCPP PKG_CHECK_MODULES([HWLOC], [hwloc >= 1.11.1], [], []) PKG_CHECK_MODULES([SQLITE], [sqlite3], [], []) LX_FIND_MPI AM_CONDITIONAL([HAVE_MPI], [test "$have_C_mpi" = yes]) AX_VALGRIND_H AX_CODE_COVERAGE AC_ARG_WITH([flux-security], AS_HELP_STRING([--with-flux-security], [Build with flux-security])) AS_IF([test "x$with_flux_security" = "xyes"], [ PKG_CHECK_MODULES([FLUX_SECURITY], [flux-security], [flux_sec_incdir=`$PKG_CONFIG --variable=includedir flux-security`], [flux_sec_incdir=;]) AS_IF([test "x$flux_sec_incdir" = x], [AC_MSG_ERROR([couldn't find flux-security or include directory])]) AC_DEFINE([HAVE_FLUX_SECURITY], [1], [Define flux-security is available]) AC_SUBST(FLUX_SECURITY_INCDIR, $flux_sec_incdir) ]) AM_CONDITIONAL([HAVE_FLUX_SECURITY], [test "x$with_flux_security" = "xyes"]) AC_ARG_ENABLE(caliper, [ --enable-caliper[=OPTS] Use caliper for profiling. 
[default=no] [OPTS=no/yes]], , [enable_caliper="no"]) if test "$enable_caliper" = "yes"; then PKG_CHECK_MODULES([CALIPER], [caliper], [], []) CFLAGS="${CFLAGS} ${CALIPER_CFLAGS} " # Do not use CALIPER_LIBS, only link to libcaliper-stub LIBS="${LIBS} $(pkg-config --libs-only-L caliper) -lcaliper-stub -lrt " AC_DEFINE([HAVE_CALIPER], [1], [Define if you have libcaliper]) fi ## # Check for systemd ## RRA_WITH_SYSTEMD_UNITDIR ## # Embedded libev ## m4_include([src/common/libev/libev.m4]) AC_PKGCONFIG ## # Project directories ## AS_VAR_SET(fluxrcdir, $sysconfdir/flux) AC_SUBST(fluxrcdir) AS_VAR_SET(fluxrc1dir, $sysconfdir/flux/rc1.d) AC_SUBST(fluxrc1dir) AS_VAR_SET(fluxrc3dir, $sysconfdir/flux/rc3.d) AC_SUBST(fluxrc3dir) AS_VAR_SET(fluxcfdir, $sysconfdir/flux/conf.d) AC_SUBST(fluxcfdir) AS_VAR_SET(fluxlibexecdir, $libexecdir/flux) AC_SUBST(fluxlibexecdir) AS_VAR_SET(fluxcmddir, $libexecdir/flux/cmd) AC_SUBST(fluxcmddir) AS_VAR_SET(fluxlibdir, $libdir/flux) AC_SUBST(fluxlibdir) AS_VAR_SET(fluxmoddir, $libdir/flux/modules) AC_SUBST(fluxmoddir) AS_VAR_SET(fluxconnectordir, $libdir/flux/connectors) AC_SUBST(fluxconnectordir) AS_VAR_SET(fluxincludedir, $includedir/flux) AC_SUBST(fluxincludedir) AS_VAR_SET(fluxcoreincludedir, $includedir/flux/core) AC_SUBST(fluxcoreincludedir) adl_RECURSIVE_EVAL([$bindir], fluxbindir) AS_VAR_SET(fluxbindir, $fluxbindir) AC_SUBST(fluxbindir) adl_RECURSIVE_EVAL([$luadir], fluxluadir) AS_VAR_SET(fluxluadir, $fluxluadir) AC_SUBST(fluxluadir) ## # Macros to avoid repetition in Makefiles.am's ## fluxmod_ldflags="$san_ld_zdef_flag -avoid-version -export-symbols-regex '^mod_(main|name|service)\$\$' --disable-static -shared -export-dynamic" AC_SUBST(fluxmod_ldflags) fluxlib_ldflags="-shared -export-dynamic --disable-static $san_ld_zdef_flag" AC_SUBST(fluxlib_ldflags) ## # Epilogue ## AC_CONFIG_FILES( \ Makefile \ src/Makefile \ src/common/Makefile \ src/common/libtap/Makefile \ src/common/liblsd/Makefile \ src/common/libutil/Makefile \ src/common/libev/Makefile \ src/common/libminilzo/Makefile \ src/common/libpmi/Makefile \ src/common/libflux/Makefile \ src/common/libkvs/Makefile \ src/common/libkz/Makefile \ src/common/libjsc/Makefile \ src/common/libjob/Makefile \ src/common/libzio/Makefile \ src/common/libsubprocess/Makefile \ src/common/libcompat/Makefile \ src/common/liboptparse/Makefile \ src/common/libidset/Makefile \ src/common/libjobspec/Makefile \ src/common/libjobspec/flux-jobspec.pc \ src/common/libtomlc99/Makefile \ src/bindings/Makefile \ src/bindings/lua/Makefile \ src/bindings/python/Makefile \ src/bindings/python/flux/Makefile \ src/bindings/python/flux/core/Makefile \ src/bindings/python/_flux/Makefile \ src/broker/Makefile \ src/cmd/Makefile \ src/connectors/Makefile \ src/connectors/local/Makefile \ src/connectors/shmem/Makefile \ src/connectors/loop/Makefile \ src/connectors/ssh/Makefile \ src/modules/Makefile \ src/modules/connector-local/Makefile \ src/modules/kvs/Makefile \ src/modules/kvs-watch/Makefile \ src/modules/content-sqlite/Makefile \ src/modules/barrier/Makefile \ src/modules/wreck/Makefile \ src/modules/resource-hwloc/Makefile \ src/modules/cron/Makefile \ src/modules/aggregator/Makefile \ src/modules/pymod/Makefile \ src/modules/userdb/Makefile \ src/modules/job-ingest/Makefile \ src/test/Makefile \ etc/Makefile \ etc/flux-core.pc \ etc/flux-pmi.pc \ etc/flux-optparse.pc \ etc/flux-idset.pc \ etc/flux.service \ doc/Makefile \ doc/man1/Makefile \ doc/man3/Makefile \ doc/man7/Makefile \ doc/test/Makefile \ t/Makefile \ t/fluxometer/conf.lua \ 
t/fluxometer/conf.lua.installed \ ) AC_CONFIG_LINKS([ \ t/fluxometer.lua:t/fluxometer.lua \ ]) AC_OUTPUT AS_IF([test "x$enable_docs" != "xno"], [ if test -z "$ADOC"; then AC_MSG_WARN([No asciidoc formatter found. Manual pages will not be generated.]) fi ])
1
21,601
I might suggest a different name for the `DEEPBIND` config.h macro. Perhaps `FLUX_DEEPBIND` to give a hint in the code that we're using a locally provided define.
flux-framework-flux-core
c
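To make the naming suggestion concrete, here is a hedged C sketch of how a prefixed `FLUX_DEEPBIND` macro would read at a call site; the `load_module` wrapper is illustrative, not taken from the flux-core sources.

```c
#define _GNU_SOURCE /* RTLD_DEEPBIND is a glibc extension */
#include <dlfcn.h>
#include <stdio.h>

/* Normally AC_DEFINE would put this in config.h; spelled out here so the
 * sketch stands alone. It expands to 0 under ASan/musl builds, where
 * RTLD_DEEPBIND is unsupported. */
#ifndef FLUX_DEEPBIND
#define FLUX_DEEPBIND RTLD_DEEPBIND
#endif

/* Illustrative loader: the FLUX_ prefix hints that the flag is a locally
 * provided define, not a symbol coming from <dlfcn.h>. */
void *load_module(const char *path)
{
    void *dso = dlopen(path, RTLD_NOW | RTLD_LOCAL | FLUX_DEEPBIND);
    if (!dso)
        fprintf(stderr, "dlopen: %s\n", dlerror());
    return dso;
}
```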
@@ -393,7 +393,7 @@ func init() { proto.RegisterType((*StatusResponse)(nil), "bpfki.StatusResponse") } -func init() { proto.RegisterFile("bpfki.proto", fileDescriptor_62eed357eb71de0e) } +func init() { protoregistry.GlobalFiles.RegisterFile("bpfki.proto", fileDescriptor_62eed357eb71de0e) } var fileDescriptor_62eed357eb71de0e = []byte{ // 518 bytes of a gzipped FileDescriptorProto
1
// Code generated by protoc-gen-go. DO NOT EDIT. // source: bpfki.proto package bpfki import ( context "context" fmt "fmt" math "math" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type FailKernRequest_FAILTYPE int32 const ( FailKernRequest_SLAB FailKernRequest_FAILTYPE = 0 FailKernRequest_PAGE FailKernRequest_FAILTYPE = 1 FailKernRequest_BIO FailKernRequest_FAILTYPE = 2 ) var FailKernRequest_FAILTYPE_name = map[int32]string{ 0: "SLAB", 1: "PAGE", 2: "BIO", } var FailKernRequest_FAILTYPE_value = map[string]int32{ "SLAB": 0, "PAGE": 1, "BIO": 2, } func (x FailKernRequest_FAILTYPE) String() string { return proto.EnumName(FailKernRequest_FAILTYPE_name, int32(x)) } func (FailKernRequest_FAILTYPE) EnumDescriptor() ([]byte, []int) { return fileDescriptor_62eed357eb71de0e, []int{1, 0} } type BumpTimeRequest struct { Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` Tid uint32 `protobuf:"varint,2,opt,name=tid,proto3" json:"tid,omitempty"` Second int32 `protobuf:"varint,3,opt,name=second,proto3" json:"second,omitempty"` Subsecond int32 `protobuf:"varint,4,opt,name=subsecond,proto3" json:"subsecond,omitempty"` Probability float32 `protobuf:"fixed32,5,opt,name=probability,proto3" json:"probability,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *BumpTimeRequest) Reset() { *m = BumpTimeRequest{} } func (m *BumpTimeRequest) String() string { return proto.CompactTextString(m) } func (*BumpTimeRequest) ProtoMessage() {} func (*BumpTimeRequest) Descriptor() ([]byte, []int) { return fileDescriptor_62eed357eb71de0e, []int{0} } func (m *BumpTimeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BumpTimeRequest.Unmarshal(m, b) } func (m *BumpTimeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BumpTimeRequest.Marshal(b, m, deterministic) } func (m *BumpTimeRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_BumpTimeRequest.Merge(m, src) } func (m *BumpTimeRequest) XXX_Size() int { return xxx_messageInfo_BumpTimeRequest.Size(m) } func (m *BumpTimeRequest) XXX_DiscardUnknown() { xxx_messageInfo_BumpTimeRequest.DiscardUnknown(m) } var xxx_messageInfo_BumpTimeRequest proto.InternalMessageInfo func (m *BumpTimeRequest) GetPid() uint32 { if m != nil { return m.Pid } return 0 } func (m *BumpTimeRequest) GetTid() uint32 { if m != nil { return m.Tid } return 0 } func (m *BumpTimeRequest) GetSecond() int32 { if m != nil { return m.Second } return 0 } func (m *BumpTimeRequest) GetSubsecond() int32 { if m != nil { return m.Subsecond } return 0 } func (m *BumpTimeRequest) GetProbability() float32 { if m != nil { return m.Probability } return 0 } type FailKernRequest struct { Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` Tid uint32 `protobuf:"varint,2,opt,name=tid,proto3" json:"tid,omitempty"` Ftype FailKernRequest_FAILTYPE 
`protobuf:"varint,3,opt,name=ftype,proto3,enum=bpfki.FailKernRequest_FAILTYPE" json:"ftype,omitempty"` Headers []string `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` Callchain []*FailKernRequestFrame `protobuf:"bytes,5,rep,name=callchain,proto3" json:"callchain,omitempty"` Probability float32 `protobuf:"fixed32,6,opt,name=probability,proto3" json:"probability,omitempty"` Times uint32 `protobuf:"varint,7,opt,name=times,proto3" json:"times,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *FailKernRequest) Reset() { *m = FailKernRequest{} } func (m *FailKernRequest) String() string { return proto.CompactTextString(m) } func (*FailKernRequest) ProtoMessage() {} func (*FailKernRequest) Descriptor() ([]byte, []int) { return fileDescriptor_62eed357eb71de0e, []int{1} } func (m *FailKernRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FailKernRequest.Unmarshal(m, b) } func (m *FailKernRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FailKernRequest.Marshal(b, m, deterministic) } func (m *FailKernRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_FailKernRequest.Merge(m, src) } func (m *FailKernRequest) XXX_Size() int { return xxx_messageInfo_FailKernRequest.Size(m) } func (m *FailKernRequest) XXX_DiscardUnknown() { xxx_messageInfo_FailKernRequest.DiscardUnknown(m) } var xxx_messageInfo_FailKernRequest proto.InternalMessageInfo func (m *FailKernRequest) GetPid() uint32 { if m != nil { return m.Pid } return 0 } func (m *FailKernRequest) GetTid() uint32 { if m != nil { return m.Tid } return 0 } func (m *FailKernRequest) GetFtype() FailKernRequest_FAILTYPE { if m != nil { return m.Ftype } return FailKernRequest_SLAB } func (m *FailKernRequest) GetHeaders() []string { if m != nil { return m.Headers } return nil } func (m *FailKernRequest) GetCallchain() []*FailKernRequestFrame { if m != nil { return m.Callchain } return nil } func (m *FailKernRequest) GetProbability() float32 { if m != nil { return m.Probability } return 0 } func (m *FailKernRequest) GetTimes() uint32 { if m != nil { return m.Times } return 0 } type FailKernRequestFrame struct { Funcname string `protobuf:"bytes,1,opt,name=funcname,proto3" json:"funcname,omitempty"` Parameters string `protobuf:"bytes,2,opt,name=parameters,proto3" json:"parameters,omitempty"` Predicate string `protobuf:"bytes,3,opt,name=predicate,proto3" json:"predicate,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *FailKernRequestFrame) Reset() { *m = FailKernRequestFrame{} } func (m *FailKernRequestFrame) String() string { return proto.CompactTextString(m) } func (*FailKernRequestFrame) ProtoMessage() {} func (*FailKernRequestFrame) Descriptor() ([]byte, []int) { return fileDescriptor_62eed357eb71de0e, []int{1, 0} } func (m *FailKernRequestFrame) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FailKernRequestFrame.Unmarshal(m, b) } func (m *FailKernRequestFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FailKernRequestFrame.Marshal(b, m, deterministic) } func (m *FailKernRequestFrame) XXX_Merge(src proto.Message) { xxx_messageInfo_FailKernRequestFrame.Merge(m, src) } func (m *FailKernRequestFrame) XXX_Size() int { return xxx_messageInfo_FailKernRequestFrame.Size(m) } func (m *FailKernRequestFrame) XXX_DiscardUnknown() { xxx_messageInfo_FailKernRequestFrame.DiscardUnknown(m) } 
var xxx_messageInfo_FailKernRequestFrame proto.InternalMessageInfo func (m *FailKernRequestFrame) GetFuncname() string { if m != nil { return m.Funcname } return "" } func (m *FailKernRequestFrame) GetParameters() string { if m != nil { return m.Parameters } return "" } func (m *FailKernRequestFrame) GetPredicate() string { if m != nil { return m.Predicate } return "" } type FailSyscallRequest struct { Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` Tid uint32 `protobuf:"varint,2,opt,name=tid,proto3" json:"tid,omitempty"` Methods []string `protobuf:"bytes,3,rep,name=methods,proto3" json:"methods,omitempty"` Err uint32 `protobuf:"varint,4,opt,name=err,proto3" json:"err,omitempty"` Probability float32 `protobuf:"fixed32,5,opt,name=probability,proto3" json:"probability,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *FailSyscallRequest) Reset() { *m = FailSyscallRequest{} } func (m *FailSyscallRequest) String() string { return proto.CompactTextString(m) } func (*FailSyscallRequest) ProtoMessage() {} func (*FailSyscallRequest) Descriptor() ([]byte, []int) { return fileDescriptor_62eed357eb71de0e, []int{2} } func (m *FailSyscallRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FailSyscallRequest.Unmarshal(m, b) } func (m *FailSyscallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FailSyscallRequest.Marshal(b, m, deterministic) } func (m *FailSyscallRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_FailSyscallRequest.Merge(m, src) } func (m *FailSyscallRequest) XXX_Size() int { return xxx_messageInfo_FailSyscallRequest.Size(m) } func (m *FailSyscallRequest) XXX_DiscardUnknown() { xxx_messageInfo_FailSyscallRequest.DiscardUnknown(m) } var xxx_messageInfo_FailSyscallRequest proto.InternalMessageInfo func (m *FailSyscallRequest) GetPid() uint32 { if m != nil { return m.Pid } return 0 } func (m *FailSyscallRequest) GetTid() uint32 { if m != nil { return m.Tid } return 0 } func (m *FailSyscallRequest) GetMethods() []string { if m != nil { return m.Methods } return nil } func (m *FailSyscallRequest) GetErr() uint32 { if m != nil { return m.Err } return 0 } func (m *FailSyscallRequest) GetProbability() float32 { if m != nil { return m.Probability } return 0 } type StatusResponse struct { Ret int32 `protobuf:"varint,1,opt,name=ret,proto3" json:"ret,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StatusResponse) Reset() { *m = StatusResponse{} } func (m *StatusResponse) String() string { return proto.CompactTextString(m) } func (*StatusResponse) ProtoMessage() {} func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor_62eed357eb71de0e, []int{3} } func (m *StatusResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StatusResponse.Unmarshal(m, b) } func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) } func (m *StatusResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StatusResponse.Merge(m, src) } func (m *StatusResponse) XXX_Size() int { return xxx_messageInfo_StatusResponse.Size(m) } func (m *StatusResponse) XXX_DiscardUnknown() { xxx_messageInfo_StatusResponse.DiscardUnknown(m) } var xxx_messageInfo_StatusResponse proto.InternalMessageInfo func (m 
*StatusResponse) GetRet() int32 { if m != nil { return m.Ret } return 0 } func (m *StatusResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func init() { proto.RegisterEnum("bpfki.FailKernRequest_FAILTYPE", FailKernRequest_FAILTYPE_name, FailKernRequest_FAILTYPE_value) proto.RegisterType((*BumpTimeRequest)(nil), "bpfki.BumpTimeRequest") proto.RegisterType((*FailKernRequest)(nil), "bpfki.FailKernRequest") proto.RegisterType((*FailKernRequestFrame)(nil), "bpfki.FailKernRequest.frame") proto.RegisterType((*FailSyscallRequest)(nil), "bpfki.FailSyscallRequest") proto.RegisterType((*StatusResponse)(nil), "bpfki.StatusResponse") } func init() { proto.RegisterFile("bpfki.proto", fileDescriptor_62eed357eb71de0e) } var fileDescriptor_62eed357eb71de0e = []byte{ // 518 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0xda, 0x40, 0x10, 0x8d, 0x31, 0xe6, 0x63, 0xdc, 0x00, 0x5a, 0xb5, 0x91, 0x8b, 0xa2, 0xd6, 0xe2, 0x52, 0x4e, 0x1c, 0x68, 0x7b, 0xa9, 0xd4, 0x48, 0x58, 0x0d, 0x15, 0x4a, 0x22, 0xd0, 0x3a, 0xaa, 0xd4, 0xe3, 0x62, 0x0f, 0x65, 0x55, 0xfc, 0xd1, 0xdd, 0x25, 0x12, 0x7f, 0x20, 0xb7, 0xfe, 0x87, 0xfe, 0xd4, 0x6a, 0x6d, 0x07, 0x1c, 0xa2, 0x48, 0x21, 0xb9, 0xcd, 0xbc, 0xdd, 0x7d, 0x7e, 0x6f, 0xe6, 0xc9, 0x60, 0xcf, 0xd3, 0xc5, 0x6f, 0x3e, 0x48, 0x45, 0xa2, 0x12, 0x62, 0x65, 0x4d, 0xef, 0xaf, 0x01, 0x6d, 0x6f, 0x1d, 0xa5, 0xd7, 0x3c, 0x42, 0x8a, 0x7f, 0xd6, 0x28, 0x15, 0xe9, 0x80, 0x99, 0xf2, 0xd0, 0x31, 0x5c, 0xa3, 0x7f, 0x4c, 0x75, 0xa9, 0x11, 0xc5, 0x43, 0xa7, 0x92, 0x23, 0x8a, 0x87, 0xe4, 0x04, 0x6a, 0x12, 0x83, 0x24, 0x0e, 0x1d, 0xd3, 0x35, 0xfa, 0x16, 0x2d, 0x3a, 0x72, 0x0a, 0x4d, 0xb9, 0x9e, 0x17, 0x47, 0xd5, 0xec, 0x68, 0x07, 0x10, 0x17, 0xec, 0x54, 0x24, 0x73, 0x36, 0xe7, 0x2b, 0xae, 0x36, 0x8e, 0xe5, 0x1a, 0xfd, 0x0a, 0x2d, 0x43, 0xbd, 0x5b, 0x13, 0xda, 0x63, 0xc6, 0x57, 0x17, 0x28, 0xe2, 0x43, 0xf4, 0x7c, 0x06, 0x6b, 0xa1, 0x36, 0x29, 0x66, 0x72, 0x5a, 0xc3, 0xf7, 0x83, 0xdc, 0xeb, 0x1e, 0xd5, 0x60, 0x3c, 0x9a, 0x5c, 0x5e, 0xff, 0x9c, 0x9d, 0xd3, 0xfc, 0x36, 0x71, 0xa0, 0xbe, 0x44, 0x16, 0xa2, 0x90, 0x4e, 0xd5, 0x35, 0xfb, 0x4d, 0x7a, 0xd7, 0x92, 0x2f, 0xd0, 0x0c, 0xd8, 0x6a, 0x15, 0x2c, 0x19, 0x8f, 0x1d, 0xcb, 0x35, 0xfb, 0xf6, 0xf0, 0xf4, 0x11, 0xd2, 0x85, 0x60, 0x11, 0xd2, 0xdd, 0xf5, 0x7d, 0x9b, 0xb5, 0x07, 0x36, 0xc9, 0x6b, 0xb0, 0x14, 0x8f, 0x50, 0x3a, 0xf5, 0xcc, 0x42, 0xde, 0x74, 0x19, 0x58, 0x19, 0x17, 0xe9, 0x42, 0x63, 0xb1, 0x8e, 0x83, 0x98, 0x45, 0x98, 0xd9, 0x6e, 0xd2, 0x6d, 0x4f, 0xde, 0x01, 0xa4, 0x4c, 0xdf, 0x52, 0x5a, 0x75, 0x25, 0x3b, 0x2d, 0x21, 0x7a, 0x03, 0xa9, 0xc0, 0x90, 0x07, 0x4c, 0xe5, 0xd3, 0x68, 0xd2, 0x1d, 0xd0, 0xfb, 0x00, 0x8d, 0xbb, 0x19, 0x90, 0x06, 0x54, 0xfd, 0xcb, 0x91, 0xd7, 0x39, 0xd2, 0xd5, 0x6c, 0xf4, 0xfd, 0xbc, 0x63, 0x90, 0x3a, 0x98, 0xde, 0x64, 0xda, 0xa9, 0xf4, 0x6e, 0x0d, 0x20, 0xda, 0xa8, 0xbf, 0x91, 0xda, 0xd8, 0x21, 0xbb, 0x70, 0xa0, 0x1e, 0xa1, 0x5a, 0x26, 0xa1, 0x74, 0xcc, 0x7c, 0xa8, 0x45, 0xab, 0xef, 0xa2, 0x10, 0x59, 0x2e, 0x8e, 0xa9, 0x2e, 0x9f, 0x90, 0x88, 0x4f, 0xd0, 0xf2, 0x15, 0x53, 0x6b, 0x49, 0x51, 0xa6, 0x49, 0x2c, 0x51, 0xb3, 0x08, 0x54, 0x99, 0x06, 0x8b, 0xea, 0x52, 0x23, 0x91, 0xfc, 0x55, 0x0c, 0x43, 0x97, 0xc3, 0x7f, 0x55, 0x78, 0xe5, 0xcd, 0xc6, 0x17, 0x13, 0x1f, 0xc5, 0x0d, 0x0f, 0x90, 0x7c, 0x05, 0xf0, 0x51, 0xe9, 0x98, 0xff, 0x60, 0x2b, 0x72, 0x52, 0xac, 0x72, 0x2f, 0xfa, 0xdd, 0x37, 0x05, 0x7e, 0xff, 0x8b, 0xbd, 0x23, 0x32, 0x82, 0x16, 0xc5, 0x20, 0xb9, 0x41, 0xf1, 0x6c, 0x8a, 0x33, 0xb0, 0x0b, 0x05, 0x7e, 0x8a, 0xc1, 0xe1, 0xef, 
0x3d, 0x68, 0x97, 0x24, 0x3c, 0x8f, 0xe3, 0x0c, 0x6c, 0xbd, 0xd4, 0xab, 0xab, 0xa9, 0xf0, 0x26, 0xd3, 0xed, 0xfb, 0xbd, 0x44, 0x3f, 0x65, 0x0c, 0x2f, 0xa0, 0xb0, 0x4b, 0xb9, 0x22, 0x6f, 0x4b, 0xef, 0xef, 0x67, 0xed, 0x71, 0x8a, 0x6f, 0x5b, 0x15, 0x2f, 0x60, 0x99, 0xd7, 0xb2, 0x1f, 0xe1, 0xc7, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x91, 0xa8, 0xf4, 0xbf, 0x17, 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // BPFKIServiceClient is the client API for BPFKIService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type BPFKIServiceClient interface { SetTimeVal(ctx context.Context, in *BumpTimeRequest, opts ...grpc.CallOption) (*StatusResponse, error) RecoverTimeVal(ctx context.Context, in *BumpTimeRequest, opts ...grpc.CallOption) (*StatusResponse, error) SetTimeSpec(ctx context.Context, in *BumpTimeRequest, opts ...grpc.CallOption) (*StatusResponse, error) RecoverTimeSpec(ctx context.Context, in *BumpTimeRequest, opts ...grpc.CallOption) (*StatusResponse, error) FailMMOrBIO(ctx context.Context, in *FailKernRequest, opts ...grpc.CallOption) (*StatusResponse, error) RecoverMMOrBIO(ctx context.Context, in *FailKernRequest, opts ...grpc.CallOption) (*StatusResponse, error) FailSyscall(ctx context.Context, in *FailSyscallRequest, opts ...grpc.CallOption) (*StatusResponse, error) RecoverSyscall(ctx context.Context, in *FailSyscallRequest, opts ...grpc.CallOption) (*StatusResponse, error) } type bPFKIServiceClient struct { cc *grpc.ClientConn } func NewBPFKIServiceClient(cc *grpc.ClientConn) BPFKIServiceClient { return &bPFKIServiceClient{cc} } func (c *bPFKIServiceClient) SetTimeVal(ctx context.Context, in *BumpTimeRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) err := c.cc.Invoke(ctx, "/bpfki.BPFKIService/SetTimeVal", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *bPFKIServiceClient) RecoverTimeVal(ctx context.Context, in *BumpTimeRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) err := c.cc.Invoke(ctx, "/bpfki.BPFKIService/RecoverTimeVal", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *bPFKIServiceClient) SetTimeSpec(ctx context.Context, in *BumpTimeRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) err := c.cc.Invoke(ctx, "/bpfki.BPFKIService/SetTimeSpec", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *bPFKIServiceClient) RecoverTimeSpec(ctx context.Context, in *BumpTimeRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) err := c.cc.Invoke(ctx, "/bpfki.BPFKIService/RecoverTimeSpec", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *bPFKIServiceClient) FailMMOrBIO(ctx context.Context, in *FailKernRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) err := c.cc.Invoke(ctx, "/bpfki.BPFKIService/FailMMOrBIO", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *bPFKIServiceClient) RecoverMMOrBIO(ctx context.Context, in *FailKernRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) err := c.cc.Invoke(ctx, "/bpfki.BPFKIService/RecoverMMOrBIO", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *bPFKIServiceClient) FailSyscall(ctx context.Context, in *FailSyscallRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) err := c.cc.Invoke(ctx, "/bpfki.BPFKIService/FailSyscall", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *bPFKIServiceClient) RecoverSyscall(ctx context.Context, in *FailSyscallRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) err := c.cc.Invoke(ctx, "/bpfki.BPFKIService/RecoverSyscall", in, out, opts...) if err != nil { return nil, err } return out, nil } // BPFKIServiceServer is the server API for BPFKIService service. type BPFKIServiceServer interface { SetTimeVal(context.Context, *BumpTimeRequest) (*StatusResponse, error) RecoverTimeVal(context.Context, *BumpTimeRequest) (*StatusResponse, error) SetTimeSpec(context.Context, *BumpTimeRequest) (*StatusResponse, error) RecoverTimeSpec(context.Context, *BumpTimeRequest) (*StatusResponse, error) FailMMOrBIO(context.Context, *FailKernRequest) (*StatusResponse, error) RecoverMMOrBIO(context.Context, *FailKernRequest) (*StatusResponse, error) FailSyscall(context.Context, *FailSyscallRequest) (*StatusResponse, error) RecoverSyscall(context.Context, *FailSyscallRequest) (*StatusResponse, error) } // UnimplementedBPFKIServiceServer can be embedded to have forward compatible implementations. type UnimplementedBPFKIServiceServer struct { } func (*UnimplementedBPFKIServiceServer) SetTimeVal(ctx context.Context, req *BumpTimeRequest) (*StatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SetTimeVal not implemented") } func (*UnimplementedBPFKIServiceServer) RecoverTimeVal(ctx context.Context, req *BumpTimeRequest) (*StatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RecoverTimeVal not implemented") } func (*UnimplementedBPFKIServiceServer) SetTimeSpec(ctx context.Context, req *BumpTimeRequest) (*StatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SetTimeSpec not implemented") } func (*UnimplementedBPFKIServiceServer) RecoverTimeSpec(ctx context.Context, req *BumpTimeRequest) (*StatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RecoverTimeSpec not implemented") } func (*UnimplementedBPFKIServiceServer) FailMMOrBIO(ctx context.Context, req *FailKernRequest) (*StatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FailMMOrBIO not implemented") } func (*UnimplementedBPFKIServiceServer) RecoverMMOrBIO(ctx context.Context, req *FailKernRequest) (*StatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RecoverMMOrBIO not implemented") } func (*UnimplementedBPFKIServiceServer) FailSyscall(ctx context.Context, req *FailSyscallRequest) (*StatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FailSyscall not implemented") } func (*UnimplementedBPFKIServiceServer) RecoverSyscall(ctx context.Context, req *FailSyscallRequest) (*StatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RecoverSyscall not implemented") } func RegisterBPFKIServiceServer(s *grpc.Server, srv 
BPFKIServiceServer) { s.RegisterService(&_BPFKIService_serviceDesc, srv) } func _BPFKIService_SetTimeVal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(BumpTimeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(BPFKIServiceServer).SetTimeVal(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/bpfki.BPFKIService/SetTimeVal", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BPFKIServiceServer).SetTimeVal(ctx, req.(*BumpTimeRequest)) } return interceptor(ctx, in, info, handler) } func _BPFKIService_RecoverTimeVal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(BumpTimeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(BPFKIServiceServer).RecoverTimeVal(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/bpfki.BPFKIService/RecoverTimeVal", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BPFKIServiceServer).RecoverTimeVal(ctx, req.(*BumpTimeRequest)) } return interceptor(ctx, in, info, handler) } func _BPFKIService_SetTimeSpec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(BumpTimeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(BPFKIServiceServer).SetTimeSpec(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/bpfki.BPFKIService/SetTimeSpec", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BPFKIServiceServer).SetTimeSpec(ctx, req.(*BumpTimeRequest)) } return interceptor(ctx, in, info, handler) } func _BPFKIService_RecoverTimeSpec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(BumpTimeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(BPFKIServiceServer).RecoverTimeSpec(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/bpfki.BPFKIService/RecoverTimeSpec", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BPFKIServiceServer).RecoverTimeSpec(ctx, req.(*BumpTimeRequest)) } return interceptor(ctx, in, info, handler) } func _BPFKIService_FailMMOrBIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FailKernRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(BPFKIServiceServer).FailMMOrBIO(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/bpfki.BPFKIService/FailMMOrBIO", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BPFKIServiceServer).FailMMOrBIO(ctx, req.(*FailKernRequest)) } return interceptor(ctx, in, info, handler) } func _BPFKIService_RecoverMMOrBIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FailKernRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(BPFKIServiceServer).RecoverMMOrBIO(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, 
FullMethod: "/bpfki.BPFKIService/RecoverMMOrBIO", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BPFKIServiceServer).RecoverMMOrBIO(ctx, req.(*FailKernRequest)) } return interceptor(ctx, in, info, handler) } func _BPFKIService_FailSyscall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FailSyscallRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(BPFKIServiceServer).FailSyscall(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/bpfki.BPFKIService/FailSyscall", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BPFKIServiceServer).FailSyscall(ctx, req.(*FailSyscallRequest)) } return interceptor(ctx, in, info, handler) } func _BPFKIService_RecoverSyscall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FailSyscallRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(BPFKIServiceServer).RecoverSyscall(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/bpfki.BPFKIService/RecoverSyscall", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BPFKIServiceServer).RecoverSyscall(ctx, req.(*FailSyscallRequest)) } return interceptor(ctx, in, info, handler) } var _BPFKIService_serviceDesc = grpc.ServiceDesc{ ServiceName: "bpfki.BPFKIService", HandlerType: (*BPFKIServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "SetTimeVal", Handler: _BPFKIService_SetTimeVal_Handler, }, { MethodName: "RecoverTimeVal", Handler: _BPFKIService_RecoverTimeVal_Handler, }, { MethodName: "SetTimeSpec", Handler: _BPFKIService_SetTimeSpec_Handler, }, { MethodName: "RecoverTimeSpec", Handler: _BPFKIService_RecoverTimeSpec_Handler, }, { MethodName: "FailMMOrBIO", Handler: _BPFKIService_FailMMOrBIO_Handler, }, { MethodName: "RecoverMMOrBIO", Handler: _BPFKIService_RecoverMMOrBIO_Handler, }, { MethodName: "FailSyscall", Handler: _BPFKIService_FailSyscall_Handler, }, { MethodName: "RecoverSyscall", Handler: _BPFKIService_RecoverSyscall_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "bpfki.proto", }
1
21,630
Code generated by protoc-gen-go. **DO NOT EDIT.** The correct way to fix `deprecated` in this file may be to update the protoc / protobuf / grpc toolchain and regenerate this file.
chaos-mesh-chaos-mesh
go
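In the spirit of the comment above (regenerate rather than hand-edit), a hedged Go sketch of how to verify that a regenerated `bpfki.proto` registered itself with the modern protobuf runtime; the blank-import path is a placeholder for the regenerated package.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	// _ "path/to/generated/bpfki" // placeholder: importing the regenerated
	// package runs its init-time self-registration.
)

func main() {
	// Modern protoc-gen-go output registers the file descriptor into
	// protoregistry.GlobalFiles at init time; look it up by path.
	fd, err := protoregistry.GlobalFiles.FindFileByPath("bpfki.proto")
	if err != nil {
		fmt.Println("bpfki.proto not registered:", err)
		return
	}
	fmt.Println("registered proto package:", fd.Package())
}
```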
@@ -15,6 +15,7 @@ import java.util.List; * * @author Brian Remedios */ +@Deprecated public class IntegerPropertyTest extends AbstractNumericPropertyDescriptorTester<Integer> { private static final int MIN = 1;
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.properties; import java.util.List; /** * Evaluates the functionality of the IntegerProperty descriptor by testing its * ability to catch creation errors (illegal args), flag out-of-range test * values, and serialize/deserialize groups of integers onto/from a string * buffer. * * @author Brian Remedios */ public class IntegerPropertyTest extends AbstractNumericPropertyDescriptorTester<Integer> { private static final int MIN = 1; private static final int MAX = 12; private static final int SHIFT = 4; public IntegerPropertyTest() { super("Integer"); } /* @Override @Test public void testErrorForBadSingle() { } // not until int properties get ranges @Override @Test public void testErrorForBadMulti() { } // not until int properties get ranges */ @Override protected Integer createValue() { return randomInt(MIN, MAX); } @Override protected Integer createBadValue() { return randomBool() ? randomInt(MIN - SHIFT, MIN - 1) : randomInt(MAX + 1, MAX + SHIFT); } protected IntegerProperty.IntegerPBuilder singleBuilder() { return IntegerProperty.named("test").desc("foo") .range(MIN, MAX).defaultValue(createValue()).uiOrder(1.0f); } protected IntegerMultiProperty.IntegerMultiPBuilder multiBuilder() { return IntegerMultiProperty.named("test").desc("foo") .range(MIN, MAX).defaultValues(createValue(), createValue()).uiOrder(1.0f); } @Override protected PropertyDescriptor<Integer> createProperty() { return new IntegerProperty("testInteger", "Test integer property", MIN, MAX, MAX - 1, 1.0f); } @Override protected PropertyDescriptor<List<Integer>> createMultiProperty() { return new IntegerMultiProperty("testInteger", "Test integer property", MIN, MAX, new Integer[] {MIN, MIN + 1, MAX - 1, MAX}, 1.0f); } @Override protected PropertyDescriptor<Integer> createBadProperty() { return new IntegerProperty("", "Test integer property", MIN, MAX, MAX + 1, 1.0f); } @Override protected PropertyDescriptor<List<Integer>> createBadMultiProperty() { return new IntegerMultiProperty("testInteger", "", MIN, MAX, new Integer[] {MIN - 1, MAX}, 1.0f); } @Override protected Integer min() { return MIN; } @Override protected Integer max() { return MAX; } }
1
14,995
We should make sure that we cover these test cases in a new unit test class.
pmd-pmd
java
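A hedged JUnit 4 skeleton for the replacement coverage the comment asks for; the class name, the property under test, and its bounds are illustrative placeholders, since the review does not name the new (non-deprecated) property API.

```java
import static org.junit.Assert.assertTrue;

import org.junit.Test;

/**
 * Hypothetical skeleton for the new unit test class; the values below are
 * stand-ins, not the real descriptor under test.
 */
public class NewIntegerPropertyTest {

    private static final int MIN = 1;
    private static final int MAX = 12;

    @Test
    public void defaultValueStaysInRange() {
        int defaultValue = MAX - 1; // stand-in for the descriptor's default
        assertTrue(defaultValue >= MIN && defaultValue <= MAX);
    }
}
```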
@@ -0,0 +1,19 @@ +package proofs + +// #cgo LDFLAGS: -L${SRCDIR}/../proofs/lib -lfilecoin_proofs +// #cgo pkg-config: ${SRCDIR}/../proofs/lib/pkgconfig/libfilecoin_proofs.pc +// #include "../proofs/include/libfilecoin_proofs.h" +import "C" +import ( + "unsafe" +) + +func SectorSize(ssType Mode) uint64 { + scfg, err := CProofsMode(ssType) + if err != nil { + return 0 + } + + numFromC := C.get_max_user_bytes_per_staged_sector((*C.ConfiguredStore)(unsafe.Pointer(scfg))) + return uint64(numFromC) +}
1
1
18,399
Is this only used in test code? Is there future utility in it for production code? If not, it would be nice if this could be moved into a test package. I'm not sure how hard that would be. CC @laser
filecoin-project-venus
go
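A hedged Go sketch of the relocation the reviewer floats: because `import "C"` is not allowed in `_test.go` files, the cgo wrapper would move into a small test-only package that production code simply never imports. The package path, import path, and relative include paths below are all assumptions.

```go
// Hypothetical file proofs/testhelpers/sectorsize.go: cgo cannot appear in
// _test.go files, so the wrapper stays in a regular .go file, isolated in
// a package that only test code imports.
package testhelpers

// #cgo LDFLAGS: -L${SRCDIR}/../lib -lfilecoin_proofs
// #include "../include/libfilecoin_proofs.h"
import "C"

import (
	"unsafe"

	"github.com/filecoin-project/go-filecoin/proofs" // assumed import path
)

// SectorSize mirrors the original helper: it returns 0 when the mode
// cannot be converted, exactly as before.
func SectorSize(ssType proofs.Mode) uint64 {
	scfg, err := proofs.CProofsMode(ssType)
	if err != nil {
		return 0
	}
	return uint64(C.get_max_user_bytes_per_staged_sector((*C.ConfiguredStore)(unsafe.Pointer(scfg))))
}
```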
@@ -192,7 +192,7 @@ func (p *Plugin) setConfig(config *pluginConfig) error { p.cancelWatcher() p.cancelWatcher = nil } - if config.WebhookLabel != "" { + if config.WebhookLabel != "" || config.APIServiceLabel != "" { p.cancelWatcher = cancelWatcher }
1
package k8sbundle import ( "bytes" "context" "encoding/json" "encoding/pem" "errors" "fmt" "strings" "sync" "github.com/hashicorp/go-hclog" "github.com/hashicorp/hcl" "github.com/spiffe/spire-plugin-sdk/pluginsdk" "github.com/spiffe/spire/pkg/common/catalog" "github.com/spiffe/spire/proto/spire/common" spi "github.com/spiffe/spire/proto/spire/common/plugin" identityproviderv0 "github.com/spiffe/spire/proto/spire/hostservice/server/identityprovider/v0" notifierv0 "github.com/spiffe/spire/proto/spire/plugin/server/notifier/v0" "github.com/zeebo/errs" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" admissionv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/retry" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" aggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" ) var ( k8sErr = errs.Class("k8s-bundle") ) const ( defaultNamespace = "spire" defaultConfigMap = "spire-bundle" defaultConfigMapKey = "bundle.crt" ) func BuiltIn() catalog.BuiltIn { return builtIn(New()) } func builtIn(p *Plugin) catalog.BuiltIn { return catalog.MakeBuiltIn("k8sbundle", notifierv0.NotifierPluginServer(p), ) } type pluginConfig struct { Namespace string `hcl:"namespace"` ConfigMap string `hcl:"config_map"` ConfigMapKey string `hcl:"config_map_key"` WebhookLabel string `hcl:"webhook_label"` APIServiceLabel string `hcl:"api_service_label"` KubeConfigFilePath string `hcl:"kube_config_file_path"` } type Plugin struct { notifierv0.UnsafeNotifierServer mu sync.RWMutex log hclog.Logger config *pluginConfig identityProvider identityproviderv0.IdentityProviderServiceClient cancelWatcher func() hooks struct { newKubeClient func(c *pluginConfig) ([]kubeClient, error) } } func New() *Plugin { p := &Plugin{} p.hooks.newKubeClient = newKubeClient return p } func (p *Plugin) SetLogger(log hclog.Logger) { p.log = log } func (p *Plugin) BrokerHostServices(broker pluginsdk.ServiceBroker) error { if !broker.BrokerClient(&p.identityProvider) { return k8sErr.New("IdentityProvider host service is required") } return nil } func (p *Plugin) Notify(ctx context.Context, req *notifierv0.NotifyRequest) (*notifierv0.NotifyResponse, error) { config, err := p.getConfig() if err != nil { return nil, err } if _, ok := req.Event.(*notifierv0.NotifyRequest_BundleUpdated); ok { // ignore the bundle presented in the request. see updateBundle for details on why. if err := p.updateBundles(ctx, config); err != nil { return nil, err } } return &notifierv0.NotifyResponse{}, nil } func (p *Plugin) NotifyAndAdvise(ctx context.Context, req *notifierv0.NotifyAndAdviseRequest) (*notifierv0.NotifyAndAdviseResponse, error) { config, err := p.getConfig() if err != nil { return nil, err } if _, ok := req.Event.(*notifierv0.NotifyAndAdviseRequest_BundleLoaded); ok { // ignore the bundle presented in the request. see updateBundle for details on why. 
if err := p.updateBundles(ctx, config); err != nil { return nil, err } } return &notifierv0.NotifyAndAdviseResponse{}, nil } func (p *Plugin) Configure(ctx context.Context, req *spi.ConfigureRequest) (resp *spi.ConfigureResponse, err error) { config := new(pluginConfig) if err := hcl.Decode(&config, req.Configuration); err != nil { return nil, k8sErr.New("unable to decode configuration: %v", err) } if config.Namespace == "" { config.Namespace = defaultNamespace } if config.ConfigMap == "" { config.ConfigMap = defaultConfigMap } if config.ConfigMapKey == "" { config.ConfigMapKey = defaultConfigMapKey } if err = p.setConfig(config); err != nil { return nil, k8sErr.New("unable to set configuration: %v", err) } return &spi.ConfigureResponse{}, nil } func (p *Plugin) GetPluginInfo(ctx context.Context, req *spi.GetPluginInfoRequest) (*spi.GetPluginInfoResponse, error) { return &spi.GetPluginInfoResponse{}, nil } func (p *Plugin) getConfig() (*pluginConfig, error) { p.mu.RLock() defer p.mu.RUnlock() if p.config == nil { return nil, k8sErr.New("not configured") } return p.config, nil } func (p *Plugin) setConfig(config *pluginConfig) error { p.mu.Lock() defer p.mu.Unlock() // Start watcher to set CA Bundle in objects created after server has started var cancelWatcher func() if config.WebhookLabel != "" || config.APIServiceLabel != "" { ctx, cancel := context.WithCancel(context.Background()) watcher, err := newBundleWatcher(ctx, p, config) if err != nil { cancel() return err } var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() if err := watcher.Watch(ctx); err != nil && !errors.Is(err, context.Canceled) { p.log.Error("Unable to watch", "error", err) } }() cancelWatcher = func() { cancel() wg.Wait() } } if p.cancelWatcher != nil { p.cancelWatcher() p.cancelWatcher = nil } if config.WebhookLabel != "" { p.cancelWatcher = cancelWatcher } p.config = config return nil } // updateBundles iterates through all the objects that need an updated CA bundle // If an error is an encountered updating the bundle for an object, we record the // error and continue on to the next object func (p *Plugin) updateBundles(ctx context.Context, c *pluginConfig) (err error) { clients, err := p.hooks.newKubeClient(c) if err != nil { return err } var updateErrs string for _, client := range clients { list, err := client.GetList(ctx, c) if err != nil { updateErrs += fmt.Sprintf("unable to get list: %v, ", err) continue } listItems, err := meta.ExtractList(list) if err != nil { updateErrs += fmt.Sprintf("unable to extract list items: %v, ", err) continue } for _, item := range listItems { itemMeta, err := meta.Accessor(item) if err != nil { updateErrs += fmt.Sprintf("unable to extract metadata for item: %v, ", err) continue } err = p.updateBundle(ctx, c, client, itemMeta.GetNamespace(), itemMeta.GetName()) if err != nil && status.Code(err) != codes.AlreadyExists { updateErrs += fmt.Sprintf("%s: %v, ", namespacedName(itemMeta), err) } } } if len(updateErrs) > 0 { return k8sErr.New("unable to update: %s", strings.TrimSuffix(updateErrs, ", ")) } return nil } // updateBundle does the ready-modify-write semantics for Kubernetes, retrying on conflict func (p *Plugin) updateBundle(ctx context.Context, c *pluginConfig, client kubeClient, namespace, name string) (err error) { return retry.RetryOnConflict(retry.DefaultRetry, func() error { // Get the object so we can use the version to resolve conflicts racing // on updates from other servers. 
obj, err := client.Get(ctx, namespace, name) if err != nil { return k8sErr.New("unable to get object %s/%s: %v", namespace, name, err) } // Load bundle data from the registration api. The bundle has to be // loaded after fetching the object so we can properly detect and // correct a race updating the bundle (i.e. read-modify-write // semantics). resp, err := p.identityProvider.FetchX509Identity(ctx, &identityproviderv0.FetchX509IdentityRequest{}) if err != nil { return err } // Build patch with the new bundle data. The resource version MUST be set // to support conflict resolution. patch, err := client.CreatePatch(ctx, c, obj, resp) if err != nil { return err } // Patch the bundle, handling version conflicts patchBytes, err := json.Marshal(patch) if err != nil { return k8sErr.New("unable to marshal patch: %v", err) } return client.Patch(ctx, namespace, name, patchBytes) }) } func newKubeClient(c *pluginConfig) ([]kubeClient, error) { clientset, err := newKubeClientset(c.KubeConfigFilePath) if err != nil { return nil, k8sErr.Wrap(err) } aggregatorClientset, err := newAggregatorClientset(c.KubeConfigFilePath) if err != nil { return nil, k8sErr.Wrap(err) } clients := []kubeClient{configMapClient{Clientset: clientset}} if c.WebhookLabel != "" { clients = append(clients, mutatingWebhookClient{Clientset: clientset}, validatingWebhookClient{Clientset: clientset}, ) } if c.APIServiceLabel != "" { clients = append(clients, apiServiceClient{Clientset: aggregatorClientset}, ) } return clients, nil } func newKubeClientset(configPath string) (*kubernetes.Clientset, error) { config, err := getKubeConfig(configPath) if err != nil { return nil, err } client, err := kubernetes.NewForConfig(config) if err != nil { return nil, err } return client, nil } func newAggregatorClientset(configPath string) (*aggregator.Clientset, error) { config, err := getKubeConfig(configPath) if err != nil { return nil, err } client, err := aggregator.NewForConfig(config) if err != nil { return nil, err } return client, nil } func getKubeConfig(configPath string) (*rest.Config, error) { if configPath != "" { return clientcmd.BuildConfigFromFlags("", configPath) } return rest.InClusterConfig() } // kubeClient encapsulates the Kubenetes API for config maps, validating webhooks, and mutating webhooks type kubeClient interface { Get(ctx context.Context, namespace, name string) (runtime.Object, error) GetList(ctx context.Context, config *pluginConfig) (runtime.Object, error) CreatePatch(ctx context.Context, config *pluginConfig, obj runtime.Object, resp *identityproviderv0.FetchX509IdentityResponse) (runtime.Object, error) Patch(ctx context.Context, namespace, name string, patchBytes []byte) error Watch(ctx context.Context, config *pluginConfig) (watch.Interface, error) } // configMapClient encapsulates the Kubenetes API for updating the CA Bundle in a config map type configMapClient struct { *kubernetes.Clientset } func (c configMapClient) Get(ctx context.Context, namespace, configMap string) (runtime.Object, error) { return c.CoreV1().ConfigMaps(namespace).Get(ctx, configMap, metav1.GetOptions{}) } func (c configMapClient) GetList(ctx context.Context, config *pluginConfig) (runtime.Object, error) { obj, err := c.Get(ctx, config.Namespace, config.ConfigMap) if err != nil { return nil, err } configMap := obj.(*corev1.ConfigMap) return &corev1.ConfigMapList{ Items: []corev1.ConfigMap{*configMap}, }, nil } func (c configMapClient) CreatePatch(ctx context.Context, config *pluginConfig, obj runtime.Object, resp 
*identityproviderv0.FetchX509IdentityResponse) (runtime.Object, error) { configMap, ok := obj.(*corev1.ConfigMap) if !ok { return nil, status.Errorf(codes.InvalidArgument, "wrong type, expecting ConfigMap") } return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ ResourceVersion: configMap.ResourceVersion, }, Data: map[string]string{ config.ConfigMapKey: bundleData(resp.Bundle), }, }, nil } func (c configMapClient) Patch(ctx context.Context, namespace, name string, patchBytes []byte) error { _, err := c.CoreV1().ConfigMaps(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) return err } func (c configMapClient) Watch(ctx context.Context, config *pluginConfig) (watch.Interface, error) { return nil, nil } // apiServiceClient encapsulates the Kubenetes API for updating the CA Bundle in an API Service type apiServiceClient struct { *aggregator.Clientset } func (c apiServiceClient) Get(ctx context.Context, namespace, name string) (runtime.Object, error) { return c.ApiregistrationV1().APIServices().Get(ctx, name, metav1.GetOptions{}) } func (c apiServiceClient) GetList(ctx context.Context, config *pluginConfig) (runtime.Object, error) { return c.ApiregistrationV1().APIServices().List(ctx, metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=true", config.APIServiceLabel), }) } func (c apiServiceClient) CreatePatch(ctx context.Context, config *pluginConfig, obj runtime.Object, resp *identityproviderv0.FetchX509IdentityResponse) (runtime.Object, error) { apiService, ok := obj.(*apiregistrationv1.APIService) if !ok { return nil, status.Errorf(codes.InvalidArgument, "wrong type, expecting APIService") } // Check if APIService needs an update if bytes.Equal(apiService.Spec.CABundle, []byte(bundleData(resp.Bundle))) { return nil, status.Errorf(codes.AlreadyExists, "APIService %s is already up to date", apiService.Name) } patch := &apiregistrationv1.APIService{ ObjectMeta: metav1.ObjectMeta{ ResourceVersion: apiService.ResourceVersion, }, Spec: apiregistrationv1.APIServiceSpec{ CABundle: []byte(bundleData(resp.Bundle)), GroupPriorityMinimum: apiService.Spec.GroupPriorityMinimum, VersionPriority: apiService.Spec.VersionPriority, }, } return patch, nil } func (c apiServiceClient) Patch(ctx context.Context, namespace, name string, patchBytes []byte) error { _, err := c.ApiregistrationV1().APIServices().Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) return err } func (c apiServiceClient) Watch(ctx context.Context, config *pluginConfig) (watch.Interface, error) { return c.ApiregistrationV1().APIServices().Watch(ctx, metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=true", config.APIServiceLabel), }) } // mutatingWebhookClient encapsulates the Kubenetes API for updating the CA Bundle in a mutating webhook type mutatingWebhookClient struct { *kubernetes.Clientset } func (c mutatingWebhookClient) Get(ctx context.Context, namespace, mutatingWebhook string) (runtime.Object, error) { return c.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, mutatingWebhook, metav1.GetOptions{}) } func (c mutatingWebhookClient) GetList(ctx context.Context, config *pluginConfig) (runtime.Object, error) { return c.AdmissionregistrationV1().MutatingWebhookConfigurations().List(ctx, metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=true", config.WebhookLabel), }) } func (c mutatingWebhookClient) CreatePatch(ctx context.Context, config *pluginConfig, obj runtime.Object, resp *identityproviderv0.FetchX509IdentityResponse) (runtime.Object, 
error) { mutatingWebhook, ok := obj.(*admissionv1.MutatingWebhookConfiguration) if !ok { return nil, status.Errorf(codes.InvalidArgument, "wrong type, expecting MutatingWebhookConfiguration") } // Check if MutatingWebhookConfiguration needs an update needsUpdate := false for _, webhook := range mutatingWebhook.Webhooks { if !bytes.Equal(webhook.ClientConfig.CABundle, []byte(bundleData(resp.Bundle))) { needsUpdate = true break } } if !needsUpdate { return nil, status.Errorf(codes.AlreadyExists, "MutatingWebhookConfiguration %s is already up to date", mutatingWebhook.Name) } patch := &admissionv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ ResourceVersion: mutatingWebhook.ResourceVersion, }, } patch.Webhooks = make([]admissionv1.MutatingWebhook, len(mutatingWebhook.Webhooks)) // Step through all the the webhooks in the MutatingWebhookConfiguration for i := range patch.Webhooks { patch.Webhooks[i].AdmissionReviewVersions = mutatingWebhook.Webhooks[i].AdmissionReviewVersions patch.Webhooks[i].ClientConfig.CABundle = []byte(bundleData(resp.Bundle)) patch.Webhooks[i].Name = mutatingWebhook.Webhooks[i].Name patch.Webhooks[i].SideEffects = mutatingWebhook.Webhooks[i].SideEffects } return patch, nil } func (c mutatingWebhookClient) Patch(ctx context.Context, namespace, name string, patchBytes []byte) error { _, err := c.AdmissionregistrationV1().MutatingWebhookConfigurations().Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) return err } func (c mutatingWebhookClient) Watch(ctx context.Context, config *pluginConfig) (watch.Interface, error) { return c.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(ctx, metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=true", config.WebhookLabel), }) } // validatingWebhookClient encapsulates the Kubenetes API for updating the CA Bundle in a validating webhook type validatingWebhookClient struct { *kubernetes.Clientset } func (c validatingWebhookClient) Get(ctx context.Context, namespace, validatingWebhook string) (runtime.Object, error) { return c.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(ctx, validatingWebhook, metav1.GetOptions{}) } func (c validatingWebhookClient) GetList(ctx context.Context, config *pluginConfig) (runtime.Object, error) { return c.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(ctx, metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=true", config.WebhookLabel), }) } func (c validatingWebhookClient) CreatePatch(ctx context.Context, config *pluginConfig, obj runtime.Object, resp *identityproviderv0.FetchX509IdentityResponse) (runtime.Object, error) { validatingWebhook, ok := obj.(*admissionv1.ValidatingWebhookConfiguration) if !ok { return nil, status.Errorf(codes.InvalidArgument, "wrong type, expecting ValidatingWebhookConfiguration") } // Check if ValidatingWebhookConfiguration needs an update needsUpdate := false for _, webhook := range validatingWebhook.Webhooks { if !bytes.Equal(webhook.ClientConfig.CABundle, []byte(bundleData(resp.Bundle))) { needsUpdate = true break } } if !needsUpdate { return nil, status.Errorf(codes.AlreadyExists, "ValidatingWebhookConfiguration %s is already up to date", validatingWebhook.Name) } patch := &admissionv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ ResourceVersion: validatingWebhook.ResourceVersion, }, } patch.Webhooks = make([]admissionv1.ValidatingWebhook, len(validatingWebhook.Webhooks)) // Step through all the the webhooks in the ValidatingWebhookConfiguration for i := range 
patch.Webhooks { patch.Webhooks[i].AdmissionReviewVersions = validatingWebhook.Webhooks[i].AdmissionReviewVersions patch.Webhooks[i].ClientConfig.CABundle = []byte(bundleData(resp.Bundle)) patch.Webhooks[i].Name = validatingWebhook.Webhooks[i].Name patch.Webhooks[i].SideEffects = validatingWebhook.Webhooks[i].SideEffects } return patch, nil } func (c validatingWebhookClient) Patch(ctx context.Context, namespace, name string, patchBytes []byte) error { _, err := c.AdmissionregistrationV1().ValidatingWebhookConfigurations().Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) return err } func (c validatingWebhookClient) Watch(ctx context.Context, config *pluginConfig) (watch.Interface, error) { return c.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(ctx, metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=true", config.WebhookLabel), }) } // bundleData formats the bundle data for inclusion in the config map func bundleData(bundle *common.Bundle) string { bundleData := new(bytes.Buffer) for _, rootCA := range bundle.RootCas { _ = pem.Encode(bundleData, &pem.Block{ Type: "CERTIFICATE", Bytes: rootCA.DerBytes, }) } return bundleData.String() } // namespacedName returns "namespace/name" for namespaced resources and "name" for non-namespaced resources func namespacedName(itemMeta metav1.Object) string { if itemMeta.GetNamespace() != "" { return fmt.Sprintf("%s/%s", itemMeta.GetNamespace(), itemMeta.GetName()) } return itemMeta.GetName() }
1
16,533
Super curious how you noticed this, as SPIRE itself currently only configures once...?
spiffe-spire
go
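The diff above fixes a watcher leak in `setConfig`: the watcher goroutine is started whenever `WebhookLabel` or `APIServiceLabel` is set, but the old code stored `cancelWatcher` only for `WebhookLabel`, so a watcher started solely for `APIServiceLabel` could never be cancelled on reconfiguration. A minimal, self-contained sketch of the pattern follows; the `config` and `plugin` types here are simplified stand-ins, not SPIRE's actual code.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type config struct {
	WebhookLabel    string
	APIServiceLabel string
}

type plugin struct {
	mu            sync.Mutex
	cancelWatcher func()
}

func (p *plugin) setConfig(c config) {
	p.mu.Lock()
	defer p.mu.Unlock()

	var cancelWatcher func()
	if c.WebhookLabel != "" || c.APIServiceLabel != "" {
		ctx, cancel := context.WithCancel(context.Background())
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			<-ctx.Done() // stand-in for watcher.Watch(ctx)
		}()
		cancelWatcher = func() { cancel(); wg.Wait() }
	}

	if p.cancelWatcher != nil {
		p.cancelWatcher() // stop the previous watcher before replacing it
	}
	// The bug: gating this assignment on WebhookLabel alone leaks a watcher
	// that was started only because APIServiceLabel was set.
	p.cancelWatcher = cancelWatcher
}

func main() {
	p := &plugin{}
	p.setConfig(config{APIServiceLabel: "spiffe.io/api-service"})
	time.Sleep(10 * time.Millisecond)
	p.setConfig(config{}) // cancels the earlier watcher instead of leaking it
	fmt.Println("reconfigured cleanly")
}
```

Storing the (possibly nil) `cancelWatcher` unconditionally, as above, is equivalent to the diff's widened `if` condition, since the local is only non-nil when a watcher was actually started.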
@@ -21,11 +21,18 @@ public class NotFoundExceptionHandler implements ExceptionMapper<NotFoundExcepti @Override public Response toResponse(NotFoundException ex){ String uri = request.getRequestURI(); + String exMessage = ex.getMessage(); + String outputMessage; + if (exMessage != null && exMessage.startsWith("Datafile")) { + outputMessage = exMessage; + } else { + outputMessage = "endpoint does not exist on this server. Please check your code for typos, or consult our API guide at http://guides.dataverse.org."; + } return Response.status(404) .entity( Json.createObjectBuilder() .add("status", "ERROR") .add("code", 404) - .add("message", "'" + uri + "' endpoint does not exist on this server. Please check your code for typos, or consult our API guide at http://guides.dataverse.org.") + .add("message", "'" + uri + "' " + outputMessage) .build()) .type("application/json").build();
1
package edu.harvard.iq.dataverse.api.errorhandlers; import javax.json.Json; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.NotFoundException; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; /** * Produces custom 404 messages for the API. * @author michael */ @Provider public class NotFoundExceptionHandler implements ExceptionMapper<NotFoundException>{ @Context HttpServletRequest request; @Override public Response toResponse(NotFoundException ex){ String uri = request.getRequestURI(); return Response.status(404) .entity( Json.createObjectBuilder() .add("status", "ERROR") .add("code", 404) .add("message", "'" + uri + "' endpoint does not exist on this server. Please check your code for typos, or consult our API guide at http://guides.dataverse.org.") .build()) .type("application/json").build(); } }
1
37,771
This `startsWith` seems a bit brittle because in the code base we'll occasionally spell it "DataFile".
IQSS-dataverse
java
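The review comment above flags that `exMessage.startsWith("Datafile")` silently misses the "DataFile" spelling used elsewhere in the code base. A case-insensitive prefix check avoids that class of miss; the sketch below illustrates the idea (written in Go to keep the examples in this section in one language — `hasPrefixFold` is a hypothetical helper, not Dataverse code). Matching on a typed exception or an error code would be sturdier still than any string prefix.

```go
package main

import (
	"fmt"
	"strings"
)

// hasPrefixFold reports whether s starts with prefix, ignoring case,
// so "Datafile", "DataFile", and "DATAFILE" all match.
func hasPrefixFold(s, prefix string) bool {
	return len(s) >= len(prefix) && strings.EqualFold(s[:len(prefix)], prefix)
}

func main() {
	for _, msg := range []string{"Datafile 42 not found", "DataFile 42 not found", "no such endpoint"} {
		fmt.Printf("%-25q -> %v\n", msg, hasPrefixFold(msg, "datafile"))
	}
}
```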
@@ -18,5 +18,6 @@ package azkaban.db; public class AzDBUtil { - static final int MAX_DB_RETRY_COUNT = 5; + // A very big Integer + static final int MAX_DB_RETRY_COUNT = 999999; }
1
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ package azkaban.db; public class AzDBUtil { static final int MAX_DB_RETRY_COUNT = 5; }
1
14,593
If it keeps reconnecting for too long, how should we get alerted?
azkaban-azkaban
java
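The comment above asks how anyone gets alerted when the retry count is effectively unbounded (999999). One common answer is to keep retrying but fire an alert once the attempt count crosses a threshold, so a stuck connection surfaces to operators instead of looping quietly. A hedged Go sketch follows; `retryWithAlert` and its parameters are illustrative names, not anything in Azkaban.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// retryWithAlert retries op up to maxAttempts and fires alert once the
// attempt count reaches alertAfter, so long-running failure streaks are
// reported while the retry loop keeps going.
func retryWithAlert(op func() error, maxAttempts, alertAfter int, alert func(attempt int, err error)) error {
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		if attempt == alertAfter {
			alert(attempt, err)
		}
		time.Sleep(50 * time.Millisecond) // fixed backoff keeps the sketch short
	}
	return fmt.Errorf("gave up after %d attempts: %w", maxAttempts, err)
}

func main() {
	flaky := func() error { return errors.New("db unreachable") }
	alert := func(attempt int, err error) {
		fmt.Printf("ALERT: still failing after %d attempts: %v\n", attempt, err)
	}
	fmt.Println(retryWithAlert(flaky, 5, 3, alert))
}
```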
@@ -263,7 +263,11 @@ void* CNSKListenerSrvr::OpenTCPIPSession() //LCOV_EXCL_STOP } - + TCP_SetKeepalive(nSocketFnum, + srvrGlobal->clientKeepaliveStatus, + srvrGlobal->clientKeepaliveIdletime, + srvrGlobal->clientKeepaliveIntervaltime, + srvrGlobal->clientKeepaliveRetrycount); pnode = GTransport.m_TCPIPSystemSrvr_list->ins_node(nSocketFnum); if (pnode == NULL)
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ ********************************************************************/ #include <platform_ndcs.h> #include "errno.h" #include "Transport.h" #include "Listener_srvr.h" #include "TCPIPSystemSrvr.h" #include "FileSystemSrvr.h" #include "Global.h" #include "SrvrConnect.h" #include <arpa/inet.h> #include <sys/socket.h> #include <netdb.h> #include <ifaddrs.h> #include <linux/unistd.h> extern SRVR_GLOBAL_Def *srvrGlobal; extern void SyncPublicationThread(); void CNSKListenerSrvr::closeTCPIPSession(int fnum) { shutdown(fnum, SHUT_RDWR); close(fnum); FD_CLR(fnum, &read_fds_); FD_CLR(fnum, &error_fds_); // if (fnum == max_read_fd_) max_read_fd_--; // max_read_fd_ = m_nListenSocketFnum; max_read_fd_ = pipefd[0]; // m_nListenSocketFnum; } bool CNSKListenerSrvr::ListenToPort(int port) { char tmp[500]; int error; struct sockaddr_in6 *sin6 = NULL; struct sockaddr_in *sin4 = NULL; max_read_fd_ = 0; if (m_nListenSocketFnum < 1) { sprintf(tmp,"ListenToPort[%d][%d]", port, m_nListenSocketFnum ); SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, tmp, O_INIT_PROCESS, F_SOCKET, 0, 0); if (m_bIPv4 == false) { //LCOV_EXCL_START if ((m_nListenSocketFnum = socket(AF_INET6, SOCK_STREAM, 0)) < 0 ) { m_bIPv4 = true; m_nListenSocketFnum = socket(AF_INET, SOCK_STREAM, 0); } //LCOV_EXCL_STOP } else m_nListenSocketFnum = socket(AF_INET, SOCK_STREAM, 0); if (m_nListenSocketFnum < 0) { //LCOV_EXCL_START SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "ListenToPort", O_INIT_PROCESS, F_SOCKET, errno, 0); goto bailout; //LCOV_EXCL_STOP } if(strncmp(m_TcpProcessName,"$ZTC0",5) != 0) { //LCOV_EXCL_START /* * bind to a specific interface (m_TcpProcessName is initialized by default to $ztc0) */ struct ifaddrs *ifa = NULL, *ifp = NULL; bool bFoundInterface = false; if (getifaddrs (&ifp) < 0) { SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "ListenToPort - getifaddrs", O_INIT_PROCESS, F_SOCKET, errno, 0); goto bailout; } for (ifa = ifp; ifa != NULL; ifa = ifa->ifa_next) { if(! 
ifa->ifa_addr) continue; if( (m_bIPv4 == true && ifa->ifa_addr->sa_family != AF_INET) || (m_bIPv4 == false && ifa->ifa_addr->sa_family != AF_INET6) || (strcmp(ifa->ifa_name,m_TcpProcessName) != 0) ) continue; bFoundInterface = true; if(m_bIPv4 == false) { sin6 = (struct sockaddr_in6*)ifa->ifa_addr; memcpy(&m_ListenSocketAddr6,sin6,sizeof(m_ListenSocketAddr6)); m_ListenSocketAddr6.sin6_port = htons((uint16_t) port); break; } else { sin4 = (struct sockaddr_in*)ifa->ifa_addr; memcpy(&m_ListenSocketAddr,sin4,sizeof(m_ListenSocketAddr)); m_ListenSocketAddr.sin_port = htons((in_port_t) port); break; } } // for all interfaces freeifaddrs(ifp); if(!bFoundInterface) { SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "ListenToPort - no matching interface", O_INIT_PROCESS, F_SOCKET, errno, 0); goto bailout; } //LCOV_EXCL_STOP } else { /* * bind to all available interfaces */ if (m_bIPv4 == false) { //LCOV_EXCL_START bzero((char*)&m_ListenSocketAddr6,sizeof(m_ListenSocketAddr6)); m_ListenSocketAddr6.sin6_family = AF_INET6; m_ListenSocketAddr6.sin6_addr = in6addr_any; m_ListenSocketAddr6.sin6_port = htons((uint16_t) port); //LCOV_EXCL_STOP } else { bzero((char*)&m_ListenSocketAddr,sizeof(m_ListenSocketAddr)); m_ListenSocketAddr.sin_family = AF_INET; m_ListenSocketAddr.sin_addr.s_addr = INADDR_ANY; m_ListenSocketAddr.sin_port = htons((in_port_t) port); } } int optVal = 1; error = setsockopt(m_nListenSocketFnum, SOL_SOCKET, SO_REUSEADDR, (char*)&optVal, sizeof(optVal)); if (error != 0) { //LCOV_EXCL_START SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "ListenToPort", O_INIT_PROCESS, F_SETSOCOPT, errno, SO_REUSEADDR); goto bailout; //LCOV_EXCL_STOP } if (m_bIPv4 == false) error = bind(m_nListenSocketFnum, (struct sockaddr *)&m_ListenSocketAddr6, (int)sizeof(m_ListenSocketAddr6)); else error = bind(m_nListenSocketFnum, (struct sockaddr *)&m_ListenSocketAddr, (int)sizeof(m_ListenSocketAddr)); if (error < 0) { //LCOV_EXCL_START SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "ListenToPort", O_INIT_PROCESS, F_BIND, errno, 0); goto bailout; //LCOV_EXCL_STOP } optVal = 1; error = setsockopt(m_nListenSocketFnum, SOL_SOCKET, SO_KEEPALIVE, (char*)&optVal, sizeof(optVal)); if (error != 0) { //LCOV_EXCL_START SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "ListenToPort", O_INIT_PROCESS, F_SETSOCOPT, errno, SO_KEEPALIVE); goto bailout; //LCOV_EXCL_STOP } } error = listen(m_nListenSocketFnum, 100); FD_ZERO(&read_fds_); FD_ZERO(&error_fds_); if(error >= 0) { FD_SET(m_nListenSocketFnum,&read_fds_); FD_SET(m_nListenSocketFnum,&error_fds_); // Keep track of highest socket file descriptor, for use in "select" if (m_nListenSocketFnum > max_read_fd_) max_read_fd_ = m_nListenSocketFnum; } else { //LCOV_EXCL_START SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, E_LISTENER, "ListenToPort", O_INIT_PROCESS, F_ACCEPT, errno, 0); goto bailout; //LCOV_EXCL_STOP } // If tracing is enabled, display trace info indicating new "listen" LISTEN_ON_SOCKET((short)m_nListenSocketFnum); return true; bailout: if (m_nListenSocketFnum > 0) GTransport.m_TCPIPSystemSrvr_list->del_node(m_nListenSocketFnum); // closeTCPIPSession(m_nListenSocketFnum); m_nListenSocketFnum = -2; sprintf(tmp,"bailout ListenToPort[%d][%d]", port, m_nListenSocketFnum ); SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, tmp, O_INIT_PROCESS, F_SOCKET, 0, 0); return false; } void* CNSKListenerSrvr::OpenTCPIPSession() { CTCPIPSystemSrvr* pnode = NULL; int error; int nSocketFnum = -2; if (m_bIPv4 == false) { //LCOV_EXCL_START 
m_nAcceptFromSocketAddrLen = sizeof(m_AcceptFromSocketAddr6); nSocketFnum = accept(m_nListenSocketFnum, (sockaddr*)&m_AcceptFromSocketAddr6, (socklen_t *)&m_nAcceptFromSocketAddrLen); //LCOV_EXCL_STOP } else { m_nAcceptFromSocketAddrLen = sizeof(m_AcceptFromSocketAddr); nSocketFnum = accept(m_nListenSocketFnum, (sockaddr*)&m_AcceptFromSocketAddr, (socklen_t *)&m_nAcceptFromSocketAddrLen); } if(nSocketFnum == -1) { //LCOV_EXCL_START SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "OpenTCPIPSession", O_INIT_PROCESS, F_ACCEPT, errno, 0); goto bailout; //LCOV_EXCL_STOP } pnode = GTransport.m_TCPIPSystemSrvr_list->ins_node(nSocketFnum); if (pnode == NULL) { //LCOV_EXCL_START SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "OpenTCPIPSession", O_INIT_PROCESS, F_INS_NODE, SRVR_ERR_MEMORY_ALLOCATE, 0); goto bailout; //LCOV_EXCL_STOP } // clear/zero the set FD_ZERO(&read_fds_); FD_ZERO(&error_fds_); // (re)set the listening socket FD_SET(m_nListenSocketFnum,&read_fds_); FD_SET(m_nListenSocketFnum,&error_fds_); // (re) set the dummy pipe-read-fd FD_SET(pipefd[0],&read_fds_); FD_SET(pipefd[0],&error_fds_); //set the connected socket FD_SET(pnode->m_nSocketFnum,&read_fds_); FD_SET(pnode->m_nSocketFnum,&error_fds_); if (pnode->m_nSocketFnum > max_read_fd_) max_read_fd_ = pnode->m_nSocketFnum; m_nSocketFnum = (short) nSocketFnum; return pnode; bailout: if (pnode != NULL) GTransport.m_TCPIPSystemSrvr_list->del_node(nSocketFnum); SRVR::BreakDialogue(NULL); return NULL; } void * CNSKListenerSrvr::tcpip_listener(void *arg) { // Parameter is the CNSKListenerSrvr object CNSKListenerSrvr *listener = (CNSKListenerSrvr *) arg; int numReadyFds; int handledFds; ssize_t countRead; CTCPIPSystemSrvr* pnode=NULL; fd_set temp_read_fds, temp_error_fds; struct timeval timeout; struct timeval *pTimeout; msg_enable_open_cleanup(); file_enable_open_cleanup(); //create a the dummy pipe int rc = pipe(listener->pipefd); if (rc < 0) { listener->TRACE_UNKNOWN_INPUT(); SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, E_SERVER,"tcpip_listener", O_PIPE, F_INIT_PIPE,SRVR_ERR_UNKNOWN_REQUEST,0); listener->TCP_TRACE_OUTPUT_R0(); } FD_SET(listener->pipefd[0],&listener->read_fds_); FD_SET(listener->pipefd[0],&listener->error_fds_); if (listener->pipefd[0] > listener->max_read_fd_) listener->max_read_fd_ = listener->pipefd[0]; // Persistently wait for input on sockets and then act on it. 
while(listener->m_bTCPThreadKeepRunning) { // Wait for ready-to-read on any of the tcpip ports memcpy(&temp_read_fds, &listener->read_fds_, sizeof(temp_read_fds)); memcpy(&temp_error_fds, &listener->error_fds_, sizeof(temp_error_fds)); long connIdleTimeout = SRVR::getConnIdleTimeout(); long srvrIdleTimeout = SRVR::getSrvrIdleTimeout(); bool connIdleTimer = false; bool srvrIdleTimer = false; if (srvrGlobal->srvrState == SRVR_CONNECTED) { if (connIdleTimeout != INFINITE_CONN_IDLE_TIMEOUT) { timeout.tv_sec = connIdleTimeout; timeout.tv_usec = 0; connIdleTimer = true; pTimeout = &timeout; } else { timeout.tv_sec = 0; timeout.tv_usec = 0; pTimeout = NULL; } } else { if (srvrIdleTimeout != INFINITE_SRVR_IDLE_TIMEOUT) { timeout.tv_sec = srvrIdleTimeout; timeout.tv_usec = 0; srvrIdleTimer = true; pTimeout = &timeout; } else { timeout.tv_sec = 0; timeout.tv_usec = 0; pTimeout = NULL; } } numReadyFds = select(listener->max_read_fd_+1, &temp_read_fds, NULL,&temp_error_fds, pTimeout); srvrGlobal->mutex->lock(); if (numReadyFds == -1) { if (errno == EINTR) { srvrGlobal->mutex->unlock(); continue; } else { SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, E_SERVER,"tcpip_listener", O_SELECT, F_SELECT,errno,numReadyFds); abort(); } } if (numReadyFds == 0) //Timeout expired { if (connIdleTimer) SRVR::BreakDialogue(NULL); else if (srvrIdleTimer) SRVR::srvrIdleTimerExpired(NULL); else { SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, E_SERVER,"tcpip_listener", O_SELECT, F_SELECT,errno,numReadyFds); abort(); } } else { // Handle all ready-to-read file descriptors handledFds = 0; if(FD_ISSET(listener->pipefd[0], &temp_read_fds)) { //dummy write, exit the loop listener->m_bTCPThreadKeepRunning = false; srvrGlobal->mutex->unlock(); break; } else if (FD_ISSET(listener->m_nListenSocketFnum,&temp_read_fds)) { // Initiate a new client session listener->OpenTCPIPSession(); listener->TRACE_INPUT((short)listener->m_nListenSocketFnum, 0, 0, 0); handledFds++; } else if ((pnode=GTransport.m_TCPIPSystemSrvr_list->m_current_node) != NULL && FD_ISSET(pnode->m_nSocketFnum,&temp_read_fds)) { short retries = 0; do { countRead = recv(pnode->m_nSocketFnum, pnode->m_IObuffer, MAX_TCP_BUFFER_LENGTH, 0); } while ((countRead < 0) && (errno == EINTR) && (retries++ < 3)); if (countRead <= 0) { GTransport.m_TCPIPSystemSrvr_list->del_node(pnode->m_nSocketFnum); SRVR::BreakDialogue(NULL); } else { pnode->m_rlength = countRead; if (listener->CheckTCPIPRequest(pnode) == NULL) { SRVR::BreakDialogue(NULL); } } handledFds++; } else { listener->TRACE_UNKNOWN_INPUT(); SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, E_SERVER,"tcpip_listener", O_SELECT, F_FD_ISSET,SRVR_ERR_UNKNOWN_REQUEST, -2); listener->TCP_TRACE_OUTPUT_R0(); handledFds++; } if(handledFds != numReadyFds) { listener->TRACE_UNKNOWN_INPUT(); SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, E_SERVER,"tcpip_listener", O_SELECT, F_FD_ISSET,SRVR_ERR_UNKNOWN_REQUEST,0); listener->TCP_TRACE_OUTPUT_R0(); } } srvrGlobal->mutex->unlock(); } //while(listener->m_bTCPThreadKeepRunning) return NULL; } int CNSKListenerSrvr::runProgram(char* TcpProcessName, long port, int TransportTrace) { short fnum,error; _cc_status cc; short timeout; unsigned short countRead; SB_Tag_Type tag; sprintf(m_TcpProcessName,"%s",TcpProcessName); m_port = port; INITIALIZE_TRACE(TransportTrace); if ((error = FILE_OPEN_("$RECEIVE",8,&m_ReceiveFnum, 0, 0, 1, 4000)) != 0) { SET_ERROR((long)0, NSK, FILE_SYSTEM, UNKNOWN_API, E_SERVER, "runProgram", O_INIT_PROCESS, F_FILE_OPEN_, error, 0); return error; } if (ListenToPort(port) == false) 
return SRVR_ERR_LISTENER_ERROR1; READUPDATEX(m_ReceiveFnum, m_RequestBuf, MAX_BUFFER_LENGTH ); // Register with association server SRVR::RegisterSrvr(srvrGlobal->IpAddress, srvrGlobal->HostName); // Start tcpip listener thread tcpip_tid = tcpip_listener_thr.create("TCPIP_listener", CNSKListenerSrvr::tcpip_listener, this); // Persistently wait for input on $RECEIVE and then act on it. while(m_bKeepRunning) { RESET_ERRORS((long)0); timeout = -1; fnum = m_ReceiveFnum; cc = AWAITIOX(&fnum, OMITREF, &countRead, &tag, timeout); if (_status_lt(cc)) // some error or XCANCEL { //LCOV_EXCL_START error=0; XFILE_GETINFO_(fnum, &error); if (error == 26) // XCANCEL was called { //join the tcpip thread if(tcpip_tid != 0) tcpip_listener_thr.join(tcpip_tid,NULL); m_bKeepRunning = false; break; } //LCOV_EXCL_STOP } TRACE_INPUT(fnum,countRead,tag,cc); if (fnum == m_ReceiveFnum) { ADD_ONE_TO_HANDLE(&m_call_id); CheckReceiveMessage(cc, countRead, &m_call_id); READUPDATEX(m_ReceiveFnum, m_RequestBuf, MAX_BUFFER_LENGTH ); FS_TRACE_OUTPUT(cc); } else { //LCOV_EXCL_START TRACE_UNKNOWN_INPUT(); SET_ERROR((long)0, NSK, TCPIP, UNKNOWN_API, E_SERVER, "runProgram", O_DO_WRITE_READ, F_FILE_COMPLETE, SRVR_ERR_UNKNOWN_REQUEST, fnum); //LCOV_EXCL_STOP } } return 0; } void CNSKListenerSrvr::SYSTEM_SNAMP(FILE* fp) { short info_ele; char obuffer[1000]; char* pbuffer = obuffer; int ip; ip=sprintf(pbuffer,"\t<----SYSTEM SNAP---->\n"); pbuffer +=ip; ip=sprintf(pbuffer,"\t\t%15.15s\t\t=\t\t%s(%d)\n","srvrState",frmt_serverstate(srvrGlobal->srvrState),srvrGlobal->srvrState); pbuffer +=ip; pbuffer = GTransport.m_FSystemSrvr_list->enum_nodes(pbuffer,fp); pbuffer = GTransport.m_TCPIPSystemSrvr_list->enum_nodes(pbuffer,fp); fwrite(obuffer, strlen(obuffer),1,fp); fwrite("\r\n",2,1,fp); fflush(fp); } void CNSKListenerSrvr::terminateThreads(int status) { // m_bKeepRunning = false; // break out of $RECEIVE and listen loop char dummyWriteBuffer[100]; // Calling sync of repository thread here instead of exitServerProcess() since // this also takes care of the case when the process is stopped via a system message. SyncPublicationThread(); if(syscall(__NR_gettid) == srvrGlobal->receiveThrId) { // we're in the $recv thread // If the tcp/ip thread is processing a request, the mutex will be locked // in which case, we'll wait for that request to complete. 
Once the request // is complete, the listen loop will exit out because m_bKeepRunning is false // If we're able to acquire the lock rightaway, it means the tcp/ip thread is // waiting on a select - we can then safely terminate the thread /* if(tcpip_tid != 0 && srvrGlobal->mutex->trylock() == 0) tcpip_listener_thr.cancel(tcpip_tid); */ // Dummy write if((tcpip_tid != 0) && (pipefd[1] != 0)) { strcpy(dummyWriteBuffer, "bye-bye tcp/ip thread!"); write(pipefd[1], dummyWriteBuffer, strlen(dummyWriteBuffer)); } //Wait tcpip thread to exit if(tcpip_tid != 0) tcpip_listener_thr.join(tcpip_tid,NULL); } else { // we're in the tcp/ip thread - we can just cancel the outstanding // readupdate posted on $receive and exit the thread int cc = XCANCEL(m_ReceiveFnum); tcpip_listener_thr.exit(NULL); } } bool CNSKListenerSrvr::verifyPortAvailable(const char * idForPort, int port) { char tmp[500]; int error; struct sockaddr_in6 *sin6 = NULL; struct sockaddr_in *sin4 = NULL; max_read_fd_ = 0; if (m_bIPv4 == false) { if ((m_nListenSocketFnum = socket(AF_INET6, SOCK_STREAM, 0)) < 0 ) { m_bIPv4 = true; m_nListenSocketFnum = socket(AF_INET, SOCK_STREAM, 0); } } else m_nListenSocketFnum = socket(AF_INET, SOCK_STREAM, 0); if (m_nListenSocketFnum < 0) { SET_WARNING((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "verifyPortAvailable", O_INIT_PROCESS, F_SOCKET, errno, 0); return false; } /* * bind to all available interfaces */ if (m_bIPv4 == false) { bzero((char*)&m_ListenSocketAddr6,sizeof(m_ListenSocketAddr6)); m_ListenSocketAddr6.sin6_family = AF_INET6; m_ListenSocketAddr6.sin6_addr = in6addr_any; m_ListenSocketAddr6.sin6_port = htons((uint16_t) port); } else { bzero((char*)&m_ListenSocketAddr,sizeof(m_ListenSocketAddr)); m_ListenSocketAddr.sin_family = AF_INET; m_ListenSocketAddr.sin_addr.s_addr = INADDR_ANY; m_ListenSocketAddr.sin_port = htons((in_port_t) port); } int optVal = 1; error = setsockopt(m_nListenSocketFnum, SOL_SOCKET, SO_REUSEADDR, (char*)&optVal, sizeof(optVal)); if (error != 0) { SET_WARNING((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "verifyPortAvailable", O_INIT_PROCESS, F_SETSOCOPT, errno, SO_REUSEADDR); return false; } if (m_bIPv4 == false) error = bind(m_nListenSocketFnum, (struct sockaddr *)&m_ListenSocketAddr6, (int)sizeof(m_ListenSocketAddr6)); else error = bind(m_nListenSocketFnum, (struct sockaddr *)&m_ListenSocketAddr, (int)sizeof(m_ListenSocketAddr)); if (error < 0) { sprintf(tmp,"verifyPortAvailable:[%d]",port); SET_WARNING((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, tmp, O_INIT_PROCESS, F_BIND, errno, 0); return false; } optVal = 1; error = setsockopt(m_nListenSocketFnum, SOL_SOCKET, SO_KEEPALIVE, (char*)&optVal, sizeof(optVal)); if (error != 0) { SET_WARNING((long)0, NSK, TCPIP, UNKNOWN_API, errorType_, "verifyPortAvailable", O_INIT_PROCESS, F_SETSOCOPT, errno, SO_KEEPALIVE); return false; } return true; }
1
20,377
It's better to use a single naming style within one name (e.g., `clientKeepaliveIdleTime` rather than `clientKeepaliveIdletime`).
apache-trafodion
cpp
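The diff above wires per-connection TCP keepalive (idle time, probe interval, retry count) right after `accept`. For comparison, the sketch below shows the same idea in Go using only standard-library calls; note that Go portably exposes just the enable flag and the idle period (`SetKeepAlive`, `SetKeepAlivePeriod`), while tuning the probe interval and retry count, as the diff does, needs platform-specific `setsockopt` calls — so this is a partial analogue, not a drop-in equivalent.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// enableKeepalive turns on TCP keepalive for an accepted connection and
// sets the idle period before the first probe is sent.
func enableKeepalive(conn net.Conn, idle time.Duration) error {
	tcp, ok := conn.(*net.TCPConn)
	if !ok {
		return fmt.Errorf("not a TCP connection")
	}
	if err := tcp.SetKeepAlive(true); err != nil {
		return err
	}
	return tcp.SetKeepAlivePeriod(idle)
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	go net.Dial("tcp", ln.Addr().String()) // throwaway client for the demo

	conn, err := ln.Accept()
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	if err := enableKeepalive(conn, 30*time.Second); err != nil {
		panic(err)
	}
	fmt.Println("keepalive enabled on", conn.RemoteAddr())
}
```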
@@ -70,7 +70,7 @@ bool EprosimaServer::init() { //CREATE RTPSParticipant ParticipantAttributes PParam; - PParam.rtps.defaultSendPort = 10042; + //PParam.rtps.defaultSendPort = 10042; // TODO Create transport? PParam.rtps.builtin.domainId = 80; PParam.rtps.builtin.use_SIMPLE_EndpointDiscoveryProtocol = true; PParam.rtps.builtin.use_SIMPLE_RTPSParticipantDiscoveryProtocol = true;
1
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @file EprosimaServer.cpp * */ #include "EprosimaServer.h" #include "fastrtps/fastrtps_all.h" using namespace eprosima::fastrtps; using namespace eprosima::fastrtps::rtps; using namespace clientserver; using namespace std; EprosimaServer::EprosimaServer(): mp_operation_sub(nullptr), mp_result_pub(nullptr), mp_participant(nullptr), mp_resultdatatype(nullptr), mp_operationdatatype(nullptr), m_n_served(0), m_operationsListener(nullptr), m_resultsListener(nullptr) { m_operationsListener.mp_up = this; m_resultsListener.mp_up = this; } EprosimaServer::~EprosimaServer() { if(mp_participant!=nullptr) { Domain::removeParticipant(mp_participant); } if(mp_resultdatatype!=nullptr) delete(mp_resultdatatype); if(mp_operationdatatype!=nullptr) delete(mp_operationdatatype); } void EprosimaServer::serve() { cout << "Enter a number to stop the server: "; int aux; std::cin >> aux; } void EprosimaServer::serve(uint32_t samples) { while(m_n_served < samples) eClock::my_sleep(100); } bool EprosimaServer::init() { //CREATE RTPSParticipant ParticipantAttributes PParam; PParam.rtps.defaultSendPort = 10042; PParam.rtps.builtin.domainId = 80; PParam.rtps.builtin.use_SIMPLE_EndpointDiscoveryProtocol = true; PParam.rtps.builtin.use_SIMPLE_RTPSParticipantDiscoveryProtocol = true; PParam.rtps.builtin.m_simpleEDP.use_PublicationReaderANDSubscriptionWriter = true; PParam.rtps.builtin.m_simpleEDP.use_PublicationWriterANDSubscriptionReader = true; PParam.rtps.builtin.leaseDuration = c_TimeInfinite; PParam.rtps.setName("server_RTPSParticipant"); mp_participant = Domain::createParticipant(PParam); if(mp_participant == nullptr) return false; //REGISTER TYPES mp_resultdatatype = new ResultDataType(); mp_operationdatatype = new OperationDataType(); Domain::registerType(mp_participant,mp_resultdatatype); Domain::registerType(mp_participant,mp_operationdatatype); // DATA PUBLISHER PublisherAttributes PubDataparam; PubDataparam.topic.topicDataType = "Result"; PubDataparam.topic.topicKind = NO_KEY; PubDataparam.topic.topicName = "Results"; PubDataparam.topic.historyQos.kind = KEEP_LAST_HISTORY_QOS; PubDataparam.topic.historyQos.depth = 1000; PubDataparam.topic.resourceLimitsQos.max_samples = 1500; PubDataparam.topic.resourceLimitsQos.allocated_samples = 1000; PubDataparam.qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS; mp_result_pub = Domain::createPublisher(mp_participant,PubDataparam,(PublisherListener*)&this->m_resultsListener); if(mp_result_pub == nullptr) return false; //DATA SUBSCRIBER SubscriberAttributes SubDataparam; Locator_t loc; loc.set_port(7555); PubDataparam.unicastLocatorList.push_back(loc); SubDataparam.topic.topicDataType = "Operation"; SubDataparam.topic.topicKind = NO_KEY; SubDataparam.topic.topicName = "Operations"; SubDataparam.topic.historyQos.kind = KEEP_LAST_HISTORY_QOS; SubDataparam.topic.historyQos.depth = 1000; SubDataparam.topic.resourceLimitsQos.max_samples = 1500; 
SubDataparam.topic.resourceLimitsQos.allocated_samples = 1000; mp_operation_sub = Domain::createSubscriber(mp_participant,SubDataparam,(SubscriberListener*)&this->m_operationsListener); if(mp_operation_sub == nullptr) return false; return true; } Result::RESULTTYPE EprosimaServer::calculate(Operation::OPERATIONTYPE type, int32_t num1, int32_t num2, int32_t* result) { switch(type) { case Operation::SUBTRACTION: { *result = num1-num2; break; } case Operation::ADDITION: { *result = num1+num2; break; } case Operation::MULTIPLICATION: { *result = num1*num2; break; } case Operation::DIVISION: { if(num2 == 0) return Result::ERROR_RESULT; break; } } return Result::GOOD_RESULT; } void EprosimaServer::OperationListener::onNewDataMessage(Subscriber*) { mp_up->mp_operation_sub->takeNextData((void*)&m_operation,&m_sampleInfo); if(m_sampleInfo.sampleKind == ALIVE) { ++mp_up->m_n_served; m_result.m_guid = m_operation.m_guid; m_result.m_operationId = m_operation.m_operationId; m_result.m_result = 0; m_result.m_resultType = mp_up->calculate(m_operation.m_operationType, m_operation.m_num1,m_operation.m_num2,&m_result.m_result); mp_up->mp_result_pub->write((void*)&m_result); } } void EprosimaServer::OperationListener::onSubscriptionMatched(Subscriber*, MatchingInfo&) { } void EprosimaServer::ResultListener::onPublicationMatched(Publisher*, MatchingInfo&) { }
1
12,893
As `defaultSendPort` is being removed, and I don't like TODOs in examples, please remove the whole line.
eProsima-Fast-DDS
cpp
@@ -95,7 +95,8 @@ func (cf CloudFormation) DelegateDNSPermissions(project *archer.Project, account deployProject.DNSDelegationAccounts = append(dnsDelegatedAccounts, accountID) updatedProjectConfig := stack.NewProjectStackConfig(&deployProject) - if err := cf.update(updatedProjectConfig); err != nil { + // swallow the errChangeSetEmpty error since it just means there were no updates needed. + if err := cf.update(updatedProjectConfig); err != nil && err != errChangeSetEmpty { return fmt.Errorf("updating project to allow DNS delegation: %w", err) }
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cloudformation import ( "context" "errors" "fmt" "time" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy/cloudformation/stack" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface" ) const ( maxDeleteStackSetAttempts = 10 deleteStackSetSleepDuration = 30 * time.Second ) // DeployProject sets up everything required for our project-wide resources. // These resources include things that are regional, rather than scoped to a particular // environment, such as ECR Repos, CodePipeline KMS keys & S3 buckets. // We deploy project resources through StackSets - that way we can have one // template that we update and all regional stacks are updated. func (cf CloudFormation) DeployProject(in *deploy.CreateProjectInput) error { projectConfig := stack.NewProjectStackConfig(in) // First deploy the project roles needed by StackSets. These roles // allow the stack set to set up our regional stacks. if err := cf.create(projectConfig); err == nil { _, err := cf.waitForStackCreation(projectConfig) if err != nil { return err } } else { // If the stack already exists - we can move on // to creating the StackSet. var alreadyExists *ErrStackAlreadyExists if !errors.As(err, &alreadyExists) { return err } } blankProjectTemplate, err := projectConfig.ResourceTemplate(&stack.ProjectResourcesConfig{ Project: projectConfig.Project, }) if err != nil { return err } _, err = cf.client.CreateStackSet(&cloudformation.CreateStackSetInput{ Description: aws.String(projectConfig.StackSetDescription()), StackSetName: aws.String(projectConfig.StackSetName()), TemplateBody: aws.String(blankProjectTemplate), ExecutionRoleName: aws.String(projectConfig.StackSetExecutionRoleName()), AdministrationRoleARN: aws.String(projectConfig.StackSetAdminRoleARN()), Tags: projectConfig.Tags(), }) if err != nil && !stackSetExists(err) { return err } return nil } // DelegateDNSPermissions grants the provided account ID the ability to write to this project's // DNS HostedZone. This allows us to perform cross account DNS delegation. func (cf CloudFormation) DelegateDNSPermissions(project *archer.Project, accountID string) error { deployProject := deploy.CreateProjectInput{ Project: project.Name, AccountID: project.AccountID, DomainName: project.Domain, } projectConfig := stack.NewProjectStackConfig(&deployProject) describeStack := cloudformation.DescribeStacksInput{ StackName: aws.String(projectConfig.StackName()), } projectStack, err := cf.describeStack(&describeStack) if err != nil { return fmt.Errorf("getting existing project infrastructure stack: %w", err) } dnsDelegatedAccounts := stack.DNSDelegatedAccountsForStack(projectStack) deployProject.DNSDelegationAccounts = append(dnsDelegatedAccounts, accountID) updatedProjectConfig := stack.NewProjectStackConfig(&deployProject) if err := cf.update(updatedProjectConfig); err != nil { return fmt.Errorf("updating project to allow DNS delegation: %w", err) } return cf.client.WaitUntilStackUpdateCompleteWithContext(context.Background(), &describeStack, cf.waiters...) } // GetProjectResourcesByRegion fetches all the regional resources for a particular region. 
func (cf CloudFormation) GetProjectResourcesByRegion(project *archer.Project, region string) (*archer.ProjectRegionalResources, error) { resources, err := cf.getResourcesForStackInstances(project, &region) if err != nil { return nil, fmt.Errorf("describing project resources: %w", err) } if len(resources) == 0 { return nil, fmt.Errorf("no regional resources for project %s in region %s found", project.Name, region) } return resources[0], nil } // GetRegionalProjectResources fetches all the regional resources for a particular project. func (cf CloudFormation) GetRegionalProjectResources(project *archer.Project) ([]*archer.ProjectRegionalResources, error) { resources, err := cf.getResourcesForStackInstances(project, nil) if err != nil { return nil, fmt.Errorf("describing project resources: %w", err) } return resources, nil } func (cf CloudFormation) getResourcesForStackInstances(project *archer.Project, region *string) ([]*archer.ProjectRegionalResources, error) { projectConfig := stack.NewProjectStackConfig(&deploy.CreateProjectInput{ Project: project.Name, AccountID: project.AccountID, }) listStackInstancesInput := &cloudformation.ListStackInstancesInput{ StackSetName: aws.String(projectConfig.StackSetName()), StackInstanceAccount: aws.String(project.AccountID), } if region != nil { listStackInstancesInput.StackInstanceRegion = region } stackInstances, err := cf.client.ListStackInstances(listStackInstancesInput) if err != nil { return nil, fmt.Errorf("listing stack instances: %w", err) } regionalResources := []*archer.ProjectRegionalResources{} for _, stackInstance := range stackInstances.Summaries { // Since these stacks will likely be in another region, we can't use // the default cf client. Instead, we'll have to create a new client // configured with the stack's region. regionAwareCFClient := cf.regionalClientProvider.Client(*stackInstance.Region) cfStack, err := cf.describeStackWithClient(&cloudformation.DescribeStacksInput{ StackName: stackInstance.StackId, }, regionAwareCFClient) if err != nil { return nil, fmt.Errorf("getting outputs for stack %s in region %s: %w", *stackInstance.StackId, *stackInstance.Region, err) } regionalResource, err := stack.ToProjectRegionalResources(cfStack) if err != nil { return nil, err } regionalResource.Region = *stackInstance.Region regionalResources = append(regionalResources, regionalResource) } return regionalResources, nil } // AddAppToProject attempts to add new App specific resources to the Project resource stack. // Currently, this means that we'll set up an ECR repo with a policy for all envs to be able // to pull from it. func (cf CloudFormation) AddAppToProject(project *archer.Project, appName string) error { projectConfig := stack.NewProjectStackConfig(&deploy.CreateProjectInput{ Project: project.Name, AccountID: project.AccountID, }) previouslyDeployedConfig, err := cf.getLastDeployedProjectConfig(projectConfig) if err != nil { return fmt.Errorf("adding %s app resources to project %s: %w", appName, project.Name, err) } // We'll generate a new list of Accounts to add to our project // infrastructure by appending the environment's account if it // doesn't already exist. 
var appList []string shouldAddNewApp := true for _, app := range previouslyDeployedConfig.Apps { appList = append(appList, app) if app == appName { shouldAddNewApp = false } } if !shouldAddNewApp { return nil } appList = append(appList, appName) newDeploymentConfig := stack.ProjectResourcesConfig{ Version: previouslyDeployedConfig.Version + 1, Apps: appList, Accounts: previouslyDeployedConfig.Accounts, Project: projectConfig.Project, } if err := cf.deployProjectConfig(projectConfig, &newDeploymentConfig); err != nil { return fmt.Errorf("adding %s app resources to project: %w", appName, err) } return nil } // RemoveAppFromProject attempts to remove App specific resources (ECR repositories) from the Project resource stack. func (cf CloudFormation) RemoveAppFromProject(project *archer.Project, appName string) error { projectConfig := stack.NewProjectStackConfig(&deploy.CreateProjectInput{ Project: project.Name, AccountID: project.AccountID, }) previouslyDeployedConfig, err := cf.getLastDeployedProjectConfig(projectConfig) if err != nil { return fmt.Errorf("get previous project %s config: %w", project.Name, err) } // We'll generate a new list of Accounts to remove the account associated // with the input app to be removed. var appList []string shouldRemoveApp := false for _, app := range previouslyDeployedConfig.Apps { if app == appName { shouldRemoveApp = true continue } appList = append(appList, app) } if !shouldRemoveApp { return nil } newDeploymentConfig := stack.ProjectResourcesConfig{ Version: previouslyDeployedConfig.Version + 1, Apps: appList, Accounts: previouslyDeployedConfig.Accounts, Project: projectConfig.Project, } if err := cf.deployProjectConfig(projectConfig, &newDeploymentConfig); err != nil { return fmt.Errorf("removing %s app resources from project: %w", appName, err) } return nil } // AddEnvToProject takes a new environment and updates the Project configuration // with new Account IDs in resource policies (KMS Keys and ECR Repos) - and // sets up a new stack instance if the environment is in a new region. func (cf CloudFormation) AddEnvToProject(project *archer.Project, env *archer.Environment) error { projectConfig := stack.NewProjectStackConfig(&deploy.CreateProjectInput{ Project: project.Name, AccountID: project.AccountID, }) previouslyDeployedConfig, err := cf.getLastDeployedProjectConfig(projectConfig) if err != nil { return fmt.Errorf("getting previous deployed stackset %w", err) } // We'll generate a new list of Accounts to add to our project // infrastructure by appending the environment's account if it // doesn't already exist. 
var accountList []string shouldAddNewAccountID := true for _, accountID := range previouslyDeployedConfig.Accounts { accountList = append(accountList, accountID) if accountID == env.AccountID { shouldAddNewAccountID = false } } if shouldAddNewAccountID { accountList = append(accountList, env.AccountID) } newDeploymentConfig := stack.ProjectResourcesConfig{ Version: previouslyDeployedConfig.Version + 1, Apps: previouslyDeployedConfig.Apps, Accounts: accountList, Project: projectConfig.Project, } if err := cf.deployProjectConfig(projectConfig, &newDeploymentConfig); err != nil { return fmt.Errorf("adding %s environment resources to project: %w", env.Name, err) } if err := cf.addNewProjectStackInstances(projectConfig, env.Region); err != nil { return fmt.Errorf("adding new stack instance for environment %s: %w", env.Name, err) } return nil } var getRegionFromClient = func(client cloudformationiface.CloudFormationAPI) (string, error) { concrete, ok := client.(*cloudformation.CloudFormation) if !ok { return "", errors.New("failed to retrieve the region") } return *concrete.Client.Config.Region, nil } // AddPipelineResourcesToProject conditionally adds resources needed to support // a pipeline in the project region (i.e. the same region that hosts our SSM store). // This is necessary because the project region might not contain any environment. func (cf CloudFormation) AddPipelineResourcesToProject( project *archer.Project, projectRegion string) error { projectConfig := stack.NewProjectStackConfig(&deploy.CreateProjectInput{ Project: project.Name, AccountID: project.AccountID, }) // conditionally create a new stack instance in the project region // if there's no existing stack instance. if err := cf.addNewProjectStackInstances(projectConfig, projectRegion); err != nil { return fmt.Errorf("failed to add stack instance for pipeline, project: %s, region: %s, error: %w", project.Name, projectRegion, err) } return nil } func (cf CloudFormation) deployProjectConfig(projectConfig *stack.ProjectStackConfig, resources *stack.ProjectResourcesConfig) error { newTemplateToDeploy, err := projectConfig.ResourceTemplate(resources) if err != nil { return err } // Every time we deploy the StackSet, we include a version field in the stack metadata. // When we go to update the StackSet, we include that version + 1 as the "Operation ID". // This ensures that we don't overwrite any changes that may have been applied between // us reading the stack and actually updating it. // As an example: // * We read the stack with Version 1 // * Someone else reads the stack with Version 1 // * We update the StackSet with Version 2, the update completes. // * Someone else tries to update the StackSet with their stale version 2. // * "2" has already been used as an operation ID, and the stale write fails. 
input := cloudformation.UpdateStackSetInput{ TemplateBody: aws.String(newTemplateToDeploy), OperationId: aws.String(fmt.Sprintf("%d", resources.Version)), StackSetName: aws.String(projectConfig.StackSetName()), Description: aws.String(projectConfig.StackSetDescription()), ExecutionRoleName: aws.String(projectConfig.StackSetExecutionRoleName()), AdministrationRoleARN: aws.String(projectConfig.StackSetAdminRoleARN()), Tags: projectConfig.Tags(), } output, err := cf.client.UpdateStackSet(&input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case cloudformation.ErrCodeOperationIdAlreadyExistsException, cloudformation.ErrCodeOperationInProgressException, cloudformation.ErrCodeStaleRequestException: return &ErrStackSetOutOfDate{projectName: projectConfig.Project, parentErr: err} } } return fmt.Errorf("updating project resources: %w", err) } return cf.waitForStackSetOperation(projectConfig.StackSetName(), *output.OperationId) } // addNewStackInstances takes an environment and determines if we need to create a new // stack instance. We only spin up a new stack instance if the env is in a new region. func (cf CloudFormation) addNewProjectStackInstances(projectConfig *stack.ProjectStackConfig, region string) error { stackInstances, err := cf.client.ListStackInstances(&cloudformation.ListStackInstancesInput{ StackSetName: aws.String(projectConfig.StackSetName()), }) if err != nil { return fmt.Errorf("fetching existing project stack instances: %w", err) } // We only want to deploy a new StackInstance if we're // adding an environment in a new region. shouldDeployNewStackInstance := true for _, stackInstance := range stackInstances.Summaries { if *stackInstance.Region == region { shouldDeployNewStackInstance = false } } if !shouldDeployNewStackInstance { return nil } // Set up a new Stack Instance for the new region. The Stack Instance will inherit // the latest StackSet template. createStacksOutput, err := cf.client.CreateStackInstances(&cloudformation.CreateStackInstancesInput{ Accounts: []*string{aws.String(projectConfig.AccountID)}, Regions: []*string{aws.String(region)}, StackSetName: aws.String(projectConfig.StackSetName()), }) if err != nil { return fmt.Errorf("creating new project stack instances: %w", err) } return cf.waitForStackSetOperation(projectConfig.StackSetName(), *createStacksOutput.OperationId) } func (cf CloudFormation) getLastDeployedProjectConfig(projectConfig *stack.ProjectStackConfig) (*stack.ProjectResourcesConfig, error) { // Check the existing deploy stack template. From that template, we'll parse out the list of apps and accounts that // are deployed in the stack. 
describeOutput, err := cf.client.DescribeStackSet(&cloudformation.DescribeStackSetInput{ StackSetName: aws.String(projectConfig.StackSetName()), }) if err != nil { return nil, fmt.Errorf("describe stack set: %w", err) } previouslyDeployedConfig, err := stack.ProjectConfigFrom(describeOutput.StackSet.TemplateBody) if err != nil { return nil, fmt.Errorf("parse previous deployed stackset %w", err) } return previouslyDeployedConfig, nil } func (cf CloudFormation) waitForStackSetOperation(stackSetName, operationID string) error { for { response, err := cf.client.DescribeStackSetOperation(&cloudformation.DescribeStackSetOperationInput{ OperationId: aws.String(operationID), StackSetName: aws.String(stackSetName), }) if err != nil { return fmt.Errorf("fetching stack set operation status: %w", err) } if *response.StackSetOperation.Status == "STOPPED" { return fmt.Errorf("project operation %s in stack set %s was manually stopped", operationID, stackSetName) } if *response.StackSetOperation.Status == "FAILED" { return fmt.Errorf("project operation %s in stack set %s failed", operationID, stackSetName) } if *response.StackSetOperation.Status == "SUCCEEDED" { return nil } time.Sleep(3 * time.Second) } } // DeleteProject deletes all project specific StackSet and Stack resources. func (cf CloudFormation) DeleteProject(projectName string) error { stackSetName := fmt.Sprintf("%s-infrastructure", projectName) if err := cf.deleteProjectStackSet(stackSetName); err != nil { return err } return cf.delete(fmt.Sprintf("%s-infrastructure-roles", projectName)) } func (cf CloudFormation) deleteProjectStackSet(stackSetName string) error { stackInstances, err := cf.client.ListStackInstances(&cloudformation.ListStackInstancesInput{ StackSetName: aws.String(stackSetName), }) if err != nil { // If the stackset doesn't exist - just move on. if stackSetDoesNotExist(err) { return nil } return fmt.Errorf("fetching existing project stack instances: %w", err) } // We want to delete all the stack instances, so we create // a set of account ids and regions. accountSet := map[string]bool{} regionSet := map[string]bool{} for _, summary := range stackInstances.Summaries { accountSet[*summary.Account] = true regionSet[*summary.Region] = true } var regions []string var accounts []string for key := range accountSet { accounts = append(accounts, key) } for key := range regionSet { regions = append(regions, key) } // Delete the Stack Instances for those accounts and regions. if len(stackInstances.Summaries) > 0 { operation, err := cf.client.DeleteStackInstances(&cloudformation.DeleteStackInstancesInput{ Accounts: aws.StringSlice(accounts), RetainStacks: aws.Bool(false), Regions: aws.StringSlice(regions), StackSetName: aws.String(stackSetName), }) if err != nil { return fmt.Errorf("DeleteStackInstances for stackset %s, accounts %s, and regions %s: %w", stackSetName, accounts, regions, err) } if err := cf.waitForStackSetOperation(stackSetName, *operation.OperationId); err != nil { return fmt.Errorf("Waiting for stackset %s to be deleted: %w", stackSetName, err) } } // Delete the StackSet now that the stack set instances are deleted. if _, err := cf.client.DeleteStackSet(&cloudformation.DeleteStackSetInput{ StackSetName: aws.String(stackSetName), }); err != nil { // If the StackSet doesn't exist, that's fine, move on. if !stackSetDoesNotExist(err) { return err } } return nil }
1
12,243
Should we return `nil` when `err == errChangeSetEmpty` instead of calling `WaitUntilStackUpdateCompleteWithContext` at l.103?
aws-copilot-cli
go
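The deployProjectConfig comment in the aws-copilot-cli file above describes an optimistic-concurrency scheme: the deployed config's version is bumped and reused as the StackSet operation ID, so a writer holding a stale version reuses an already-consumed ID and its update is rejected. Below is a minimal, self-contained Go sketch of that idea under stated assumptions: fakeStore is a hypothetical stand-in for the StackSet, and errOpExists plays the role of ErrCodeOperationIdAlreadyExistsException; none of these names exist in aws-copilot-cli.

package main

import (
	"errors"
	"fmt"
)

// fakeStore stands in for the StackSet: it rejects any operation ID
// that has already been used, which is what makes stale writes fail.
type fakeStore struct {
	usedOps map[string]bool
	body    string
}

var errOpExists = errors.New("operation ID already exists")

func (s *fakeStore) update(opID, body string) error {
	if s.usedOps[opID] {
		return errOpExists // the stale writer loses the race
	}
	s.usedOps[opID] = true
	s.body = body
	return nil
}

func main() {
	store := &fakeStore{usedOps: map[string]bool{}}

	// Two writers both read Version 1, so both derive operation ID "2".
	opID := fmt.Sprintf("%d", 1+1)

	fmt.Println(store.update(opID, "writer A")) // <nil>: first write wins
	fmt.Println(store.update(opID, "writer B")) // operation ID already exists
}

The first writer to claim operation ID "2" wins; the second write, made from the same stale read, fails instead of silently overwriting the first.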
@@ -153,13 +153,12 @@ public abstract class VectorWriter implements Closeable { private final DocIDMerger<VectorValuesSub> docIdMerger; private final int[] ordBase; private final int cost; - private final int size; + private int size; private int docId; private VectorValuesSub current; - // For each doc with a vector, record its ord in the segments being merged. This enables random - // access into the - // unmerged segments using the ords from the merged segment. + /* For each doc with a vector, record its ord in the segments being merged. This enables random access into the unmerged segments using the ords from the merged segment. + */ private int[] ordMap; private int ord;
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.codecs; import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import org.apache.lucene.index.DocIDMerger; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.MergeState; import org.apache.lucene.index.RandomAccessVectorValues; import org.apache.lucene.index.RandomAccessVectorValuesProducer; import org.apache.lucene.index.VectorValues; import org.apache.lucene.search.TopDocs; import org.apache.lucene.util.BytesRef; /** Writes vectors to an index. */ public abstract class VectorWriter implements Closeable { /** Sole constructor */ protected VectorWriter() {} /** Write all values contained in the provided reader */ public abstract void writeField(FieldInfo fieldInfo, VectorValues values) throws IOException; /** Called once at the end before close */ public abstract void finish() throws IOException; /** Merge the vector values from multiple segments, for all fields */ public void merge(MergeState mergeState) throws IOException { for (int i = 0; i < mergeState.fieldInfos.length; i++) { VectorReader reader = mergeState.vectorReaders[i]; assert reader != null || mergeState.fieldInfos[i].hasVectorValues() == false; if (reader != null) { reader.checkIntegrity(); } } for (FieldInfo fieldInfo : mergeState.mergeFieldInfos) { if (fieldInfo.hasVectorValues()) { mergeVectors(fieldInfo, mergeState); } } finish(); } private void mergeVectors(FieldInfo mergeFieldInfo, final MergeState mergeState) throws IOException { if (mergeState.infoStream.isEnabled("VV")) { mergeState.infoStream.message("VV", "merging " + mergeState.segmentInfo); } List<VectorValuesSub> subs = new ArrayList<>(); int dimension = -1; VectorValues.SearchStrategy searchStrategy = null; int nonEmptySegmentIndex = 0; for (int i = 0; i < mergeState.vectorReaders.length; i++) { VectorReader vectorReader = mergeState.vectorReaders[i]; if (vectorReader != null) { if (mergeFieldInfo != null && mergeFieldInfo.hasVectorValues()) { int segmentDimension = mergeFieldInfo.getVectorDimension(); VectorValues.SearchStrategy segmentSearchStrategy = mergeFieldInfo.getVectorSearchStrategy(); if (dimension == -1) { dimension = segmentDimension; searchStrategy = mergeFieldInfo.getVectorSearchStrategy(); } else if (dimension != segmentDimension) { throw new IllegalStateException( "Varying dimensions for vector-valued field " + mergeFieldInfo.name + ": " + dimension + "!=" + segmentDimension); } else if (searchStrategy != segmentSearchStrategy) { throw new IllegalStateException( "Varying search strategys for vector-valued field " + mergeFieldInfo.name + ": " + searchStrategy + "!=" + segmentSearchStrategy); 
} VectorValues values = vectorReader.getVectorValues(mergeFieldInfo.name); if (values != null) { subs.add(new VectorValuesSub(nonEmptySegmentIndex++, mergeState.docMaps[i], values)); } } } } // Create a new VectorValues by iterating over the sub vectors, mapping the resulting // docids using docMaps in the mergeState. if (subs.size() > 0) { writeField(mergeFieldInfo, new VectorValuesMerger(subs, mergeState)); } if (mergeState.infoStream.isEnabled("VV")) { mergeState.infoStream.message("VV", "merge done " + mergeState.segmentInfo); } } /** Tracks state of one sub-reader that we are merging */ private static class VectorValuesSub extends DocIDMerger.Sub { final MergeState.DocMap docMap; final VectorValues values; final int segmentIndex; int count; VectorValuesSub(int segmentIndex, MergeState.DocMap docMap, VectorValues values) { super(docMap); this.values = values; this.segmentIndex = segmentIndex; this.docMap = docMap; assert values.docID() == -1; } @Override public int nextDoc() throws IOException { int docId = values.nextDoc(); if (docId != NO_MORE_DOCS) { // Note: this does count deleted docs since they are present in the to-be-merged segment ++count; } return docId; } } /** * View over multiple VectorValues supporting iterator-style access via DocIdMerger. Maintains a * reverse ordinal mapping for documents having values in order to support random access by dense * ordinal. */ private static class VectorValuesMerger extends VectorValues implements RandomAccessVectorValuesProducer { private final List<VectorValuesSub> subs; private final DocIDMerger<VectorValuesSub> docIdMerger; private final int[] ordBase; private final int cost; private final int size; private int docId; private VectorValuesSub current; // For each doc with a vector, record its ord in the segments being merged. This enables random // access into the // unmerged segments using the ords from the merged segment. 
private int[] ordMap; private int ord; VectorValuesMerger(List<VectorValuesSub> subs, MergeState mergeState) throws IOException { this.subs = subs; docIdMerger = DocIDMerger.of(subs, mergeState.needsIndexSort); int totalCost = 0, totalSize = 0; for (VectorValuesSub sub : subs) { totalCost += sub.values.cost(); totalSize += sub.values.size(); } cost = totalCost; size = totalSize; ordMap = new int[size]; ordBase = new int[subs.size()]; int lastBase = 0; for (int k = 0; k < subs.size(); k++) { int size = subs.get(k).values.size(); ordBase[k] = lastBase; lastBase += size; } docId = -1; } @Override public int docID() { return docId; } @Override public int nextDoc() throws IOException { current = docIdMerger.next(); if (current == null) { docId = NO_MORE_DOCS; } else { docId = current.mappedDocID; ordMap[ord++] = ordBase[current.segmentIndex] + current.count - 1; } return docId; } @Override public float[] vectorValue() throws IOException { return current.values.vectorValue(); } @Override public BytesRef binaryValue() throws IOException { return current.values.binaryValue(); } @Override public RandomAccessVectorValues randomAccess() { return new MergerRandomAccess(); } @Override public int advance(int target) { throw new UnsupportedOperationException(); } @Override public int size() { return size; } @Override public long cost() { return cost; } @Override public int dimension() { return subs.get(0).values.dimension(); } @Override public SearchStrategy searchStrategy() { return subs.get(0).values.searchStrategy(); } @Override public TopDocs search(float[] target, int k, int fanout) throws IOException { throw new UnsupportedOperationException(); } class MergerRandomAccess implements RandomAccessVectorValues { private final List<RandomAccessVectorValues> raSubs; MergerRandomAccess() { raSubs = new ArrayList<>(subs.size()); for (VectorValuesSub sub : subs) { if (sub.values instanceof RandomAccessVectorValuesProducer) { raSubs.add(((RandomAccessVectorValuesProducer) sub.values).randomAccess()); } else { throw new IllegalStateException( "Cannot merge VectorValues without support for random access"); } } } @Override public int size() { return size; } @Override public int dimension() { return VectorValuesMerger.this.dimension(); } @Override public SearchStrategy searchStrategy() { return VectorValuesMerger.this.searchStrategy(); } @Override public float[] vectorValue(int target) throws IOException { int unmappedOrd = ordMap[target]; int segmentOrd = Arrays.binarySearch(ordBase, unmappedOrd); if (segmentOrd < 0) { // get the index of the greatest lower bound segmentOrd = -2 - segmentOrd; } while (segmentOrd < ordBase.length - 1 && ordBase[segmentOrd + 1] == ordBase[segmentOrd]) { // forward over empty segments which will share the same ordBase segmentOrd++; } return raSubs.get(segmentOrd).vectorValue(unmappedOrd - ordBase[segmentOrd]); } @Override public BytesRef binaryValue(int targetOrd) throws IOException { throw new UnsupportedOperationException(); } } } }
1
39,677
Hmm, I thought spotless would wrap this line, but it doesn't seem to complain about it
apache-lucene-solr
java
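The VectorValuesMerger in the Lucene file above keeps a per-segment ordBase array and uses binary search to map a merged (global) ordinal back to a segment and a segment-local ordinal. The following is a minimal sketch of that lookup, written in Go purely for illustration; sort.SearchInts stands in for Java's Arrays.binarySearch, and because it returns an upper bound it already lands past empty segments that share a base, so the original's forward-skip loop over empty segments is not needed here.

package main

import (
	"fmt"
	"sort"
)

// ordBase[k] is the first global ordinal owned by segment k; segment
// sizes 3, 0, 2 give bases [0, 3, 3], with segment 1 empty.
func segmentFor(ordBase []int, globalOrd int) (int, int) {
	// Greatest index whose base is <= globalOrd. SearchInts finds the
	// first base strictly greater than globalOrd; the index before it
	// is the owning segment, with duplicates (empty segments) skipped.
	seg := sort.SearchInts(ordBase, globalOrd+1) - 1
	return seg, globalOrd - ordBase[seg]
}

func main() {
	bases := []int{0, 3, 3}
	for _, ord := range []int{0, 2, 3, 4} {
		seg, local := segmentFor(bases, ord)
		fmt.Printf("global %d -> segment %d, local %d\n", ord, seg, local)
	}
}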
@@ -47,10 +47,11 @@ type Selector struct { // SpiffeIDSpec defines the desired state of SpiffeID type SpiffeIDSpec struct { - ParentId string `json:"parentId"` - SpiffeId string `json:"spiffeId"` - Selector Selector `json:"selector"` - DnsNames []string `json:"dnsNames,omitempty"` + ParentId string `json:"parentId"` + SpiffeId string `json:"spiffeId"` + Selector Selector `json:"selector"` + DnsNames []string `json:"dnsNames,omitempty"` + FederatesWith []string `json:"federatesWith,omitempty"` } // SpiffeIDStatus defines the observed state of SpiffeID
1
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) type Selector struct { // Cluster is the k8s_psat cluster Cluster string `json:"cluster,omitempty"` // AgentNodeUid is the UID Of the node AgentNodeUid types.UID `json:"agent_node_uid,omitempty"` // Pod label name/value to match for this spiffe ID PodLabel map[string]string `json:"podLabel,omitempty"` // Pod name to match for this spiffe ID PodName string `json:"podName,omitempty"` // Pod UID to match for this spiffe ID PodUid types.UID `json:"podUid,omitempty"` // Namespace to match for this spiffe ID Namespace string `json:"namespace,omitempty"` // ServiceAccount to match for this spiffe ID ServiceAccount string `json:"serviceAccount,omitempty"` // ContainerImage to match for this spiffe ID ContainerImage string `json:"containerImage,omitempty"` // ContainerName to match for this spiffe ID ContainerName string `json:"containerName,omitempty"` // NodeName to match for this spiffe ID NodeName string `json:"nodeName,omitempty"` // Arbitrary k8s selectors Arbitrary []string `json:"arbitrary,omitempty"` } // SpiffeIDSpec defines the desired state of SpiffeID type SpiffeIDSpec struct { ParentId string `json:"parentId"` SpiffeId string `json:"spiffeId"` Selector Selector `json:"selector"` DnsNames []string `json:"dnsNames,omitempty"` } // SpiffeIDStatus defines the observed state of SpiffeID type SpiffeIDStatus struct { EntryId *string `json:"entryId,omitempty"` } // SpiffeID is the Schema for the SpiffeIds API type SpiffeID struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec SpiffeIDSpec `json:"spec,omitempty"` Status SpiffeIDStatus `json:"status,omitempty"` } // SpiffeIDList contains a list of SpiffeID type SpiffeIDList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []SpiffeID `json:"items"` } func init() { SchemeBuilder.Register(&SpiffeID{}, &SpiffeIDList{}) }
1
16,095
I think this will require updating the API version. We are planning to do this anyway, so perhaps we can lump this in so we don't have to update too many times?
spiffe-spire
go
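The reviewer suggests folding the new FederatesWith field into the API version bump that is already planned. A speculative Go sketch of what that bundling could look like follows; the v1beta2 type and the upgrade helper are hypothetical and do not exist in spiffe-spire, and only the v1beta1 fields appear in the source above.

package main

import "fmt"

type SpiffeIDSpecV1Beta1 struct {
	ParentId string
	SpiffeId string
	DnsNames []string
}

type SpiffeIDSpecV1Beta2 struct {
	ParentId      string
	SpiffeId      string
	DnsNames      []string
	FederatesWith []string // new in the hypothetical v1beta2
}

// upgrade converts an old spec; FederatesWith stays nil, which matches
// its omitempty JSON tag in the proposed diff.
func upgrade(old SpiffeIDSpecV1Beta1) SpiffeIDSpecV1Beta2 {
	return SpiffeIDSpecV1Beta2{
		ParentId: old.ParentId,
		SpiffeId: old.SpiffeId,
		DnsNames: old.DnsNames,
	}
}

func main() {
	fmt.Printf("%+v\n", upgrade(SpiffeIDSpecV1Beta1{
		ParentId: "spiffe://example.org/ns",
		SpiffeId: "spiffe://example.org/wl",
	}))
}

Since FederatesWith carries omitempty in the proposed diff, leaving it nil during conversion round-trips cleanly for objects created under the old version.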
@@ -215,13 +215,12 @@ public abstract class RSSceneMixin implements RSScene for (int z = minLevel; z < maxY; ++z) { - RSTile[][] planeTiles = tiles[z]; for (int x = minTileX; x < maxTileX; ++x) { for (int y = minTileZ; y < maxTileZ; ++y) { - RSTile tile = planeTiles[x][y]; + RSTile tile = tiles[z][x][y]; if (tile != null) { if (tile.getPhysicalLevel() <= plane
1
/* * Copyright (c) 2018 Abex * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package net.runelite.mixins; import net.runelite.api.Entity; import net.runelite.api.Perspective; import net.runelite.api.Tile; import net.runelite.api.TileModel; import net.runelite.api.TilePaint; import net.runelite.api.coords.WorldPoint; import net.runelite.api.hooks.DrawCallbacks; import net.runelite.api.mixins.Copy; import net.runelite.api.mixins.Inject; import net.runelite.api.mixins.MethodHook; import net.runelite.api.mixins.Mixin; import net.runelite.api.mixins.Replace; import net.runelite.api.mixins.Shadow; import net.runelite.rs.api.RSBoundaryObject; import net.runelite.rs.api.RSClient; import net.runelite.rs.api.RSFloorDecoration; import net.runelite.rs.api.RSNodeDeque; import net.runelite.rs.api.RSScene; import net.runelite.rs.api.RSTile; import net.runelite.rs.api.RSTileItem; import net.runelite.rs.api.RSTileItemPile; import net.runelite.rs.api.RSTileModel; import net.runelite.rs.api.RSWallDecoration; @Mixin(RSScene.class) public abstract class RSSceneMixin implements RSScene { private static final int INVALID_HSL_COLOR = 12345678; private static final int DEFAULT_DISTANCE = 25; private static final int PITCH_LOWER_LIMIT = 128; private static final int PITCH_UPPER_LIMIT = 383; @Shadow("client") static RSClient client; @Shadow("pitchRelaxEnabled") private static boolean pitchRelaxEnabled; @Shadow("hdMinimapEnabled") private static boolean hdMinimapEnabled; @Shadow("Rasterizer3D_colorPalette") private static int[] colorPalette; @Inject private static int[] tmpX = new int[6]; @Shadow("skyboxColor") static int skyboxColor; @Inject private static int[] tmpY = new int[6]; @Inject private static int rl$drawDistance; @Replace("draw") void rl$drawScene(int cameraX, int cameraY, int cameraZ, int cameraPitch, int cameraYaw, int plane) { final DrawCallbacks drawCallbacks = client.getDrawCallbacks(); if (drawCallbacks != null) { drawCallbacks.drawScene(cameraX, cameraY, cameraZ, cameraPitch, cameraYaw, plane); } final boolean isGpu = client.isGpu(); final boolean checkClick = client.isCheckClick(); if (!client.isMenuOpen()) { // Force check click to update the selected tile client.setCheckClick(true); final int mouseX = client.getMouseX(); final int mouseY = client.getMouseY(); 
client.setMouseCanvasHoverPositionX(mouseX - client.getViewportXOffset()); client.setMouseCanvasHoverPositionY(mouseY - client.getViewportYOffset()); } if (!isGpu) { if (skyboxColor != 0) { client.rasterizerFillRectangle( client.getViewportXOffset(), client.getViewportYOffset(), client.getViewportWidth(), client.getViewportHeight(), skyboxColor ); } } final int maxX = getMaxX(); final int maxY = getMaxY(); final int maxZ = getMaxZ(); final int minLevel = getMinLevel(); final RSTile[][][] tiles = getTiles(); final int distance = isGpu ? rl$drawDistance : DEFAULT_DISTANCE; if (cameraX < 0) { cameraX = 0; } else if (cameraX >= maxX * Perspective.LOCAL_TILE_SIZE) { cameraX = maxX * Perspective.LOCAL_TILE_SIZE - 1; } if (cameraZ < 0) { cameraZ = 0; } else if (cameraZ >= maxZ * Perspective.LOCAL_TILE_SIZE) { cameraZ = maxZ * Perspective.LOCAL_TILE_SIZE - 1; } // we store the uncapped pitch for setting camera angle for the pitch relaxer // we still have to cap the pitch in order to access the visibility map, though int realPitch = cameraPitch; if (cameraPitch < PITCH_LOWER_LIMIT) { cameraPitch = PITCH_LOWER_LIMIT; } else if (cameraPitch > PITCH_UPPER_LIMIT) { cameraPitch = PITCH_UPPER_LIMIT; } if (!pitchRelaxEnabled) { realPitch = cameraPitch; } client.setCycle(client.getCycle() + 1); client.setPitchSin(Perspective.SINE[realPitch]); client.setPitchCos(Perspective.COSINE[realPitch]); client.setYawSin(Perspective.SINE[cameraYaw]); client.setYawCos(Perspective.COSINE[cameraYaw]); final int[][][] tileHeights = client.getTileHeights(); boolean[][] renderArea = client.getVisibilityMaps()[(cameraPitch - 128) / 32][cameraYaw / 64]; client.setRenderArea(renderArea); client.setCameraX2(cameraX); client.setCameraY2(cameraY); client.setCameraZ2(cameraZ); int screenCenterX = cameraX / Perspective.LOCAL_TILE_SIZE; int screenCenterZ = cameraZ / Perspective.LOCAL_TILE_SIZE; client.setScreenCenterX(screenCenterX); client.setScreenCenterZ(screenCenterZ); client.setScenePlane(plane); int minTileX = screenCenterX - distance; if (minTileX < 0) { minTileX = 0; } int minTileZ = screenCenterZ - distance; if (minTileZ < 0) { minTileZ = 0; } int maxTileX = screenCenterX + distance; if (maxTileX > maxX) { maxTileX = maxX; } int maxTileZ = screenCenterZ + distance; if (maxTileZ > maxZ) { maxTileZ = maxZ; } client.setMinTileX(minTileX); client.setMinTileZ(minTileZ); client.setMaxTileX(maxTileX); client.setMaxTileZ(maxTileZ); updateOccluders(); client.setTileUpdateCount(0); for (int z = minLevel; z < maxY; ++z) { RSTile[][] planeTiles = tiles[z]; for (int x = minTileX; x < maxTileX; ++x) { for (int y = minTileZ; y < maxTileZ; ++y) { RSTile tile = planeTiles[x][y]; if (tile != null) { if (tile.getPhysicalLevel() <= plane && (isGpu || renderArea[x - screenCenterX + DEFAULT_DISTANCE][y - screenCenterZ + DEFAULT_DISTANCE] || tileHeights[z][x][y] - cameraY >= 2000)) { tile.setDraw(true); tile.setVisible(true); tile.setDrawEntities(true); client.setTileUpdateCount(client.getTileUpdateCount() + 1); } else { tile.setDraw(false); tile.setVisible(false); tile.setWallCullDirection(0); } } } } } for (int z = minLevel; z < maxY; ++z) { RSTile[][] planeTiles = tiles[z]; for (int x = -distance; x <= 0; ++x) { int var10 = x + screenCenterX; int var16 = screenCenterX - x; if (var10 >= minTileX || var16 < maxTileX) { for (int y = -distance; y <= 0; ++y) { int var13 = y + screenCenterZ; int var14 = screenCenterZ - y; if (var10 >= minTileX) { if (var13 >= minTileZ) { RSTile tile = planeTiles[var10][var13]; if (tile != null && tile.isDraw()) { 
draw(tile, true); } } if (var14 < maxTileZ) { RSTile tile = planeTiles[var10][var14]; if (tile != null && tile.isDraw()) { draw(tile, true); } } } if (var16 < maxTileX) { if (var13 >= minTileZ) { RSTile tile = planeTiles[var16][var13]; if (tile != null && tile.isDraw()) { draw(tile, true); } } if (var14 < maxTileZ) { RSTile tile = planeTiles[var16][var14]; if (tile != null && tile.isDraw()) { draw(tile, true); } } } if (client.getTileUpdateCount() == 0) { if (!isGpu && (client.getOculusOrbState() != 0 && !client.getComplianceValue("orbInteraction"))) { client.setEntitiesAtMouseCount(0); } client.setCheckClick(false); if (!checkClick) { client.setViewportWalking(false); } client.getCallbacks().drawScene(); return; } } } } } for (int z = minLevel; z < maxY; ++z) { RSTile[][] planeTiles = tiles[z]; for (int x = -distance; x <= 0; ++x) { int var10 = x + screenCenterX; int var16 = screenCenterX - x; if (var10 >= minTileX || var16 < maxTileX) { for (int y = -distance; y <= 0; ++y) { int var13 = y + screenCenterZ; int var14 = screenCenterZ - y; if (var10 >= minTileX) { if (var13 >= minTileZ) { RSTile tile = planeTiles[var10][var13]; if (tile != null && tile.isDraw()) { draw(tile, false); } } if (var14 < maxTileZ) { RSTile tile = planeTiles[var10][var14]; if (tile != null && tile.isDraw()) { draw(tile, false); } } } if (var16 < maxTileX) { if (var13 >= minTileZ) { RSTile tile = planeTiles[var16][var13]; if (tile != null && tile.isDraw()) { draw(tile, false); } } if (var14 < maxTileZ) { RSTile tile = planeTiles[var16][var14]; if (tile != null && tile.isDraw()) { draw(tile, false); } } } if (client.getTileUpdateCount() == 0) { if (!isGpu && (client.getOculusOrbState() != 0 && !client.getComplianceValue("orbInteraction"))) { client.setEntitiesAtMouseCount(0); } client.setCheckClick(false); if (!checkClick) { client.setViewportWalking(false); } client.getCallbacks().drawScene(); return; } } } } } if (!isGpu && (client.getOculusOrbState() != 0 && !client.getComplianceValue("orbInteraction"))) { client.setEntitiesAtMouseCount(0); } client.setCheckClick(false); if (!checkClick) { // If checkClick was false, then the selected tile wouldn't have existed next tick, // so clear viewport walking in order to prevent it triggering a walk client.setViewportWalking(false); } client.getCallbacks().drawScene(); } @Copy("newWallDecoration") abstract public void rs$addBoundaryDecoration(int plane, int x, int y, int floor, Entity var5, Entity var6, int var7, int var8, int var9, int var10, long hash, int var12); @Replace("newWallDecoration") public void rl$addBoundaryDecoration(int plane, int x, int y, int floor, Entity var5, Entity var6, int var7, int var8, int var9, int var10, long hash, int var12) { rs$addBoundaryDecoration(plane, x, y, floor, var5, var6, var7, var8, var9, var10, hash, var12); Tile tile = getTiles()[plane][x][y]; if (tile != null) { RSWallDecoration object = (RSWallDecoration) tile.getDecorativeObject(); if (object != null) { object.setPlane(plane); } } } @Copy("newGroundItemPile") abstract public void rs$addItemPile(int plane, int x, int y, int hash, Entity var5, long var6, Entity var7, Entity var8); @Replace("newGroundItemPile") public void rl$addItemPile(int plane, int x, int y, int hash, Entity var5, long var6, Entity var7, Entity var8) { rs$addItemPile(plane, x, y, hash, var5, var6, var7, var8); Tile tile = getTiles()[plane][x][y]; if (tile != null) { RSTileItemPile itemLayer = (RSTileItemPile) tile.getItemLayer(); if (itemLayer != null) { itemLayer.setPlane(plane); } } } 
@Copy("newFloorDecoration") abstract public void rs$groundObjectSpawned(int plane, int x, int y, int floor, Entity var5, long hash, int var7); @Replace("newFloorDecoration") public void rl$groundObjectSpawned(int plane, int x, int y, int floor, Entity var5, long hash, int var7) { rs$groundObjectSpawned(plane, x, y, floor, var5, hash, var7); Tile tile = getTiles()[plane][x][y]; if (tile != null) { RSFloorDecoration groundObject = (RSFloorDecoration) tile.getGroundObject(); if (groundObject != null) { groundObject.setPlane(plane); } } } @Copy("newBoundaryObject") abstract public void rs$addBoundary(int plane, int x, int y, int floor, Entity var5, Entity var6, int var7, int var8, long hash, int var10); @Replace("newBoundaryObject") public void rl$addBoundary(int plane, int x, int y, int floor, Entity var5, Entity var6, int var7, int var8, long hash, int var10) { rs$addBoundary(plane, x, y, floor, var5, var6, var7, var8, hash, var10); Tile tile = getTiles()[plane][x][y]; if (tile != null) { RSBoundaryObject wallObject = (RSBoundaryObject) tile.getWallObject(); if (wallObject != null) { wallObject.setPlane(plane); } } } @Copy("drawTileUnderlay") abstract public void rs$drawTileUnderlay(TilePaint tile, int z, int pitchSin, int pitchCos, int yawSin, int yawCos, int x, int y); @Replace("drawTileUnderlay") public void rl$drawTileUnderlay(TilePaint tile, int z, int pitchSin, int pitchCos, int yawSin, int yawCos, int x, int y) { if (!client.isGpu()) { try { rs$drawTileUnderlay(tile, z, pitchSin, pitchCos, yawSin, yawCos, x, y); } catch (Exception ex) { client.getLogger().warn("error during tile underlay rendering", ex); } return; } final DrawCallbacks drawCallbacks = client.getDrawCallbacks(); if (drawCallbacks == null) { return; } try { final int[][][] tileHeights = getTileHeights(); final int cameraX2 = client.getCameraX2(); final int cameraY2 = client.getCameraY2(); final int cameraZ2 = client.getCameraZ2(); final int zoom = client.get3dZoom(); final int centerX = client.getCenterX(); final int centerY = client.getCenterY(); final int mouseX2 = client.getMouseX2(); final int mouseY2 = client.getMouseY2(); final boolean checkClick = client.isCheckClick(); int var9; int var10 = var9 = (x << 7) - cameraX2; int var11; int var12 = var11 = (y << 7) - cameraZ2; int var13; int var14 = var13 = var10 + 128; int var15; int var16 = var15 = var12 + 128; int var17 = tileHeights[z][x][y] - cameraY2; int var18 = tileHeights[z][x + 1][y] - cameraY2; int var19 = tileHeights[z][x + 1][y + 1] - cameraY2; int var20 = tileHeights[z][x][y + 1] - cameraY2; int var21 = var10 * yawCos + yawSin * var12 >> 16; var12 = var12 * yawCos - yawSin * var10 >> 16; var10 = var21; var21 = var17 * pitchCos - pitchSin * var12 >> 16; var12 = pitchSin * var17 + var12 * pitchCos >> 16; var17 = var21; if (var12 >= 50) { var21 = var14 * yawCos + yawSin * var11 >> 16; var11 = var11 * yawCos - yawSin * var14 >> 16; var14 = var21; var21 = var18 * pitchCos - pitchSin * var11 >> 16; var11 = pitchSin * var18 + var11 * pitchCos >> 16; var18 = var21; if (var11 >= 50) { var21 = var13 * yawCos + yawSin * var16 >> 16; var16 = var16 * yawCos - yawSin * var13 >> 16; var13 = var21; var21 = var19 * pitchCos - pitchSin * var16 >> 16; var16 = pitchSin * var19 + var16 * pitchCos >> 16; var19 = var21; if (var16 >= 50) { var21 = var9 * yawCos + yawSin * var15 >> 16; var15 = var15 * yawCos - yawSin * var9 >> 16; var9 = var21; var21 = var20 * pitchCos - pitchSin * var15 >> 16; var15 = pitchSin * var20 + var15 * pitchCos >> 16; if (var15 >= 50) { int dy = var10 * 
zoom / var12 + centerX; int dx = var17 * zoom / var12 + centerY; int cy = var14 * zoom / var11 + centerX; int cx = var18 * zoom / var11 + centerY; int ay = var13 * zoom / var16 + centerX; int ax = var19 * zoom / var16 + centerY; int by = var9 * zoom / var15 + centerX; int bx = var21 * zoom / var15 + centerY; drawCallbacks.drawScenePaint(0, pitchSin, pitchCos, yawSin, yawCos, -cameraX2, -cameraY2, -cameraZ2, tile, z, x, y, zoom, centerX, centerY); if ((ay - by) * (cx - bx) - (ax - bx) * (cy - by) > 0) { if (checkClick && client.containsBounds(mouseX2, mouseY2, ax, bx, cx, ay, by, cy)) { setTargetTile(x, y); } } if ((dy - cy) * (bx - cx) - (dx - cx) * (by - cy) > 0) { if (checkClick && client.containsBounds(mouseX2, mouseY2, dx, cx, bx, dy, cy, by)) { setTargetTile(x, y); } } } } } } } catch (Exception ex) { client.getLogger().warn("error during underlay rendering", ex); } } @Copy("drawTileOverlay") abstract public void rs$drawTileOverlay(TileModel tile, int pitchSin, int pitchCos, int yawSin, int yawCos, int x, int y); @Replace("drawTileOverlay") public void rl$drawTileOverlay(TileModel tile, int pitchSin, int pitchCos, int yawSin, int yawCos, int tileX, int tileY) { if (!client.isGpu()) { rs$drawTileOverlay(tile, pitchSin, pitchCos, yawSin, yawCos, tileX, tileY); return; } final DrawCallbacks drawCallbacks = client.getDrawCallbacks(); if (drawCallbacks == null) { return; } try { final int cameraX2 = client.getCameraX2(); final int cameraY2 = client.getCameraY2(); final int cameraZ2 = client.getCameraZ2(); final int zoom = client.get3dZoom(); final int centerX = client.getCenterX(); final int centerY = client.getCenterY(); drawCallbacks.drawSceneModel(0, pitchSin, pitchCos, yawSin, yawCos, -cameraX2, -cameraY2, -cameraZ2, tile, client.getPlane(), tileX, tileY, zoom, centerX, centerY); final boolean checkClick = client.isCheckClick(); if (!checkClick) { return; } RSTileModel tileModel = (RSTileModel) tile; final int[] faceX = tileModel.getFaceX(); final int[] faceY = tileModel.getFaceY(); final int[] faceZ = tileModel.getFaceZ(); final int[] vertexX = tileModel.getVertexX(); final int[] vertexY = tileModel.getVertexY(); final int[] vertexZ = tileModel.getVertexZ(); final int vertexCount = vertexX.length; final int faceCount = faceX.length; final int mouseX2 = client.getMouseX2(); final int mouseY2 = client.getMouseY2(); for (int i = 0; i < vertexCount; ++i) { int vx = vertexX[i] - cameraX2; int vy = vertexY[i] - cameraY2; int vz = vertexZ[i] - cameraZ2; int rotA = vz * yawSin + vx * yawCos >> 16; int rotB = vz * yawCos - vx * yawSin >> 16; int var13 = vy * pitchCos - rotB * pitchSin >> 16; int var12 = vy * pitchSin + rotB * pitchCos >> 16; if (var12 < 50) { return; } int ax = rotA * zoom / var12 + centerX; int ay = var13 * zoom / var12 + centerY; tmpX[i] = ax; tmpY[i] = ay; } for (int i = 0; i < faceCount; ++i) { int va = faceX[i]; int vb = faceY[i]; int vc = faceZ[i]; int x1 = tmpX[va]; int x2 = tmpX[vb]; int x3 = tmpX[vc]; int y1 = tmpY[va]; int y2 = tmpY[vb]; int y3 = tmpY[vc]; if ((x1 - x2) * (y3 - y2) - (y1 - y2) * (x3 - x2) > 0) { if (client.containsBounds(mouseX2, mouseY2, y1, y2, y3, x1, x2, x3)) { setTargetTile(tileX, tileY); break; } } } } catch (Exception ex) { client.getLogger().warn("error during overlay rendering", ex); } } @Inject @Override public int getDrawDistance() { return rl$drawDistance; } @Inject @Override public void setDrawDistance(int drawDistance) { rl$drawDistance = drawDistance; } @Inject private static void setTargetTile(int targetX, int targetY) { 
client.setSelectedSceneTileX(targetX); client.setSelectedSceneTileY(targetY); } @Override @Inject public void addItem(int id, int quantity, WorldPoint point) { final int sceneX = point.getX() - client.getBaseX(); final int sceneY = point.getY() - client.getBaseY(); final int plane = point.getPlane(); if (sceneX < 0 || sceneY < 0 || sceneX >= 104 || sceneY >= 104) { return; } RSTileItem item = client.newTileItem(); item.setId(id); item.setQuantity(quantity); RSNodeDeque[][][] groundItems = client.getGroundItemDeque(); if (groundItems[plane][sceneX][sceneY] == null) { groundItems[plane][sceneX][sceneY] = client.newNodeDeque(); } groundItems[plane][sceneX][sceneY].addFirst(item); if (plane == client.getPlane()) { client.updateItemPile(sceneX, sceneY); } } @Override @Inject public void removeItem(int id, int quantity, WorldPoint point) { final int sceneX = point.getX() - client.getBaseX(); final int sceneY = point.getY() - client.getBaseY(); final int plane = point.getPlane(); if (sceneX < 0 || sceneY < 0 || sceneX >= 104 || sceneY >= 104) { return; } RSNodeDeque items = client.getGroundItemDeque()[plane][sceneX][sceneY]; if (items == null) { return; } for (RSTileItem item = (RSTileItem) items.last(); item != null; item = (RSTileItem) items.previous()) { if (item.getId() == id && quantity == 1) { item.unlink(); break; } } if (items.last() == null) { client.getGroundItemDeque()[plane][sceneX][sceneY] = null; } client.updateItemPile(sceneX, sceneY); } @MethodHook(value = "addTile", end = true) @Inject public void rl$addTile(int z, int x, int y, int shape, int rotation, int texture, int heightSw, int heightNw, int heightNe, int heightSe, int underlaySwColor, int underlayNwColor, int underlayNeColor, int underlaySeColor, int overlaySwColor, int overlayNwColor, int overlayNeColor, int overlaySeColor, int underlayRgb, int overlayRgb) { if (shape != 0 && shape != 1) { Tile tile = getTiles()[z][x][y]; TileModel sceneTileModel = tile.getTileModel(); sceneTileModel.setUnderlaySwColor(underlaySwColor); sceneTileModel.setUnderlayNwColor(underlayNwColor); sceneTileModel.setUnderlayNeColor(underlayNeColor); sceneTileModel.setUnderlaySeColor(underlaySeColor); sceneTileModel.setOverlaySwColor(overlaySwColor); sceneTileModel.setOverlayNwColor(overlayNwColor); sceneTileModel.setOverlayNeColor(overlayNeColor); sceneTileModel.setOverlaySeColor(overlaySeColor); } } @Copy("drawTileMinimap") abstract void rs$drawTile(int[] pixels, int pixelOffset, int width, int z, int x, int y); @Replace("drawTileMinimap") public void rl$drawTile(int[] pixels, int pixelOffset, int width, int z, int x, int y) { if (!hdMinimapEnabled) { rs$drawTile(pixels, pixelOffset, width, z, x, y); return; } Tile tile = getTiles()[z][x][y]; if (tile == null) { return; } TilePaint sceneTilePaint = tile.getTilePaint(); if (sceneTilePaint != null) { int rgb = sceneTilePaint.getRBG(); if (sceneTilePaint.getSwColor() != INVALID_HSL_COLOR) { // hue and saturation int hs = sceneTilePaint.getSwColor() & ~0x7F; // I know this looks dumb (and it probably is) but I don't feel like hunting down the problem int seLightness = sceneTilePaint.getNwColor() & 0x7F; int neLightness = sceneTilePaint.getNeColor() & 0x7F; int southDeltaLightness = (sceneTilePaint.getSwColor() & 0x7F) - seLightness; int northDeltaLightness = (sceneTilePaint.getSeColor() & 0x7F) - neLightness; seLightness <<= 2; neLightness <<= 2; for (int i = 0; i < 4; i++) { if (sceneTilePaint.getTexture() == -1) { pixels[pixelOffset] = colorPalette[hs | seLightness >> 2]; pixels[pixelOffset + 1] = 
colorPalette[hs | seLightness * 3 + neLightness >> 4]; pixels[pixelOffset + 2] = colorPalette[hs | seLightness + neLightness >> 3]; pixels[pixelOffset + 3] = colorPalette[hs | seLightness + neLightness * 3 >> 4]; } else { int lig = 0xFF - ((seLightness >> 1) * (seLightness >> 1) >> 8); pixels[pixelOffset] = ((rgb & 0xFF00FF) * lig & ~0xFF00FF) + ((rgb & 0xFF00) * lig & 0xFF0000) >> 8; lig = 0xFF - ((seLightness * 3 + neLightness >> 3) * (seLightness * 3 + neLightness >> 3) >> 8); pixels[pixelOffset + 1] = ((rgb & 0xFF00FF) * lig & ~0xFF00FF) + ((rgb & 0xFF00) * lig & 0xFF0000) >> 8; lig = 0xFF - ((seLightness + neLightness >> 2) * (seLightness + neLightness >> 2) >> 8); pixels[pixelOffset + 2] = ((rgb & 0xFF00FF) * lig & ~0xFF00FF) + ((rgb & 0xFF00) * lig & 0xFF0000) >> 8; lig = 0xFF - ((seLightness + neLightness * 3 >> 3) * (seLightness + neLightness * 3 >> 3) >> 8); pixels[pixelOffset + 3] = ((rgb & 0xFF00FF) * lig & ~0xFF00FF) + ((rgb & 0xFF00) * lig & 0xFF0000) >> 8; } seLightness += southDeltaLightness; neLightness += northDeltaLightness; pixelOffset += width; } } else if (rgb != 0) { for (int i = 0; i < 4; i++) { pixels[pixelOffset] = rgb; pixels[pixelOffset + 1] = rgb; pixels[pixelOffset + 2] = rgb; pixels[pixelOffset + 3] = rgb; pixelOffset += width; } } return; } TileModel sceneTileModel = tile.getTileModel(); if (sceneTileModel != null) { int shape = sceneTileModel.getShape(); int rotation = sceneTileModel.getRotation(); int overlayRgb = sceneTileModel.getModelOverlay(); int underlayRgb = sceneTileModel.getModelUnderlay(); int[] points = getTileShape2D()[shape]; int[] indices = getTileRotation2D()[rotation]; int shapeOffset = 0; if (sceneTileModel.getOverlaySwColor() != INVALID_HSL_COLOR) { // hue and saturation int hs = sceneTileModel.getOverlaySwColor() & ~0x7F; int seLightness = sceneTileModel.getOverlaySeColor() & 0x7F; int neLightness = sceneTileModel.getOverlayNeColor() & 0x7F; int southDeltaLightness = (sceneTileModel.getOverlaySwColor() & 0x7F) - seLightness; int northDeltaLightness = (sceneTileModel.getOverlayNwColor() & 0x7F) - neLightness; seLightness <<= 2; neLightness <<= 2; for (int i = 0; i < 4; i++) { if (sceneTileModel.getTriangleTextureId() == null) { if (points[indices[shapeOffset++]] != 0) { pixels[pixelOffset] = colorPalette[hs | (seLightness >> 2)]; } if (points[indices[shapeOffset++]] != 0) { pixels[pixelOffset + 1] = colorPalette[hs | (seLightness * 3 + neLightness >> 4)]; } if (points[indices[shapeOffset++]] != 0) { pixels[pixelOffset + 2] = colorPalette[hs | (seLightness + neLightness >> 3)]; } if (points[indices[shapeOffset++]] != 0) { pixels[pixelOffset + 3] = colorPalette[hs | (seLightness + neLightness * 3 >> 4)]; } } else { if (points[indices[shapeOffset++]] != 0) { int lig = 0xFF - ((seLightness >> 1) * (seLightness >> 1) >> 8); pixels[pixelOffset] = ((overlayRgb & 0xFF00FF) * lig & ~0xFF00FF) + ((overlayRgb & 0xFF00) * lig & 0xFF0000) >> 8; } if (points[indices[shapeOffset++]] != 0) { int lig = 0xFF - ((seLightness * 3 + neLightness >> 3) * (seLightness * 3 + neLightness >> 3) >> 8); pixels[pixelOffset + 1] = ((overlayRgb & 0xFF00FF) * lig & ~0xFF00FF) + ((overlayRgb & 0xFF00) * lig & 0xFF0000) >> 8; } if (points[indices[shapeOffset++]] != 0) { int lig = 0xFF - ((seLightness + neLightness >> 2) * (seLightness + neLightness >> 2) >> 8); pixels[pixelOffset + 2] = ((overlayRgb & 0xFF00FF) * lig & ~0xFF00FF) + ((overlayRgb & 0xFF00) * lig & 0xFF0000) >> 8; } if (points[indices[shapeOffset++]] != 0) { int lig = 0xFF - ((seLightness + neLightness * 3 >> 
3) * (seLightness + neLightness * 3 >> 3) >> 8); pixels[pixelOffset + 3] = ((overlayRgb & 0xFF00FF) * lig & ~0xFF00FF) + ((overlayRgb & 0xFF00) * lig & 0xFF0000) >> 8; } } seLightness += southDeltaLightness; neLightness += northDeltaLightness; pixelOffset += width; } if (underlayRgb != 0 && sceneTileModel.getUnderlaySwColor() != INVALID_HSL_COLOR) { pixelOffset -= width << 2; shapeOffset -= 16; hs = sceneTileModel.getUnderlaySwColor() & ~0x7F; seLightness = sceneTileModel.getUnderlaySeColor() & 0x7F; neLightness = sceneTileModel.getUnderlayNeColor() & 0x7F; southDeltaLightness = (sceneTileModel.getUnderlaySwColor() & 0x7F) - seLightness; northDeltaLightness = (sceneTileModel.getUnderlayNwColor() & 0x7F) - neLightness; seLightness <<= 2; neLightness <<= 2; for (int i = 0; i < 4; i++) { if (points[indices[shapeOffset++]] == 0) { pixels[pixelOffset] = colorPalette[hs | (seLightness >> 2)]; } if (points[indices[shapeOffset++]] == 0) { pixels[pixelOffset + 1] = colorPalette[hs | (seLightness * 3 + neLightness >> 4)]; } if (points[indices[shapeOffset++]] == 0) { pixels[pixelOffset + 2] = colorPalette[hs | (seLightness + neLightness >> 3)]; } if (points[indices[shapeOffset++]] == 0) { pixels[pixelOffset + 3] = colorPalette[hs | (seLightness + neLightness * 3 >> 4)]; } seLightness += southDeltaLightness; neLightness += northDeltaLightness; pixelOffset += width; } } } else if (underlayRgb != 0) { for (int i = 0; i < 4; i++) { pixels[pixelOffset] = points[indices[shapeOffset++]] != 0 ? overlayRgb : underlayRgb; pixels[pixelOffset + 1] = points[indices[shapeOffset++]] != 0 ? overlayRgb : underlayRgb; pixels[pixelOffset + 2] = points[indices[shapeOffset++]] != 0 ? overlayRgb : underlayRgb; pixels[pixelOffset + 3] = points[indices[shapeOffset++]] != 0 ? overlayRgb : underlayRgb; pixelOffset += width; } } else { for (int i = 0; i < 4; i++) { if (points[indices[shapeOffset++]] != 0) { pixels[pixelOffset] = overlayRgb; } if (points[indices[shapeOffset++]] != 0) { pixels[pixelOffset + 1] = overlayRgb; } if (points[indices[shapeOffset++]] != 0) { pixels[pixelOffset + 2] = overlayRgb; } if (points[indices[shapeOffset++]] != 0) { pixels[pixelOffset + 3] = overlayRgb; } pixelOffset += width; } } } } }
1
16,464
revert this for parity
open-osrs-runelite
java
@@ -15,6 +15,10 @@ #include <valgrind/helgrind.h> #endif +#define INITIAL_BATCH 100 +#define INCR_BATCH 50 +#define DECR_BATCH 25 + enum { FLAG_BLOCKED = 1 << 0,
1
#define PONY_WANT_ATOMIC_DEFS #include "actor.h" #include "../sched/scheduler.h" #include "../sched/cpu.h" #include "../mem/pool.h" #include "../gc/cycle.h" #include "../gc/trace.h" #include <string.h> #include <stdio.h> #include <assert.h> #include <dtrace.h> #ifdef USE_VALGRIND #include <valgrind/helgrind.h> #endif enum { FLAG_BLOCKED = 1 << 0, FLAG_RC_CHANGED = 1 << 1, FLAG_SYSTEM = 1 << 2, FLAG_UNSCHEDULED = 1 << 3, FLAG_PENDINGDESTROY = 1 << 4, }; static bool actor_noblock = false; static bool has_flag(pony_actor_t* actor, uint8_t flag) { return (actor->flags & flag) != 0; } static void set_flag(pony_actor_t* actor, uint8_t flag) { actor->flags |= flag; } static void unset_flag(pony_actor_t* actor, uint8_t flag) { actor->flags &= (uint8_t)~flag; } static bool handle_message(pony_ctx_t* ctx, pony_actor_t* actor, pony_msg_t* msg) { switch(msg->id) { case ACTORMSG_ACQUIRE: { pony_msgp_t* m = (pony_msgp_t*)msg; if(ponyint_gc_acquire(&actor->gc, (actorref_t*)m->p) && has_flag(actor, FLAG_BLOCKED)) { // If our rc changes, we have to tell the cycle detector before sending // any CONF messages. set_flag(actor, FLAG_RC_CHANGED); } return false; } case ACTORMSG_RELEASE: { pony_msgp_t* m = (pony_msgp_t*)msg; if(ponyint_gc_release(&actor->gc, (actorref_t*)m->p) && has_flag(actor, FLAG_BLOCKED)) { // If our rc changes, we have to tell the cycle detector before sending // any CONF messages. set_flag(actor, FLAG_RC_CHANGED); } return false; } case ACTORMSG_CONF: { if(has_flag(actor, FLAG_BLOCKED) && !has_flag(actor, FLAG_RC_CHANGED)) { // We're blocked and our RC hasn't changed since our last block // message, send confirm. pony_msgi_t* m = (pony_msgi_t*)msg; ponyint_cycle_ack(ctx, m->i); } return false; } default: { if(has_flag(actor, FLAG_BLOCKED)) { // Send unblock before continuing. We no longer need to send any // pending rc change to the cycle detector. unset_flag(actor, FLAG_BLOCKED | FLAG_RC_CHANGED); ponyint_cycle_unblock(ctx, actor); } DTRACE3(ACTOR_MSG_RUN, (uintptr_t)ctx->scheduler, (uintptr_t)actor, msg->id); actor->type->dispatch(ctx, actor, msg); return true; } } } static void try_gc(pony_ctx_t* ctx, pony_actor_t* actor) { if(!ponyint_heap_startgc(&actor->heap)) return; DTRACE1(GC_START, (uintptr_t)ctx->scheduler); ponyint_gc_mark(ctx); if(actor->type->trace != NULL) actor->type->trace(ctx, actor); ponyint_mark_done(ctx); ponyint_heap_endgc(&actor->heap); DTRACE1(GC_END, (uintptr_t)ctx->scheduler); } bool ponyint_actor_run(pony_ctx_t* ctx, pony_actor_t* actor, size_t batch) { ctx->current = actor; pony_msg_t* msg; size_t app = 0; while(actor->continuation != NULL) { msg = actor->continuation; actor->continuation = atomic_load_explicit(&msg->next, memory_order_relaxed); bool ret = handle_message(ctx, actor, msg); ponyint_pool_free(msg->index, msg); if(ret) { // If we handle an application message, try to gc. app++; try_gc(ctx, actor); if(app == batch) return !has_flag(actor, FLAG_UNSCHEDULED); } } // If we have been scheduled, the head will not be marked as empty. pony_msg_t* head = atomic_load_explicit(&actor->q.head, memory_order_relaxed); while((msg = ponyint_messageq_pop(&actor->q)) != NULL) { if(handle_message(ctx, actor, msg)) { // If we handle an application message, try to gc. app++; try_gc(ctx, actor); if(app == batch) return !has_flag(actor, FLAG_UNSCHEDULED); } // Stop handling a batch if we reach the head we found when we were // scheduled. if(msg == head) break; } // We didn't hit our app message batch limit. 
We now believe our queue to be // empty, but we may have received further messages. assert(app < batch); try_gc(ctx, actor); if(has_flag(actor, FLAG_UNSCHEDULED)) { // When unscheduling, don't mark the queue as empty, since we don't want // to get rescheduled if we receive a message. return false; } // If we have processed any application level messages, defer blocking. if(app > 0) return true; // Tell the cycle detector we are blocking. We may not actually block if a // message is received between now and when we try to mark our queue as // empty, but that's ok, we have still logically blocked. if(!has_flag(actor, FLAG_BLOCKED | FLAG_SYSTEM) || has_flag(actor, FLAG_RC_CHANGED)) { set_flag(actor, FLAG_BLOCKED); unset_flag(actor, FLAG_RC_CHANGED); ponyint_cycle_block(ctx, actor, &actor->gc); } // Return true (i.e. reschedule immediately) if our queue isn't empty. return !ponyint_messageq_markempty(&actor->q); } void ponyint_actor_destroy(pony_actor_t* actor) { assert(has_flag(actor, FLAG_PENDINGDESTROY)); // Make sure the actor being destroyed has finished marking its queue // as empty. Otherwise, it may spuriously see that tail and head are not // the same and fail to mark the queue as empty, resulting in it getting // rescheduled. pony_msg_t* head = NULL; do { head = atomic_load_explicit(&actor->q.head, memory_order_relaxed); } while(((uintptr_t)head & (uintptr_t)1) != (uintptr_t)1); atomic_thread_fence(memory_order_acquire); #ifdef USE_VALGRIND ANNOTATE_HAPPENS_AFTER(&actor->q.head); #endif ponyint_messageq_destroy(&actor->q); ponyint_gc_destroy(&actor->gc); ponyint_heap_destroy(&actor->heap); // Free variable sized actors correctly. ponyint_pool_free_size(actor->type->size, actor); } gc_t* ponyint_actor_gc(pony_actor_t* actor) { return &actor->gc; } heap_t* ponyint_actor_heap(pony_actor_t* actor) { return &actor->heap; } bool ponyint_actor_pendingdestroy(pony_actor_t* actor) { return has_flag(actor, FLAG_PENDINGDESTROY); } void ponyint_actor_setpendingdestroy(pony_actor_t* actor) { set_flag(actor, FLAG_PENDINGDESTROY); } void ponyint_actor_final(pony_ctx_t* ctx, pony_actor_t* actor) { // This gets run while the cycle detector is handling a message. Set the // current actor before running anything. pony_actor_t* prev = ctx->current; ctx->current = actor; // Run the actor finaliser if it has one. if(actor->type->final != NULL) actor->type->final(actor); // Run all outstanding object finalisers. ponyint_gc_final(ctx, &actor->gc); // Restore the current actor. 
ctx->current = prev; } void ponyint_actor_sendrelease(pony_ctx_t* ctx, pony_actor_t* actor) { ponyint_gc_sendrelease(ctx, &actor->gc); } void ponyint_actor_setsystem(pony_actor_t* actor) { set_flag(actor, FLAG_SYSTEM); } void ponyint_actor_setnoblock(bool state) { actor_noblock = state; } pony_actor_t* pony_create(pony_ctx_t* ctx, pony_type_t* type) { assert(type != NULL); DTRACE1(ACTOR_ALLOC, (uintptr_t)ctx->scheduler); // allocate variable sized actors correctly pony_actor_t* actor = (pony_actor_t*)ponyint_pool_alloc_size(type->size); memset(actor, 0, type->size); actor->type = type; ponyint_messageq_init(&actor->q); ponyint_heap_init(&actor->heap); ponyint_gc_done(&actor->gc); if(actor_noblock) ponyint_actor_setsystem(actor); if(ctx->current != NULL) { // actors begin unblocked and referenced by the creating actor actor->gc.rc = GC_INC_MORE; ponyint_gc_createactor(ctx->current, actor); } else { // no creator, so the actor isn't referenced by anything actor->gc.rc = 0; } return actor; } void ponyint_destroy(pony_actor_t* actor) { // This destroys an actor immediately. If any other actor has a reference to // this actor, the program will likely crash. The finaliser is not called. ponyint_actor_setpendingdestroy(actor); ponyint_actor_destroy(actor); } pony_msg_t* pony_alloc_msg(uint32_t index, uint32_t id) { pony_msg_t* msg = (pony_msg_t*)ponyint_pool_alloc(index); msg->index = index; msg->id = id; return msg; } pony_msg_t* pony_alloc_msg_size(size_t size, uint32_t id) { return pony_alloc_msg((uint32_t)ponyint_pool_index(size), id); } void pony_sendv(pony_ctx_t* ctx, pony_actor_t* to, pony_msg_t* m) { DTRACE2(ACTOR_MSG_SEND, (uintptr_t)ctx->scheduler, m->id); if(ponyint_messageq_push(&to->q, m)) { if(!has_flag(to, FLAG_UNSCHEDULED)) ponyint_sched_add(ctx, to); } } void pony_send(pony_ctx_t* ctx, pony_actor_t* to, uint32_t id) { pony_msg_t* m = pony_alloc_msg(POOL_INDEX(sizeof(pony_msg_t)), id); pony_sendv(ctx, to, m); } void pony_sendp(pony_ctx_t* ctx, pony_actor_t* to, uint32_t id, void* p) { pony_msgp_t* m = (pony_msgp_t*)pony_alloc_msg( POOL_INDEX(sizeof(pony_msgp_t)), id); m->p = p; pony_sendv(ctx, to, &m->msg); } void pony_sendi(pony_ctx_t* ctx, pony_actor_t* to, uint32_t id, intptr_t i) { pony_msgi_t* m = (pony_msgi_t*)pony_alloc_msg( POOL_INDEX(sizeof(pony_msgi_t)), id); m->i = i; pony_sendv(ctx, to, &m->msg); } void pony_continuation(pony_actor_t* self, pony_msg_t* m) { atomic_store_explicit(&m->next, self->continuation, memory_order_relaxed); self->continuation = m; } void* pony_alloc(pony_ctx_t* ctx, size_t size) { DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, size); return ponyint_heap_alloc(ctx->current, &ctx->current->heap, size); } void* pony_alloc_small(pony_ctx_t* ctx, uint32_t sizeclass) { DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, HEAP_MIN << sizeclass); return ponyint_heap_alloc_small(ctx->current, &ctx->current->heap, sizeclass); } void* pony_alloc_large(pony_ctx_t* ctx, size_t size) { DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, size); return ponyint_heap_alloc_large(ctx->current, &ctx->current->heap, size); } void* pony_realloc(pony_ctx_t* ctx, void* p, size_t size) { DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, size); return ponyint_heap_realloc(ctx->current, &ctx->current->heap, p, size); } void* pony_alloc_final(pony_ctx_t* ctx, size_t size, pony_final_fn final) { DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, size); void* p = ponyint_heap_alloc(ctx->current, &ctx->current->heap, size); ponyint_gc_register_final(ctx, p, final); return p; } void 
pony_triggergc(pony_actor_t* actor) { actor->heap.next_gc = 0; } void pony_schedule(pony_ctx_t* ctx, pony_actor_t* actor) { if(!has_flag(actor, FLAG_UNSCHEDULED)) return; unset_flag(actor, FLAG_UNSCHEDULED); ponyint_sched_add(ctx, actor); } void pony_unschedule(pony_ctx_t* ctx, pony_actor_t* actor) { if(has_flag(actor, FLAG_BLOCKED)) { ponyint_cycle_unblock(ctx, actor); unset_flag(actor, FLAG_BLOCKED | FLAG_RC_CHANGED); } set_flag(actor, FLAG_UNSCHEDULED); } void pony_become(pony_ctx_t* ctx, pony_actor_t* actor) { ctx->current = actor; } void pony_poll(pony_ctx_t* ctx) { assert(ctx->current != NULL); ponyint_actor_run(ctx, ctx->current, 1); }
1
9,335
INCR_BATCH and DECR_BATCH could be exposed as tunable command-line options, but that might be too low-level to expose. I think keeping them private for now, where "in the know" folks can use them to try to tune, makes sense. These values were chosen mostly "at random" and have worked out for us thus far.
ponylang-ponyc
c
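The ponyc diff only defines INITIAL_BATCH, INCR_BATCH, and DECR_BATCH; how the scheduler consumes them is not shown in this excerpt. The Go sketch below is one plausible adaptive-batch policy consistent with those names, offered purely as an assumption, not as ponyc's actual logic: grow the batch when an actor keeps exhausting it, shrink it when the actor's queue empties early.

package main

import "fmt"

const (
	initialBatch = 100 // INITIAL_BATCH in the diff
	incrBatch    = 50  // INCR_BATCH
	decrBatch    = 25  // DECR_BATCH
	minBatch     = 1
)

// adjust returns the next batch size given whether the actor used up
// its whole batch on this run. This policy is an illustrative guess.
func adjust(batch int, exhausted bool) int {
	if exhausted {
		return batch + incrBatch // busy actor: give it a longer turn
	}
	if batch-decrBatch >= minBatch {
		return batch - decrBatch // mostly idle actor: yield the core sooner
	}
	return minBatch
}

func main() {
	batch := initialBatch
	for _, exhausted := range []bool{true, true, false, false, false} {
		batch = adjust(batch, exhausted)
		fmt.Println("next batch:", batch)
	}
}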
@@ -27,6 +27,11 @@ static tagWriter *writerTable [WRITER_COUNT] = { static tagWriter *writer; +extern void setCustomTagWriter(tagWriter *w) +{ + writer = w; +} + extern void setTagWriter (writerType wtype) { writer = writerTable [wtype];
1
/* * Copyright (c) 2016, Red Hat, Inc. * Copyright (c) 2016, Masatake YAMATO * * This source code is released for free distribution under the terms of the * GNU General Public License version 2 or (at your option) any later version. * */ #include "general.h" #include "entry_p.h" #include "writer_p.h" extern tagWriter uCtagsWriter; extern tagWriter eCtagsWriter; extern tagWriter etagsWriter; extern tagWriter xrefWriter; extern tagWriter jsonWriter; static tagWriter *writerTable [WRITER_COUNT] = { [WRITER_U_CTAGS] = &uCtagsWriter, [WRITER_E_CTAGS] = &eCtagsWriter, [WRITER_ETAGS] = &etagsWriter, [WRITER_XREF] = &xrefWriter, [WRITER_JSON] = &jsonWriter, }; static tagWriter *writer; extern void setTagWriter (writerType wtype) { writer = writerTable [wtype]; writer->type = wtype; } extern void writerSetup (MIO *mio) { if (writer->preWriteEntry) writer->private = writer->preWriteEntry (writer, mio); else writer->private = NULL; } extern bool writerTeardown (MIO *mio, const char *filename) { if (writer->postWriteEntry) { bool r; r = writer->postWriteEntry (writer, mio, filename); writer->private = NULL; return r; } return false; } extern int writerWriteTag (MIO * mio, const tagEntryInfo *const tag) { return writer->writeEntry (writer, mio, tag); } extern int writerWritePtag (MIO * mio, const ptagDesc *desc, const char *const fileName, const char *const pattern, const char *const parserName) { if (writer->writePtagEntry == NULL) return -1; return writer->writePtagEntry (writer, mio, desc, fileName, pattern, parserName); } extern bool ptagMakeCtagsOutputMode (ptagDesc *desc, void *data CTAGS_ATTR_UNUSED) { const char *mode =""; if (&uCtagsWriter == writer) mode = "u-ctags"; else if (&eCtagsWriter == writer) mode = "e-ctags"; return writePseudoTag (desc, mode, "u-ctags or e-ctags", NULL); } extern const char *outputDefaultFileName (void) { return writer->defaultFileName; } extern bool writerCanPrintPtag (void) { return (writer->writePtagEntry)? true: false; } extern bool writerDoesTreatFieldAsFixed (int fieldType) { if (writer->treatFieldAsFixed) return writer->treatFieldAsFixed (fieldType); return false; }
1
17,618
Currently this isn't very clean because we have to reuse some of the predefined writer types: the demo uses WRITER_U_CTAGS, but there should be some support for custom writer types.
universal-ctags-ctags
c
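The review notes that setCustomTagWriter currently forces callers to reuse a predefined slot such as WRITER_U_CTAGS. One way to support genuinely custom writer types is to hand out fresh IDs from a registry; the sketch below illustrates that shape in Go, with a hypothetical Writer interface and registry rather than the real ctags C API.

package main

import "fmt"

type WriterType int

const (
	WriterUCtags WriterType = iota
	WriterECtags
	WriterEtags
	WriterXref
	WriterJSON
	writerCount // first ID available for custom writers
)

type Writer interface {
	WriteEntry(tag string) string
}

// writers holds only custom writers; built-ins live in a fixed table.
var writers = map[WriterType]Writer{}

// RegisterCustomWriter returns a new, unique writer type so custom
// writers never collide with the built-in table.
func RegisterCustomWriter(w Writer) WriterType {
	t := WriterType(int(writerCount) + len(writers))
	writers[t] = w
	return t
}

type prefixWriter struct{}

func (prefixWriter) WriteEntry(tag string) string { return "CUSTOM:" + tag }

func main() {
	t := RegisterCustomWriter(prefixWriter{})
	fmt.Println(t, writers[t].WriteEntry("main"))
}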
@@ -1,6 +1,7 @@ module RequestSpecHelper def get_json(url) get(url) + puts response.pretty_inspect JSON.parse(response.body) end
1
module RequestSpecHelper def get_json(url) get(url) JSON.parse(response.body) end # requires IntegrationSpecHelper def login_as(user) setup_mock_auth(:myusa, user) get '/auth/myusa/callback' end def time_to_json(time) time.utc.iso8601(3) end # Add support for testing `options` requests in rspec. # https://gist.github.com/melcher/8854953 def options(*args) reset! unless integration_session integration_session.__send__(:process, :options, *args).tap do copy_session_variables! end end end
1
17,224
I think this was left in accidentally :)
18F-C2
rb
@@ -64,12 +64,12 @@ func IsNamespaceSupported(ns NamespaceType) bool { func NamespaceTypes() []NamespaceType { return []NamespaceType{ + NEWUSER, // Keep user NS always first, don't move it. + NEWIPC, + NEWUTS, NEWNET, NEWPID, NEWNS, - NEWUTS, - NEWIPC, - NEWUSER, } }
1
// +build linux freebsd package configs import ( "fmt" "os" "sync" ) const ( NEWNET NamespaceType = "NEWNET" NEWPID NamespaceType = "NEWPID" NEWNS NamespaceType = "NEWNS" NEWUTS NamespaceType = "NEWUTS" NEWIPC NamespaceType = "NEWIPC" NEWUSER NamespaceType = "NEWUSER" ) var ( nsLock sync.Mutex supportedNamespaces = make(map[NamespaceType]bool) ) // NsName converts the namespace type to its filename func NsName(ns NamespaceType) string { switch ns { case NEWNET: return "net" case NEWNS: return "mnt" case NEWPID: return "pid" case NEWIPC: return "ipc" case NEWUSER: return "user" case NEWUTS: return "uts" } return "" } // IsNamespaceSupported returns whether a namespace is available or // not func IsNamespaceSupported(ns NamespaceType) bool { nsLock.Lock() defer nsLock.Unlock() supported, ok := supportedNamespaces[ns] if ok { return supported } nsFile := NsName(ns) // if the namespace type is unknown, just return false if nsFile == "" { return false } _, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", nsFile)) // a namespace is supported if it exists and we have permissions to read it supported = err == nil supportedNamespaces[ns] = supported return supported } func NamespaceTypes() []NamespaceType { return []NamespaceType{ NEWNET, NEWPID, NEWNS, NEWUTS, NEWIPC, NEWUSER, } } // Namespace defines configuration for each namespace. It specifies an // alternate path that is able to be joined via setns. type Namespace struct { Type NamespaceType `json:"type"` Path string `json:"path"` } func (n *Namespace) GetPath(pid int) string { if n.Path != "" { return n.Path } return fmt.Sprintf("/proc/%d/ns/%s", pid, NsName(n.Type)) } func (n *Namespaces) Remove(t NamespaceType) bool { i := n.index(t) if i == -1 { return false } *n = append((*n)[:i], (*n)[i+1:]...) return true } func (n *Namespaces) Add(t NamespaceType, path string) { i := n.index(t) if i == -1 { *n = append(*n, Namespace{Type: t, Path: path}) return } (*n)[i].Path = path } func (n *Namespaces) index(t NamespaceType) int { for i, ns := range *n { if ns.Type == t { return i } } return -1 } func (n *Namespaces) Contains(t NamespaceType) bool { return n.index(t) != -1 } func (n *Namespaces) PathOf(t NamespaceType) string { i := n.index(t) if i == -1 { return "" } return (*n)[i].Path }
1
14,671
Can you finish re-ordering this list to match the one that was deleted?
opencontainers-runc
go
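The runc diff pins NEWUSER to the front of NamespaceTypes(), with an in-code comment saying to keep it there. The usual rationale, inferred here rather than stated in the diff, is that when running unprivileged the user namespace must be set up before the others to obtain the privileges they require. The trimmed Go sketch below encodes that ordering invariant as a check; the types are stand-ins for the configs package.

package main

import "fmt"

type NamespaceType string

const (
	NEWUSER NamespaceType = "NEWUSER"
	NEWIPC  NamespaceType = "NEWIPC"
	NEWUTS  NamespaceType = "NEWUTS"
	NEWNET  NamespaceType = "NEWNET"
	NEWPID  NamespaceType = "NEWPID"
	NEWNS   NamespaceType = "NEWNS"
)

// NamespaceTypes mirrors the post-diff ordering: user NS stays first.
func NamespaceTypes() []NamespaceType {
	return []NamespaceType{NEWUSER, NEWIPC, NEWUTS, NEWNET, NEWPID, NEWNS}
}

func main() {
	if ts := NamespaceTypes(); ts[0] != NEWUSER {
		panic("user namespace must stay first")
	}
	fmt.Println("ordering invariant holds:", NamespaceTypes())
}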